From 6b70ee23abf434f82cf98f42f1eb4a3156bb21bd Mon Sep 17 00:00:00 2001
From: Nick Irvine
Date: Wed, 21 May 2014 19:24:28 -0700
Subject: [PATCH 0001/3617] Clean non-printable chars from stdout instead of
dropping the whole thing
---
lib/ansible/runner/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index adc9b7bcbd147a..077724f9f305a2 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -877,7 +877,7 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port,
if hasattr(sys.stdout, "isatty"):
if "stdout" in data and sys.stdout.isatty():
if not string_functions.isprintable(data['stdout']):
- data['stdout'] = ''
+ data['stdout'] = ''.join(c for c in data['stdout'] if string_functions.isprintable(c))
if 'item' in inject:
result.result['item'] = inject['item']
From 3d61f077ec1ba2c0fdd4d493c730a4299e2f883d Mon Sep 17 00:00:00 2001
From: Jordon Replogle
Date: Wed, 30 Jul 2014 10:08:22 -0700
Subject: [PATCH 0002/3617] Added OpenVZ Inventory python script
---
plugins/inventory/openvz.py | 74 +++++++++++++++++++++++++++++++++++++
1 file changed, 74 insertions(+)
create mode 100644 plugins/inventory/openvz.py
diff --git a/plugins/inventory/openvz.py b/plugins/inventory/openvz.py
new file mode 100644
index 00000000000000..1f441a39f540f8
--- /dev/null
+++ b/plugins/inventory/openvz.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# openvz.py
+#
+# Copyright 2014 jordonr
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+# MA 02110-1301, USA.
+#
+#
+# Inspired by libvirt_lxc.py inventory script
+# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py
+#
+# Groups are determined by the description field of openvz guests
+# multiple groups can be seperated by commas: webserver,dbserver
+
+from subprocess import Popen,PIPE
+import sys
+import json
+
+
+#List openvz hosts
+vzhosts = ['192.168.1.3','192.168.1.2','192.168.1.1']
+#Add openvzhosts to the inventory
+inventory = {'vzhosts': {'hosts': vzhosts}}
+#default group, when description not defined
+default_group = ['vzguest']
+
+def getGuests():
+ #Loop through vzhosts
+ for h in vzhosts:
+ #SSH to vzhost and get the list of guests in json
+ pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True)
+
+ #Load Json info of guests
+ json_data = json.loads(pipe.stdout.read())
+
+ #loop through guests
+ for j in json_data:
+ #determine group from guest description
+ if j['description'] is not None:
+ groups = j['description'].split(",")
+ else:
+ groups = default_group
+
+ #add guest to inventory
+ for g in groups:
+ if g not in inventory:
+ inventory[g] = {'hosts': []}
+
+ for ip in j['ip']:
+ inventory[g]['hosts'].append(ip)
+
+ print json.dumps(inventory)
+
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+ getGuests()
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+ print json.dumps({});
+else:
+ print "Need an argument, either --list or --host "
From df8dfdce06f837c49f230d5e27b513f2bfe27cf1 Mon Sep 17 00:00:00 2001
From: Serge van Ginderachter
Date: Wed, 6 Aug 2014 13:00:14 +0200
Subject: [PATCH 0003/3617] packaging: add short hash and branch name in package
version for unofficial builds
---
Makefile | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/Makefile b/Makefile
index afd7162f96e502..56c63903b6d620 100644
--- a/Makefile
+++ b/Makefile
@@ -39,6 +39,11 @@ VERSION := $(shell cat VERSION)
# Get the branch information from git
ifneq ($(shell which git),)
GIT_DATE := $(shell git log -n 1 --format="%ai")
+GIT_HASH := $(shell git log -n 1 --format="%h")
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.]//g')
+GITINFO = .$(GIT_HASH).$(GIT_BRANCH)
+else
+GITINFO = ''
endif
ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1)
@@ -60,7 +65,7 @@ ifeq ($(OFFICIAL),yes)
DEBUILD_OPTS += -k$(DEBSIGN_KEYID)
endif
else
- DEB_RELEASE = 0.git$(DATE)
+ DEB_RELEASE = 0.git$(DATE)$(GITINFO)
# Do not sign unofficial builds
DEBUILD_OPTS += -uc -us
DPUT_OPTS += -u
@@ -76,7 +81,7 @@ RPMSPEC = $(RPMSPECDIR)/ansible.spec
RPMDIST = $(shell rpm --eval '%{?dist}')
RPMRELEASE = 1
ifneq ($(OFFICIAL),yes)
- RPMRELEASE = 0.git$(DATE)
+ RPMRELEASE = 0.git$(DATE)$(GITINFO)
endif
RPMNVR = "$(NAME)-$(VERSION)-$(RPMRELEASE)$(RPMDIST)"
From 0ff2936626afe83e2898e8ccecf59b891e550bf5 Mon Sep 17 00:00:00 2001
From: Jordon Replogle
Date: Wed, 13 Aug 2014 10:28:43 -0700
Subject: [PATCH 0004/3617] Updated per Revision Request
---
plugins/inventory/openvz.py | 73 +++++++++++++++++++------------------
1 file changed, 38 insertions(+), 35 deletions(-)
diff --git a/plugins/inventory/openvz.py b/plugins/inventory/openvz.py
index 1f441a39f540f8..fd0bd9ff79454b 100644
--- a/plugins/inventory/openvz.py
+++ b/plugins/inventory/openvz.py
@@ -5,21 +5,20 @@
#
# Copyright 2014 jordonr
#
-# This program is free software; you can redistribute it and/or modify
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
+# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
-# MA 02110-1301, USA.
-#
+# along with Ansible. If not, see .
#
# Inspired by libvirt_lxc.py inventory script
# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py
@@ -33,42 +32,46 @@
#List openvz hosts
-vzhosts = ['192.168.1.3','192.168.1.2','192.168.1.1']
-#Add openvzhosts to the inventory
-inventory = {'vzhosts': {'hosts': vzhosts}}
+vzhosts = ['vzhost1','vzhost2','vzhost3']
+#Add openvz hosts to the inventory and Add "_meta" trick
+inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
#default group, when description not defined
default_group = ['vzguest']
-def getGuests():
- #Loop through vzhosts
- for h in vzhosts:
- #SSH to vzhost and get the list of guests in json
- pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True)
+def get_guests():
+ #Loop through vzhosts
+ for h in vzhosts:
+ #SSH to vzhost and get the list of guests in json
+ pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True)
+
+ #Load Json info of guests
+ json_data = json.loads(pipe.stdout.read())
+
+ #loop through guests
+ for j in json_data:
+ #Add information to host vars
+ inventory['_meta']['hostvars'][j['hostname']] = {'ctid': j['ctid'], 'veid': j['veid'], 'vpsid': j['vpsid'], 'private_path': j['private'], 'root_path': j['root'], 'ip': j['ip']}
- #Load Json info of guests
- json_data = json.loads(pipe.stdout.read())
+ #determine group from guest description
+ if j['description'] is not None:
+ groups = j['description'].split(",")
+ else:
+ groups = default_group
- #loop through guests
- for j in json_data:
- #determine group from guest description
- if j['description'] is not None:
- groups = j['description'].split(",")
- else:
- groups = default_group
+ #add guest to inventory
+ for g in groups:
+ if g not in inventory:
+ inventory[g] = {'hosts': []}
- #add guest to inventory
- for g in groups:
- if g not in inventory:
- inventory[g] = {'hosts': []}
+ inventory[g]['hosts'].append(j['hostname'])
- for ip in j['ip']:
- inventory[g]['hosts'].append(ip)
+ return inventory
- print json.dumps(inventory)
if len(sys.argv) == 2 and sys.argv[1] == '--list':
- getGuests()
+ inv_json = get_guests()
+ print json.dumps(inv_json, sort_keys=True)
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print json.dumps({});
+ print json.dumps({});
else:
- print "Need an argument, either --list or --host "
+ print "Need an argument, either --list or --host "
From eccb48c8da77bf9ba884cc989251ed5d5209b1e1 Mon Sep 17 00:00:00 2001
From: Carson Gee
Date: Sat, 17 May 2014 22:10:24 -0400
Subject: [PATCH 0005/3617] Improvements to OpenStack inventory script
---
plugins/inventory/nova.ini | 9 +-
plugins/inventory/nova.py | 166 ++++++++++++++++++++++++++-----------
2 files changed, 124 insertions(+), 51 deletions(-)
mode change 100755 => 100644 plugins/inventory/nova.py
diff --git a/plugins/inventory/nova.ini b/plugins/inventory/nova.ini
index e648e5f143c12b..040c52bcee9b1c 100644
--- a/plugins/inventory/nova.ini
+++ b/plugins/inventory/nova.ini
@@ -14,7 +14,7 @@ api_key =
auth_url =
# Authentication system
-auth_system =
+auth_system = keystone
# OpenStack nova project_id
project_id =
@@ -22,6 +22,13 @@ project_id =
# Serverarm region name to use
region_name =
+# Specify a preference for public or private IPs (public is default)
+prefer_private = False
+
+# What service type (required for newer nova client)
+service_type = compute
+
+
# TODO: Some other options
# insecure =
# endpoint_type =
diff --git a/plugins/inventory/nova.py b/plugins/inventory/nova.py
old mode 100755
new mode 100644
index 585e26732ed316..b1094c72887df4
--- a/plugins/inventory/nova.py
+++ b/plugins/inventory/nova.py
@@ -25,11 +25,9 @@
try:
import json
-except:
+except ImportError:
import simplejson as json
-from ansible.module_utils.openstack import *
-
###################################################
# executed with no parameters, return the list of
# all groups and hosts
@@ -54,45 +52,129 @@ def nova_load_config_file():
return None
+
+def get_fallback(config, value, section="openstack"):
+ """
+ Get value from config object and return the value
+ or false
+ """
+ try:
+ return config.get(section, value)
+ except ConfigParser.NoOptionError:
+ return False
+
+
+def push(data, key, element):
+ """
+ Assist in items to a dictionary of lists
+ """
+ if (not element) or (not key):
+ return
+
+ if key in data:
+ data[key].append(element)
+ else:
+ data[key] = [element]
+
+
+def to_safe(word):
+ '''
+ Converts 'bad' characters in a string to underscores so they can
+ be used as Ansible groups
+ '''
+ return re.sub(r"[^A-Za-z0-9\-]", "_", word)
+
+
+def get_ips(server, access_ip=True):
+ """
+ Returns a list of the server's IPs, or the preferred
+ access IP
+ """
+ private = []
+ public = []
+ address_list = []
+ # Iterate through each servers network(s), get addresses and get type
+ addresses = getattr(server, 'addresses', {})
+ if len(addresses) > 0:
+ for network in addresses.itervalues():
+ for address in network:
+ if address.get('OS-EXT-IPS:type', False) == 'fixed':
+ private.append(address['addr'])
+ elif address.get('OS-EXT-IPS:type', False) == 'floating':
+ public.append(address['addr'])
+
+ if not access_ip:
+ address_list.append(server.accessIPv4)
+ address_list.extend(private)
+ address_list.extend(public)
+ return address_list
+
+ access_ip = None
+ # Append group to list
+ if server.accessIPv4:
+ access_ip = server.accessIPv4
+ if (not access_ip) and public and not (private and prefer_private):
+ access_ip = public[0]
+ if private and not access_ip:
+ access_ip = private[0]
+
+ return access_ip
+
+
+def get_metadata(server):
+ """Returns dictionary of all host metadata"""
+ get_ips(server, False)
+ results = {}
+ for key in vars(server):
+ # Extract value
+ value = getattr(server, key)
+
+ # Generate sanitized key
+ key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()
+
+ # Att value to instance result (exclude manager class)
+ #TODO: maybe use value.__class__ or similar inside of key_name
+ if key != 'os_manager':
+ results[key] = value
+ return results
+
config = nova_load_config_file()
if not config:
sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES))
client = nova_client.Client(
- config.get('openstack', 'version'),
- config.get('openstack', 'username'),
- config.get('openstack', 'api_key'),
- config.get('openstack', 'project_id'),
- config.get('openstack', 'auth_url'),
+ version = config.get('openstack', 'version'),
+ username = config.get('openstack', 'username'),
+ api_key = config.get('openstack', 'api_key'),
+ auth_url = config.get('openstack', 'auth_url'),
region_name = config.get('openstack', 'region_name'),
+ project_id = config.get('openstack', 'project_id'),
auth_system = config.get('openstack', 'auth_system')
)
-if len(sys.argv) == 2 and (sys.argv[1] == '--list'):
- groups = {}
-
+# Default or added list option
+if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1:
+ groups = {'_meta': {'hostvars': {}}}
# Cycle on servers
for server in client.servers.list():
- private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
- public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
-
- # Define group (or set to empty string)
- group = server.metadata['group'] if server.metadata.has_key('group') else 'undefined'
-
- # Create group if not exist
- if group not in groups:
- groups[group] = []
-
- # Append group to list
- if server.accessIPv4:
- groups[group].append(server.accessIPv4)
- continue
- if public:
- groups[group].append(''.join(public))
- continue
- if private:
- groups[group].append(''.join(private))
- continue
+ access_ip = get_ips(server)
+
+ # Push to name group of 1
+ push(groups, server.name, access_ip)
+
+ # Run through each metadata item and add instance to it
+ for key, value in server.metadata.iteritems():
+ composed_key = to_safe('tag_{0}_{1}'.format(key, value))
+ push(groups, composed_key, access_ip)
+
+ # Do special handling of group for backwards compat
+ # inventory groups
+ group = server.metadata['group'] if 'group' in server.metadata else 'undefined'
+ push(groups, group, access_ip)
+
+ # Add vars to _meta key for performance optimization in
+ # Ansible 1.3+
+ groups['_meta']['hostvars'][access_ip] = get_metadata(server)
# Return server list
print(json.dumps(groups, sort_keys=True, indent=2))
@@ -105,25 +187,9 @@ def nova_load_config_file():
elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
results = {}
ips = []
- for instance in client.servers.list():
- private = openstack_find_nova_addresses(getattr(instance, 'addresses'), 'fixed', 'private')
- public = openstack_find_nova_addresses(getattr(instance, 'addresses'), 'floating', 'public')
- ips.append( instance.accessIPv4)
- ips.append(''.join(private))
- ips.append(''.join(public))
- if sys.argv[2] in ips:
- for key in vars(instance):
- # Extract value
- value = getattr(instance, key)
-
- # Generate sanitized key
- key = 'os_' + re.sub("[^A-Za-z0-9\-]", "_", key).lower()
-
- # Att value to instance result (exclude manager class)
- #TODO: maybe use value.__class__ or similar inside of key_name
- if key != 'os_manager':
- results[key] = value
-
+ for server in client.servers.list():
+ if sys.argv[2] in (get_ips(server) or []):
+ results = get_metadata(server)
print(json.dumps(results, sort_keys=True, indent=2))
sys.exit(0)
From cd5edc416c810354704c5b41701b1bcebb42305c Mon Sep 17 00:00:00 2001
From: Marc Abramowitz
Date: Tue, 1 Jul 2014 09:41:55 -0700
Subject: [PATCH 0006/3617] nova.py: Set defaults for OpenStack settings
- auth_system
- region_name
- service_type
These are config settings that could be left out in many scenarios, but
the current code is requiring them. In particular, "service_type" is a
new one in PR #7444 so if we add that and don't set a default, then
existing .ini files won't work:
```
File
"/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py",
line 618, in get
raise NoOptionError(option, section)
ConfigParser.NoOptionError: No option 'service_type' in section:
'openstack'
```
---
plugins/inventory/nova.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/plugins/inventory/nova.py b/plugins/inventory/nova.py
index b1094c72887df4..48e720184f5ec0 100644
--- a/plugins/inventory/nova.py
+++ b/plugins/inventory/nova.py
@@ -39,6 +39,7 @@
NOVA_DEFAULTS = {
'auth_system': None,
'region_name': None,
+ 'service_type': 'compute',
}
From 1560b963aa2b5188cf138a1f0be0e27b22f4915a Mon Sep 17 00:00:00 2001
From: Marc Abramowitz
Date: Tue, 1 Jul 2014 12:20:15 -0700
Subject: [PATCH 0007/3617] nova.py: Support OS_AUTH_SYSTEM and OS_REGION_NAME
---
plugins/inventory/nova.py | 37 ++++++++++++++++++++++++++++++-------
1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/plugins/inventory/nova.py b/plugins/inventory/nova.py
index 48e720184f5ec0..7e58390ee1a147 100644
--- a/plugins/inventory/nova.py
+++ b/plugins/inventory/nova.py
@@ -143,14 +143,37 @@ def get_metadata(server):
if not config:
sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES))
+# Load up connections info based on config and then environment
+# variables
+username = (get_fallback(config, 'username') or
+ os.environ.get('OS_USERNAME', None))
+api_key = (get_fallback(config, 'api_key') or
+ os.environ.get('OS_PASSWORD', None))
+auth_url = (get_fallback(config, 'auth_url') or
+ os.environ.get('OS_AUTH_URL', None))
+project_id = (get_fallback(config, 'project_id') or
+ os.environ.get('OS_TENANT_NAME', None))
+region_name = (get_fallback(config, 'region_name') or
+ os.environ.get('OS_REGION_NAME', None))
+auth_system = (get_fallback(config, 'auth_system') or
+ os.environ.get('OS_AUTH_SYSTEM', None))
+
+# Determine what type of IP is preferred to return
+prefer_private = False
+try:
+ prefer_private = config.getboolean('openstack', 'prefer_private')
+except ConfigParser.NoOptionError:
+ pass
+
client = nova_client.Client(
- version = config.get('openstack', 'version'),
- username = config.get('openstack', 'username'),
- api_key = config.get('openstack', 'api_key'),
- auth_url = config.get('openstack', 'auth_url'),
- region_name = config.get('openstack', 'region_name'),
- project_id = config.get('openstack', 'project_id'),
- auth_system = config.get('openstack', 'auth_system')
+ version=config.get('openstack', 'version'),
+ username=username,
+ api_key=api_key,
+ auth_url=auth_url,
+ region_name=region_name,
+ project_id=project_id,
+ auth_system=auth_system,
+ service_type=config.get('openstack', 'service_type'),
)
# Default or added list option
From 7cc5ecae527588dde572ddbace1d13e4a4b62bdf Mon Sep 17 00:00:00 2001
From: Marc Abramowitz
Date: Tue, 1 Jul 2014 12:47:25 -0700
Subject: [PATCH 0008/3617] nova.ini: Distinguish between required and optional
settings
Put them in separate sections of config to make it more clear what is
essential and what is not. Also comment out the optional settings.
And remove duplicate mention of `service_type`.
---
plugins/inventory/nova.ini | 26 +++++++++++++++++---------
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/plugins/inventory/nova.ini b/plugins/inventory/nova.ini
index 040c52bcee9b1c..4900c49651603b 100644
--- a/plugins/inventory/nova.ini
+++ b/plugins/inventory/nova.ini
@@ -1,37 +1,45 @@
# Ansible OpenStack external inventory script
[openstack]
+
+#-------------------------------------------------------------------------
+# Required settings
+#-------------------------------------------------------------------------
+
# API version
version = 2
# OpenStack nova username
username =
-# OpenStack nova api_key
+# OpenStack nova api_key or password
api_key =
# OpenStack nova auth_url
auth_url =
-# Authentication system
-auth_system = keystone
+# OpenStack nova project_id or tenant name
+project_id =
-# OpenStack nova project_id
-project_id =
+#-------------------------------------------------------------------------
+# Optional settings
+#-------------------------------------------------------------------------
+
+# Authentication system
+# auth_system = keystone
# Serverarm region name to use
-region_name =
+# region_name =
# Specify a preference for public or private IPs (public is default)
-prefer_private = False
+# prefer_private = False
# What service type (required for newer nova client)
-service_type = compute
+# service_type = compute
# TODO: Some other options
# insecure =
# endpoint_type =
# extensions =
-# service_type =
# service_name =
From 3ca654ad9ade1ce2745f4b3496d3a1683ace2ce5 Mon Sep 17 00:00:00 2001
From: Strahinja Kustudic
Date: Sun, 5 Oct 2014 19:54:31 +0200
Subject: [PATCH 0009/3617] Added an example for parentheses
---
docsite/rst/playbooks_conditionals.rst | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/docsite/rst/playbooks_conditionals.rst b/docsite/rst/playbooks_conditionals.rst
index a00ec916c41f2c..cdaf54f5ea4a7b 100644
--- a/docsite/rst/playbooks_conditionals.rst
+++ b/docsite/rst/playbooks_conditionals.rst
@@ -26,6 +26,14 @@ It's actually pretty simple::
command: /sbin/shutdown -t now
when: ansible_os_family == "Debian"
+You can also use parentheses to group conditions::
+
+ tasks:
+ - name: "shutdown CentOS 6 and 7 systems"
+ command: /sbin/shutdown -t now
+ when: ansible_distribution == "CentOS" and
+ (ansible_distribution_major_version == "6" or ansible_distribution_major_version == "7")
+
A number of Jinja2 "filters" can also be used in when statements, some of which are unique
and provided by Ansible. Suppose we want to ignore the error of one statement and then
decide to do something conditionally based on success or failure::
From 76f473cd5d5a8ed1c6c5deb173587ce01e5b8f29 Mon Sep 17 00:00:00 2001
From: Mathieu GAUTHIER-LAFAYE
Date: Mon, 6 Oct 2014 17:12:03 +0200
Subject: [PATCH 0010/3617] add a proxmox inventory plugin
---
plugins/inventory/proxmox.py | 131 +++++++++++++++++++++++++++++++++++
1 file changed, 131 insertions(+)
create mode 100755 plugins/inventory/proxmox.py
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py
new file mode 100755
index 00000000000000..ceb41110278417
--- /dev/null
+++ b/plugins/inventory/proxmox.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+import urllib
+import urllib2
+try:
+ import json
+except ImportError:
+ import simplejson as json
+import os
+import sys
+from optparse import OptionParser
+
+class ProxmoxNodeList(list):
+ def get_names(self):
+ return [node['node'] for node in self]
+
+class ProxmoxQemuList(list):
+ def get_names(self):
+ return [qemu['name'] for qemu in self if qemu['template'] != 1]
+
+class ProxmoxPoolList(list):
+ def get_names(self):
+ return [pool['poolid'] for pool in self]
+
+class ProxmoxPool(dict):
+ def get_members_name(self):
+ return [member['name'] for member in self['members'] if member['template'] != 1]
+
+class ProxmoxAPI(object):
+ def __init__(self, options):
+ self.options = options
+ self.credentials = None
+
+ if not options.url:
+ raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
+ elif not options.username:
+ raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
+ elif not options.password:
+ raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
+
+ def auth(self):
+ request_path = '{}api2/json/access/ticket'.format(self.options.url)
+
+ request_params = urllib.urlencode({
+ 'username': self.options.username,
+ 'password': self.options.password,
+ })
+
+ data = json.load(urllib2.urlopen(request_path, request_params))
+
+ self.credentials = {
+ 'ticket': data['data']['ticket'],
+ 'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
+ }
+
+ def get(self, url, data=None):
+ opener = urllib2.build_opener()
+ opener.addheaders.append(('Cookie', 'PVEAuthCookie={}'.format(self.credentials['ticket'])))
+
+ request_path = '{}{}'.format(self.options.url, url)
+ request = opener.open(request_path, data)
+
+ response = json.load(request)
+ return response['data']
+
+ def nodes(self):
+ return ProxmoxNodeList(self.get('api2/json/nodes'))
+
+ def node_qemu(self, node):
+ return ProxmoxQemuList(self.get('api2/json/nodes/{}/qemu'.format(node)))
+
+ def pools(self):
+ return ProxmoxPoolList(self.get('api2/json/pools'))
+
+ def pool(self, poolid):
+ return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid)))
+
+def main_list(options):
+ result = {}
+
+ proxmox_api = ProxmoxAPI(options)
+ proxmox_api.auth()
+
+ # all
+ result['all'] = []
+ for node in proxmox_api.nodes().get_names():
+ result['all'] += proxmox_api.node_qemu(node).get_names()
+
+ # pools
+ for pool in proxmox_api.pools().get_names():
+ result[pool] = proxmox_api.pool(pool).get_members_name()
+
+ print json.dumps(result)
+
+def main_host():
+ print json.dumps({})
+
+def main():
+ parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
+ parser.add_option('--list', action="store_true", default=False, dest="list")
+ parser.add_option('--host', dest="host")
+ parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
+ parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
+ parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
+ (options, args) = parser.parse_args()
+
+ if options.list:
+ main_list(options)
+ elif options.host:
+ main_host()
+ else:
+ parser.print_help()
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
From 3d62e55abe14be12292186760413ce641f852c09 Mon Sep 17 00:00:00 2001
From: Mathieu GAUTHIER-LAFAYE
Date: Tue, 7 Oct 2014 13:10:10 +0200
Subject: [PATCH 0011/3617] add host variables (proxmox_vmid, proxmox_uptime,
proxmox_maxmem, ...)
---
plugins/inventory/proxmox.py | 26 +++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py
index ceb41110278417..590949a4c6631a 100755
--- a/plugins/inventory/proxmox.py
+++ b/plugins/inventory/proxmox.py
@@ -33,6 +33,10 @@ class ProxmoxQemuList(list):
def get_names(self):
return [qemu['name'] for qemu in self if qemu['template'] != 1]
+ def get_by_name(self, name):
+ results = [qemu for qemu in self if qemu['name'] == name]
+ return results[0] if len(results) > 0 else None
+
class ProxmoxPoolList(list):
def get_names(self):
return [pool['poolid'] for pool in self]
@@ -107,8 +111,24 @@ def main_list(options):
print json.dumps(result)
-def main_host():
- print json.dumps({})
+def main_host(options):
+ results = {}
+
+ proxmox_api = ProxmoxAPI(options)
+ proxmox_api.auth()
+
+ host = None
+ for node in proxmox_api.nodes().get_names():
+ qemu_list = proxmox_api.node_qemu(node)
+ qemu = qemu_list.get_by_name(options.host)
+ if qemu:
+ break
+
+ if qemu:
+ for key, value in qemu.iteritems():
+ results['proxmox_' + key] = value
+
+ print json.dumps(results)
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
@@ -122,7 +142,7 @@ def main():
if options.list:
main_list(options)
elif options.host:
- main_host()
+ main_host(options)
else:
parser.print_help()
sys.exit(1)
From 7c094c93798eeae5af92961031125de83d6ec91d Mon Sep 17 00:00:00 2001
From: Mathieu GAUTHIER-LAFAYE
Date: Tue, 7 Oct 2014 13:45:41 +0200
Subject: [PATCH 0012/3617] add _meta in the list json
---
plugins/inventory/proxmox.py | 56 +++++++++++++++++++++++++-----------
1 file changed, 39 insertions(+), 17 deletions(-)
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py
index 590949a4c6631a..c9d5e82a623cfb 100755
--- a/plugins/inventory/proxmox.py
+++ b/plugins/inventory/proxmox.py
@@ -29,7 +29,18 @@ class ProxmoxNodeList(list):
def get_names(self):
return [node['node'] for node in self]
+class ProxmoxQemu(dict):
+ def get_variables(self):
+ variables = {}
+ for key, value in self.iteritems():
+ variables['proxmox_' + key] = value
+ return variables
+
class ProxmoxQemuList(list):
+ def __init__(self, data=[]):
+ for item in data:
+ self.append(ProxmoxQemu(item))
+
def get_names(self):
return [qemu['name'] for qemu in self if qemu['template'] != 1]
@@ -37,6 +48,13 @@ def get_by_name(self, name):
results = [qemu for qemu in self if qemu['name'] == name]
return results[0] if len(results) > 0 else None
+ def get_variables(self):
+ variables = {}
+ for qemu in self:
+ variables[qemu['name']] = qemu.get_variables()
+
+ return variables
+
class ProxmoxPoolList(list):
def get_names(self):
return [pool['poolid'] for pool in self]
@@ -95,40 +113,42 @@ def pool(self, poolid):
return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid)))
def main_list(options):
- result = {}
+ results = {
+ 'all': {
+ 'hosts': [],
+ },
+ '_meta': {
+ 'hostvars': {},
+ }
+ }
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
- # all
- result['all'] = []
for node in proxmox_api.nodes().get_names():
- result['all'] += proxmox_api.node_qemu(node).get_names()
+ qemu_list = proxmox_api.node_qemu(node)
+ results['all']['hosts'] += qemu_list.get_names()
+ results['_meta']['hostvars'].update(qemu_list.get_variables())
# pools
for pool in proxmox_api.pools().get_names():
- result[pool] = proxmox_api.pool(pool).get_members_name()
+ results[pool] = {
+ 'hosts': proxmox_api.pool(pool).get_members_name(),
+ }
- print json.dumps(result)
+ return json.dumps(results)
def main_host(options):
- results = {}
-
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
- host = None
for node in proxmox_api.nodes().get_names():
qemu_list = proxmox_api.node_qemu(node)
qemu = qemu_list.get_by_name(options.host)
if qemu:
- break
+ return json.dumps(qemu.get_variables())
- if qemu:
- for key, value in qemu.iteritems():
- results['proxmox_' + key] = value
-
- print json.dumps(results)
+ print json.dumps({})
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
@@ -140,12 +160,14 @@ def main():
(options, args) = parser.parse_args()
if options.list:
- main_list(options)
+ json = main_list(options)
elif options.host:
- main_host(options)
+ json = main_host(options)
else:
parser.print_help()
sys.exit(1)
+ print json
+
if __name__ == '__main__':
main()
From d20ef3a10af5dada0a3e3b3c1f7b15fee3839990 Mon Sep 17 00:00:00 2001
From: Mathieu GAUTHIER-LAFAYE
Date: Tue, 7 Oct 2014 13:58:01 +0200
Subject: [PATCH 0013/3617] add --pretty for debuging purpose
---
plugins/inventory/proxmox.py | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py
index c9d5e82a623cfb..80f6628d97395d 100755
--- a/plugins/inventory/proxmox.py
+++ b/plugins/inventory/proxmox.py
@@ -136,7 +136,7 @@ def main_list(options):
'hosts': proxmox_api.pool(pool).get_members_name(),
}
- return json.dumps(results)
+ return results
def main_host(options):
proxmox_api = ProxmoxAPI(options)
@@ -146,9 +146,9 @@ def main_host(options):
qemu_list = proxmox_api.node_qemu(node)
qemu = qemu_list.get_by_name(options.host)
if qemu:
- return json.dumps(qemu.get_variables())
+ return qemu.get_variables()
- print json.dumps({})
+ return {}
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
@@ -157,17 +157,22 @@ def main():
parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
+ parser.add_option('--pretty', action="store_true", default=False, dest='pretty')
(options, args) = parser.parse_args()
if options.list:
- json = main_list(options)
+ data = main_list(options)
elif options.host:
- json = main_host(options)
+ data = main_host(options)
else:
parser.print_help()
sys.exit(1)
- print json
+ indent = None
+ if options.pretty:
+ indent = 2
+
+ print json.dumps(data, indent=indent)
if __name__ == '__main__':
main()
From fbc1cd553ca6d083a9801a32fae1dfa40e7b9f67 Mon Sep 17 00:00:00 2001
From: Andrew Rothstein
Date: Tue, 14 Oct 2014 07:29:21 -0400
Subject: [PATCH 0014/3617] an ansible inventory garnered from fleetctl
---
plugins/inventory/fleet.py | 107 +++++++++++++++++++++++++++++++++++++
1 file changed, 107 insertions(+)
create mode 100755 plugins/inventory/fleet.py
diff --git a/plugins/inventory/fleet.py b/plugins/inventory/fleet.py
new file mode 100755
index 00000000000000..d6d7e4d2925bfe
--- /dev/null
+++ b/plugins/inventory/fleet.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+"""
+fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and
+returns it under the host group 'coreos'
+"""
+
+# Copyright (C) 2014 Andrew Rothstein
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Thanks to the vagrant.py inventory script for giving me the basic structure
+# of this.
+#
+
+import sys
+import subprocess
+import re
+import string
+from optparse import OptionParser
+try:
+ import json
+except:
+ import simplejson as json
+
+# Options
+#------------------------------
+
+parser = OptionParser(usage="%prog [options] --list | --host ")
+parser.add_option('--list', default=False, dest="list", action="store_true",
+ help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
+parser.add_option('--host', default=None, dest="host",
+ help="Generate additional host specific details for given host for Ansible")
+(options, args) = parser.parse_args()
+
+#
+# helper functions
+#
+
+def get_ssh_config() :
+ configs = []
+ for box in list_running_boxes() :
+ config = get_a_ssh_config(box)
+ configs.append(config)
+ return configs
+
+#list all the running instances in the fleet
+def list_running_boxes():
+ boxes = []
+ for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n') :
+ matcher = re.search("[^\s]+[\s]+([^\s]+).+", line)
+ if matcher and matcher.group(1) != "IP":
+ boxes.append(matcher.group(1))
+
+ return boxes
+
+def get_a_ssh_config(box_name) :
+ config = {}
+ config['Host'] = box_name
+ config['ansible_ssh_user'] = 'core'
+ config['ansible_python_interpreter'] = '/opt/bin/python'
+ return config
+
+# List out servers that vagrant has running
+#------------------------------
+if options.list:
+ ssh_config = get_ssh_config()
+ hosts = { 'coreos': []}
+
+ for data in ssh_config :
+ hosts['coreos'].append(data['Host'])
+
+ print json.dumps(hosts)
+ sys.exit(1)
+
+# Get out the host details
+#------------------------------
+elif options.host:
+ result = {}
+ ssh_config = get_ssh_config()
+
+ details = filter(lambda x: (x['Host'] == options.host), ssh_config)
+ if len(details) > 0:
+ #pass through the port, in case it's non standard.
+ result = details[0]
+ result
+
+ print json.dumps(result)
+ sys.exit(1)
+
+
+# Print out help
+#------------------------------
+else:
+ parser.print_help()
+ sys.exit(1)
From 61ae3c732ff024a9102d5f423eb7fa0c69ae1c46 Mon Sep 17 00:00:00 2001
From: Monty Taylor
Date: Sun, 26 Oct 2014 10:41:58 -0700
Subject: [PATCH 0015/3617] Add required_if to AnsibleModule
There is a common pattern in modules where some parameters are required
only if another parameter is present AND set to a particular value. For
instance, if a cloud server state is "present" it's important to
indicate the image to be used, but if it's "absent", the image that was
used to launch it is not necessary. Provide a check that takes as an
input a list of 3-element tuples containing parameter to depend on, the
value it should be set to, and a list of parameters which are required
if the required parameter is set to the required value.
---
lib/ansible/module_utils/basic.py | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index 8a4548dc169771..779d8f4cde8cf1 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -247,7 +247,8 @@ class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
- required_one_of=None, add_file_common_args=False, supports_check_mode=False):
+ required_one_of=None, add_file_common_args=False, supports_check_mode=False,
+ required_if=None):
'''
common code for quickly building an ansible module in Python
@@ -295,6 +296,7 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False,
self._check_argument_types()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
+ self._check_required_if(required_if)
self._set_defaults(pre=False)
if not self.no_log:
@@ -852,6 +854,20 @@ def _check_required_arguments(self):
if len(missing) > 0:
self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
+ def _check_required_if(self, spec):
+ ''' ensure that parameters which are conditionally required are present '''
+ if spec is None:
+ return
+ for (key, val, requirements) in spec:
+ missing = []
+ if key in self.params and self.params[key] == val:
+ for check in requirements:
+ count = self._count_terms(check)
+ if count == 0:
+ missing.append(check)
+ if len(missing) > 0:
+ self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
+
def _check_argument_values(self):
''' ensure all arguments have the requested values, and there are no stray arguments '''
for (k,v) in self.argument_spec.iteritems():
From e5f651c458b5b3326f0ef371f2c8fc0d0beab6b5 Mon Sep 17 00:00:00 2001
From: Bryan Hunt
Date: Tue, 28 Oct 2014 20:19:15 +0000
Subject: [PATCH 0016/3617] export ANSIBLE_HOME so it can be used in scripts
In order that scripts like this can work
```
#!/bin/bash
ansible -vvvv tag_instance_type_foo-training -i "${ANSIBLE_HOME}/plugins/inventory/ec2.py" --private-key=~/Downloads/foo-training.pem -u ec2-user -m ping
```
---
hacking/env-setup | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hacking/env-setup b/hacking/env-setup
index 4fed1690976753..e0de78fc75d94a 100755
--- a/hacking/env-setup
+++ b/hacking/env-setup
@@ -13,7 +13,7 @@ fi
# The below is an alternative to readlink -fn which doesn't exist on OS X
# Source: http://stackoverflow.com/a/1678636
FULL_PATH=`python -c "import os; print(os.path.realpath('$HACKING_DIR'))"`
-ANSIBLE_HOME=`dirname "$FULL_PATH"`
+export ANSIBLE_HOME=`dirname "$FULL_PATH"`
PREFIX_PYTHONPATH="$ANSIBLE_HOME/lib"
PREFIX_PATH="$ANSIBLE_HOME/bin"
From 3b7280b364b14e5fd6a7d1bec5fbaabd1fd23640 Mon Sep 17 00:00:00 2001
From: ktosiek
Date: Sun, 9 Nov 2014 22:40:29 +0100
Subject: [PATCH 0017/3617] guide_rax.rst: fix add_host invocations
change `groupname` to `groups`, as per add_host documentation
---
docsite/rst/guide_rax.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docsite/rst/guide_rax.rst b/docsite/rst/guide_rax.rst
index d00a090fa3948e..28321ce7fa559a 100644
--- a/docsite/rst/guide_rax.rst
+++ b/docsite/rst/guide_rax.rst
@@ -131,7 +131,7 @@ The rax module returns data about the nodes it creates, like IP addresses, hostn
hostname: "{{ item.name }}"
ansible_ssh_host: "{{ item.rax_accessipv4 }}"
ansible_ssh_pass: "{{ item.rax_adminpass }}"
- groupname: raxhosts
+ groups: raxhosts
with_items: rax.success
when: rax.action == 'create'
@@ -519,7 +519,7 @@ Build a complete webserver environment with servers, custom networks and load ba
ansible_ssh_host: "{{ item.rax_accessipv4 }}"
ansible_ssh_pass: "{{ item.rax_adminpass }}"
ansible_ssh_user: root
- groupname: web
+ groups: web
with_items: rax.success
when: rax.action == 'create'
From a1adff4ff00091741cd95301d66a33cac161ea9d Mon Sep 17 00:00:00 2001
From: Baptiste Mathus
Date: Wed, 26 Nov 2014 10:35:45 +0100
Subject: [PATCH 0018/3617] Setting LC_MESSAGES: prevent unparseable messages
This locale variable defines how tools should display their messages.
This is for example gonna change the yum message from "Nothing to do" to
"Rien a faire" in my case (french).
As the yum module parses that string in err, if the message is not
enforced in english this is gonna fail.
So this commits just enriches a bit more the code that's already written
for that enforcement.
This commit fixes issue #9635.
---
lib/ansible/module_utils/basic.py | 1 +
lib/ansible/runner/shell_plugins/sh.py | 1 +
2 files changed, 2 insertions(+)
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index cee6510f34c0ac..761725cea0914e 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -772,6 +772,7 @@ def _check_locale(self):
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_CTYPE'] = 'C'
+ os.environ['LC_MESSAGES'] = 'C'
except Exception, e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/runner/shell_plugins/sh.py
index 95d48e9e7de168..27512b2c59c8a2 100644
--- a/lib/ansible/runner/shell_plugins/sh.py
+++ b/lib/ansible/runner/shell_plugins/sh.py
@@ -29,6 +29,7 @@ def env_prefix(self, **kwargs):
env = dict(
LANG = C.DEFAULT_MODULE_LANG,
LC_CTYPE = C.DEFAULT_MODULE_LANG,
+ LC_MESSAGES = C.DEFAULT_MODULE_LANG,
)
env.update(kwargs)
return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()])
From 4ecaa78c79bd919c7d3c6107025ebff0fc8ef123 Mon Sep 17 00:00:00 2001
From: Andrew Rothstein
Date: Fri, 28 Nov 2014 00:00:35 -0500
Subject: [PATCH 0019/3617] incorporated code review feedback
---
plugins/inventory/fleet.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/plugins/inventory/fleet.py b/plugins/inventory/fleet.py
index d6d7e4d2925bfe..3267aeb2ea5384 100755
--- a/plugins/inventory/fleet.py
+++ b/plugins/inventory/fleet.py
@@ -39,7 +39,7 @@
parser = OptionParser(usage="%prog [options] --list | --host ")
parser.add_option('--list', default=False, dest="list", action="store_true",
- help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
+ help="Produce a JSON consumable grouping of servers in your fleet")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
@@ -48,9 +48,9 @@
# helper functions
#
-def get_ssh_config() :
+def get_ssh_config():
configs = []
- for box in list_running_boxes() :
+ for box in list_running_boxes():
config = get_a_ssh_config(box)
configs.append(config)
return configs
@@ -58,14 +58,14 @@ def get_ssh_config() :
#list all the running instances in the fleet
def list_running_boxes():
boxes = []
- for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n') :
+ for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'):
matcher = re.search("[^\s]+[\s]+([^\s]+).+", line)
if matcher and matcher.group(1) != "IP":
boxes.append(matcher.group(1))
return boxes
-def get_a_ssh_config(box_name) :
+def get_a_ssh_config(box_name):
config = {}
config['Host'] = box_name
config['ansible_ssh_user'] = 'core'
@@ -78,7 +78,7 @@ def get_a_ssh_config(box_name) :
ssh_config = get_ssh_config()
hosts = { 'coreos': []}
- for data in ssh_config :
+ for data in ssh_config:
hosts['coreos'].append(data['Host'])
print json.dumps(hosts)
From 8146d1fff3a31cf8e801770d49ee1c24b7728806 Mon Sep 17 00:00:00 2001
From: Justin Wyer
Date: Mon, 1 Dec 2014 17:17:54 +0200
Subject: [PATCH 0020/3617] /sys/block/sdX/queue/physical_block_size does not
correlate with /sys/block/sdX/size for advanced drives larger than 2TB,
/sys/block/sdX/queue/logical_block_size correlates with both see #9549
---
lib/ansible/module_utils/facts.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 5ceeb405d5503c..57476586aef9d0 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -791,7 +791,7 @@ def get_device_facts(self):
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
- part['sectorsize'] = get_file_content(part_sysdir + "/queue/physical_block_size")
+ part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
@@ -808,7 +808,7 @@ def get_device_facts(self):
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
- d['sectorsize'] = get_file_content(sysdir + "/queue/physical_block_size")
+ d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
From 19d40cc54ce65b346901e4f040ec9007a57b3fb7 Mon Sep 17 00:00:00 2001
From: Sebastien Goasguen
Date: Wed, 10 Dec 2014 11:26:21 -0500
Subject: [PATCH 0021/3617] Add tags for inventory
---
plugins/inventory/apache-libcloud.py | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/plugins/inventory/apache-libcloud.py b/plugins/inventory/apache-libcloud.py
index 95804095da90d0..151daeefe08973 100755
--- a/plugins/inventory/apache-libcloud.py
+++ b/plugins/inventory/apache-libcloud.py
@@ -222,12 +222,17 @@ def add_node(self, node):
self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest)
'''
# Inventory: Group by key pair
- if node.extra['keyname']:
- self.push(self.inventory, self.to_safe('key_' + node.extra['keyname']), dest)
+ if node.extra['key_name']:
+ self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest)
# Inventory: Group by security group, quick thing to handle single sg
- if node.extra['securitygroup']:
- self.push(self.inventory, self.to_safe('sg_' + node.extra['securitygroup'][0]), dest)
+ if node.extra['security_group']:
+ self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest)
+
+ # Inventory: Group by tag
+ if node.extra['tags']:
+ for tagkey in node.extra['tags'].keys():
+ self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest)
def get_host_info(self):
'''
From fce04b1eba5343f0b23c50af24404a2826591345 Mon Sep 17 00:00:00 2001
From: "Federico G. Schwindt"
Date: Sun, 14 Dec 2014 22:39:17 +0000
Subject: [PATCH 0022/3617] Use command= when we intended to
While here sort register variables and add a comment to signal multiline
testing.
---
.../roles/test_command_shell/tasks/main.yml | 28 ++++++++++---------
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/test/integration/roles/test_command_shell/tasks/main.yml b/test/integration/roles/test_command_shell/tasks/main.yml
index b331452b7c63a3..877eb11cd6d171 100644
--- a/test/integration/roles/test_command_shell/tasks/main.yml
+++ b/test/integration/roles/test_command_shell/tasks/main.yml
@@ -82,7 +82,7 @@
file: path={{output_dir_test}}/afile.txt state=absent
- name: create afile.txt with create_afile.sh via command
- shell: "{{output_dir_test | expanduser}}/create_afile.sh {{output_dir_test | expanduser}}/afile.txt creates={{output_dir_test | expanduser}}/afile.txt"
+ command: "{{output_dir_test | expanduser}}/create_afile.sh {{output_dir_test | expanduser}}/afile.txt creates={{output_dir_test | expanduser}}/afile.txt"
- name: verify that afile.txt is present
file: path={{output_dir_test}}/afile.txt state=file
@@ -90,7 +90,7 @@
# removes
- name: remove afile.txt with remote_afile.sh via command
- shell: "{{output_dir_test | expanduser}}/remove_afile.sh {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt"
+ command: "{{output_dir_test | expanduser}}/remove_afile.sh {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt"
- name: verify that afile.txt is absent
file: path={{output_dir_test}}/afile.txt state=absent
@@ -161,21 +161,23 @@
- name: remove afile.txt using rm
shell: rm {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt
- register: shell_result4
+ register: shell_result3
- name: assert that using rm under shell causes a warning
assert:
that:
- - "shell_result4.warnings"
+ - "shell_result3.warnings"
- name: verify that afile.txt is absent
file: path={{output_dir_test}}/afile.txt state=absent
- register: shell_result5
+ register: shell_result4
- name: assert that the file was removed by the shell
assert:
that:
- - "shell_result5.changed == False"
+ - "shell_result4.changed == False"
+
+# multiline
- name: execute a shell command using a literal multiline block
args:
@@ -189,28 +191,28 @@
| tr -s ' ' \
| cut -f1 -d ' '
echo "this is a second line"
- register: shell_result6
+ register: shell_result5
-- debug: var=shell_result6
+- debug: var=shell_result5
- name: assert the multiline shell command ran as expected
assert:
that:
- - "shell_result6.changed"
- - "shell_result6.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'"
+ - "shell_result5.changed"
+ - "shell_result5.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'"
- name: execute a shell command using a literal multiline block with arguments in it
shell: |
executable=/bin/bash
creates={{output_dir_test | expanduser}}/afile.txt
echo "test"
- register: shell_result7
+ register: shell_result6
- name: assert the multiline shell command with arguments in it run as expected
assert:
that:
- - "shell_result7.changed"
- - "shell_result7.stdout == 'test'"
+ - "shell_result6.changed"
+ - "shell_result6.stdout == 'test'"
- name: remove the previously created file
file: path={{output_dir_test}}/afile.txt state=absent
From 91a73cff81476873d73f112406a1c6dae6793c6f Mon Sep 17 00:00:00 2001
From: "Federico G. Schwindt"
Date: Sun, 14 Dec 2014 22:40:04 +0000
Subject: [PATCH 0023/3617] Add tests for globbing support
---
.../roles/test_command_shell/tasks/main.yml | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/test/integration/roles/test_command_shell/tasks/main.yml b/test/integration/roles/test_command_shell/tasks/main.yml
index 877eb11cd6d171..325e76cffea4e1 100644
--- a/test/integration/roles/test_command_shell/tasks/main.yml
+++ b/test/integration/roles/test_command_shell/tasks/main.yml
@@ -87,6 +87,15 @@
- name: verify that afile.txt is present
file: path={{output_dir_test}}/afile.txt state=file
+- name: re-run previous command using creates with globbing
+ command: "{{output_dir_test | expanduser}}/create_afile.sh {{output_dir_test | expanduser}}/afile.txt creates={{output_dir_test | expanduser}}/afile.*"
+ register: command_result3
+
+- name: assert that creates with globbing is working
+ assert:
+ that:
+ - "command_result3.changed != True"
+
# removes
- name: remove afile.txt with remote_afile.sh via command
@@ -94,12 +103,15 @@
- name: verify that afile.txt is absent
file: path={{output_dir_test}}/afile.txt state=absent
- register: command_result3
-- name: assert that the file was removed by the script
+- name: re-run previous command using removes with globbing
+ command: "{{output_dir_test | expanduser}}/remove_afile.sh {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.*"
+ register: command_result4
+
+- name: assert that removes with globbing is working
assert:
that:
- - "command_result3.changed != True"
+ - "command_result4.changed != True"
##
## shell
From 9639f1d8e7b4a756b7343cebd37b015b67a2418f Mon Sep 17 00:00:00 2001
From: axos88
Date: Thu, 18 Dec 2014 12:52:15 +0100
Subject: [PATCH 0024/3617] Make issue types an enumeration
Easier to copy&paste, and delete all except the correct line.
---
ISSUE_TEMPLATE.md | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md
index 8ce40348ca1e8a..511760de2658ed 100644
--- a/ISSUE_TEMPLATE.md
+++ b/ISSUE_TEMPLATE.md
@@ -1,6 +1,13 @@
##### Issue Type:
-Can you help us out in labelling this by telling us what kind of ticket this this? You can say “Bug Report”, “Feature Idea”, “Feature Pull Request”, “New Module Pull Request”, “Bugfix Pull Request”, “Documentation Report”, or “Docs Pull Request”.
+Can you help us out in labelling this by telling us what kind of ticket this is? You can say:
+ - Bug Report
+ - Feature Idea
+ - Feature Pull Request
+ - New Module Pull Request
+ - Bugfix Pull Request
+ - Documentation Report
+ - Docs Pull Request
##### Ansible Version:
From 17498b58bb85b18368ede4372093297de740eab6 Mon Sep 17 00:00:00 2001
From: Mick Bass
Date: Thu, 25 Dec 2014 13:31:34 -0700
Subject: [PATCH 0025/3617] Add support for AWS Security Token Service
(temporary credentials) to all AWS cloud modules.
---
lib/ansible/module_utils/ec2.py | 28 ++++++++-------
.../utils/module_docs_fragments/aws.py | 34 ++++++++++---------
2 files changed, 33 insertions(+), 29 deletions(-)
diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py
index 0f08fead18021a..c7bad2970b6522 100644
--- a/lib/ansible/module_utils/ec2.py
+++ b/lib/ansible/module_utils/ec2.py
@@ -54,7 +54,7 @@ def aws_common_argument_spec():
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
validate_certs=dict(default=True, type='bool'),
- security_token=dict(no_log=True),
+ security_token=dict(aliases=['access_token'], no_log=True),
profile=dict(),
)
@@ -87,38 +87,38 @@ def get_aws_connection_info(module):
validate_certs = module.params.get('validate_certs')
if not ec2_url:
- if 'EC2_URL' in os.environ:
- ec2_url = os.environ['EC2_URL']
- elif 'AWS_URL' in os.environ:
+ if 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
+ elif 'EC2_URL' in os.environ:
+ ec2_url = os.environ['EC2_URL']
if not access_key:
- if 'EC2_ACCESS_KEY' in os.environ:
- access_key = os.environ['EC2_ACCESS_KEY']
- elif 'AWS_ACCESS_KEY_ID' in os.environ:
+ if 'AWS_ACCESS_KEY_ID' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif 'AWS_ACCESS_KEY' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY']
+ elif 'EC2_ACCESS_KEY' in os.environ:
+ access_key = os.environ['EC2_ACCESS_KEY']
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
- if 'EC2_SECRET_KEY' in os.environ:
- secret_key = os.environ['EC2_SECRET_KEY']
- elif 'AWS_SECRET_ACCESS_KEY' in os.environ:
+ if 'AWS_SECRET_ACCESS_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif 'AWS_SECRET_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_KEY']
+ elif 'EC2_SECRET_KEY' in os.environ:
+ secret_key = os.environ['EC2_SECRET_KEY']
else:
# in case secret_key came in as empty string
secret_key = None
if not region:
- if 'EC2_REGION' in os.environ:
- region = os.environ['EC2_REGION']
- elif 'AWS_REGION' in os.environ:
+ if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
+ elif 'EC2_REGION' in os.environ:
+ region = os.environ['EC2_REGION']
else:
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
@@ -128,6 +128,8 @@ def get_aws_connection_info(module):
if not security_token:
if 'AWS_SECURITY_TOKEN' in os.environ:
security_token = os.environ['AWS_SECURITY_TOKEN']
+ elif 'EC2_SECURITY_TOKEN' in os.environ:
+ security_token = os.environ['EC2_SECURITY_TOKEN']
else:
# in case security_token came in as empty string
security_token = None
diff --git a/lib/ansible/utils/module_docs_fragments/aws.py b/lib/ansible/utils/module_docs_fragments/aws.py
index 9bbe84a1355768..981eb8e105038b 100644
--- a/lib/ansible/utils/module_docs_fragments/aws.py
+++ b/lib/ansible/utils/module_docs_fragments/aws.py
@@ -23,22 +23,29 @@ class ModuleDocFragment(object):
options:
ec2_url:
description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
+ - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Ignored for modules where region is required. Must be specified for all other modules if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used.
required: false
default: null
aliases: []
aws_secret_key:
description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+ - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+ - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
+ security_token:
+ description:
+ - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
+ required: false
+ default: null
+ aliases: [ 'access_token' ]
+ version_added: "1.6"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
@@ -54,23 +61,18 @@ class ModuleDocFragment(object):
default: null
aliases: []
version_added: "1.6"
- security_token:
- description:
- - security token to authenticate against AWS
- required: false
- default: null
- aliases: []
- version_added: "1.6"
requirements:
- boto
notes:
- - The following environment variables can be used C(AWS_ACCESS_KEY) or
- C(EC2_ACCESS_KEY) or C(AWS_ACCESS_KEY_ID),
- C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY) or C(AWS_SECRET_ACCESS_KEY),
- C(AWS_REGION) or C(EC2_REGION), C(AWS_SECURITY_TOKEN)
+ - If parameters are not set within the module, the following
+ environment variables can be used in decreasing order of precedence
+ C(AWS_URL) or C(EC2_URL),
+ C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
+ C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
+ C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
+ C(AWS_REGION) or C(EC2_REGION)
- Ansible uses the boto configuration file (typically ~/.boto) if no
credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html
- C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the
- AWS region, when required, but
- this can also be configured in the boto config file
+ AWS region, when required, but this can also be configured in the boto config file
"""
From 64141dd78987d19b5b72330c0c456d76e31d609f Mon Sep 17 00:00:00 2001
From: John Barker
Date: Wed, 31 Dec 2014 22:06:15 +0000
Subject: [PATCH 0026/3617] Correct URL to github so links work when testing
locally
---
docsite/rst/community.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst
index 4d2de28ce16d14..c4c9f52b2edf98 100644
--- a/docsite/rst/community.rst
+++ b/docsite/rst/community.rst
@@ -66,7 +66,7 @@ Bugs related to the core language should be reported to `github.com/ansible/ansi
signing up for a free github account. Before reporting a bug, please use the bug/issue search
to see if the issue has already been reported.
-MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module.
+MODULE related bugs however should go to `ansible-modules-core <https://github.com/ansible/ansible-modules-core>`_ or `ansible-modules-extras <https://github.com/ansible/ansible-modules-extras>`_ based on the classification of the module. This is listed on the bottom of the docs page for any module.
When filing a bug, please use the `issue template `_ to provide all relevant information, regardless of what repo you are filing a ticket against.
From 54f1eebde855d5ee14b97d0cd91ed1b3b54fe49a Mon Sep 17 00:00:00 2001
From: John Barker
Date: Thu, 1 Jan 2015 14:13:59 +0000
Subject: [PATCH 0027/3617] Strip formatting from lists of modules
---
hacking/module_formatter.py | 21 ++++++++++++++++++++-
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py
index 0a7d1c884ca200..26e403e8659394 100755
--- a/hacking/module_formatter.py
+++ b/hacking/module_formatter.py
@@ -88,6 +88,24 @@ def html_ify(text):
return t
+#####################################################################################
+
+def strip_formatting(text):
+ ''' Strips formatting
+ In lists of modules, etc, we don't want certain words to be formatted
+ Also due to a bug in RST, you can not easily nest formatting
+ #http://docutils.sourceforge.net/FAQ.html#is-nested-inline-markup-possible
+ '''
+
+ t = cgi.escape(text)
+ t = _ITALIC.sub(r"\1", t)
+ t = _BOLD.sub(r"\1", t)
+ t = _MODULE.sub(r"\1", t)
+ t = _URL.sub(r"\1", t)
+ t = _CONST.sub(r"\1", t)
+
+ return t
+
#####################################################################################
@@ -310,7 +328,8 @@ def print_modules(module, category_file, deprecated, core, options, env, templat
result = process_module(modname, options, env, template, outputname, module_map, aliases)
if result != "SKIPPED":
- category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module))
+ # Some of the module descriptions have formatting in them, this is noisy in lists, so remove it
+ category_file.write(" %s - %s <%s_module>\n" % (modstring, strip_formatting(result), module))
def process_category(category, categories, options, env, template, outputname):
From dc6e8bff34e1305a79febca44722c4345512d6ad Mon Sep 17 00:00:00 2001
From: John Barker
Date: Sat, 3 Jan 2015 11:42:44 +0000
Subject: [PATCH 0028/3617] Fix some mistakes in CHANGELOG.md
---
CHANGELOG.md | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a989cdcd4465bf..70e1c8dc9b0b27 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -97,7 +97,7 @@ And various other bug fixes and improvements ...
- Fixes a bug in vault where the password file option was not being used correctly internally.
- Improved multi-line parsing when using YAML literal blocks (using > or |).
- Fixed a bug with the file module and the creation of relative symlinks.
-- Fixed a bug where checkmode was not being honored during the templating of files.
+- Fixed a bug where checkmode was not being honoured during the templating of files.
- Other various bug fixes.
## 1.7.1 "Summer Nights" - Aug 14, 2014
@@ -140,7 +140,7 @@ New Modules:
Other notable changes:
* Security fixes
- - Prevent the use of lookups when using legaxy "{{ }}" syntax around variables and with_* loops.
+ - Prevent the use of lookups when using legacy "{{ }}" syntax around variables and with_* loops.
- Remove relative paths in TAR-archived file names used by ansible-galaxy.
* Inventory speed improvements for very large inventories.
* Vault password files can now be executable, to support scripts that fetch the vault password.
@@ -319,7 +319,7 @@ Major features/changes:
* ec2 module now accepts 'exact_count' and 'count_tag' as a way to enforce a running number of nodes by tags.
* all ec2 modules that work with Eucalyptus also now support a 'validate_certs' option, which can be set to 'off' for installations using self-signed certs.
* Start of new integration test infrastructure (WIP, more details TBD)
-* if repoquery is unavailble, the yum module will automatically attempt to install yum-utils
+* if repoquery is unavailable, the yum module will automatically attempt to install yum-utils
* ansible-vault: a framework for encrypting your playbooks and variable files
* added support for privilege escalation via 'su' into bin/ansible and bin/ansible-playbook and associated keywords 'su', 'su_user', 'su_pass' for tasks/plays
@@ -782,7 +782,7 @@ Bugfixes and Misc Changes:
* misc fixes to the Riak module
* make template module slightly more efficient
* base64encode / decode filters are now available to templates
-* libvirt module can now work with multiple different libvirt connecton URIs
+* libvirt module can now work with multiple different libvirt connection URIs
* fix for postgresql password escaping
* unicode fix for shlex.split in some cases
* apt module upgrade logic improved
@@ -817,7 +817,7 @@ the variable is still registered for the host, with the attribute skipped: True.
* service pattern argument now correctly read for BSD services
* fetch location can now be controlled more directly via the 'flat' parameter.
* added basename and dirname as Jinja2 filters available to all templates
-* pip works better when sudoing from unpriveledged users
+* pip works better when sudoing from unprivileged users
* fix for user creation with groups specification reporting 'changed' incorrectly in some cases
* fix for some unicode encoding errors in outputing some data in verbose mode
* improved FreeBSD, NetBSD and Solaris facts
From 64e61197f970f1602243f84cbfe9da2761b46a7c Mon Sep 17 00:00:00 2001
From: John Barker
Date: Mon, 5 Jan 2015 20:57:05 +0000
Subject: [PATCH 0029/3617] Revert accidental changes
---
hacking/module_formatter.py | 21 +--------------------
1 file changed, 1 insertion(+), 20 deletions(-)
diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py
index 26e403e8659394..0a7d1c884ca200 100755
--- a/hacking/module_formatter.py
+++ b/hacking/module_formatter.py
@@ -88,24 +88,6 @@ def html_ify(text):
return t
-#####################################################################################
-
-def strip_formatting(text):
- ''' Strips formatting
- In lists of modules, etc, we don't want certain words to be formatted
- Also due to a bug in RST, you can not easily nest formatting
- #http://docutils.sourceforge.net/FAQ.html#is-nested-inline-markup-possible
- '''
-
- t = cgi.escape(text)
- t = _ITALIC.sub(r"\1", t)
- t = _BOLD.sub(r"\1", t)
- t = _MODULE.sub(r"\1", t)
- t = _URL.sub(r"\1", t)
- t = _CONST.sub(r"\1", t)
-
- return t
-
#####################################################################################
@@ -328,8 +310,7 @@ def print_modules(module, category_file, deprecated, core, options, env, templat
result = process_module(modname, options, env, template, outputname, module_map, aliases)
if result != "SKIPPED":
- # Some of the module descriptions have formatting in them, this is noisy in lists, so remove it
- category_file.write(" %s - %s <%s_module>\n" % (modstring, strip_formatting(result), module))
+ category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module))
def process_category(category, categories, options, env, template, outputname):
From e213fdb15dfc6964705c0b5d1567cd0872a26497 Mon Sep 17 00:00:00 2001
From: volanja
Date: Fri, 9 Jan 2015 01:24:41 +0900
Subject: [PATCH 0030/3617] to replace `running` with `started`
---
docsite/rst/test_strategies.rst | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docsite/rst/test_strategies.rst b/docsite/rst/test_strategies.rst
index a3abf160906bef..be1b80550d8c78 100644
--- a/docsite/rst/test_strategies.rst
+++ b/docsite/rst/test_strategies.rst
@@ -19,16 +19,16 @@ also very easy to run the steps on the localhost or testing servers. Ansible let
The Right Level of Testing
``````````````````````````
-Ansible resources are models of desired-state. As such, it should not be necessary to test that services are running, packages are
+Ansible resources are models of desired-state. As such, it should not be necessary to test that services are started, packages are
installed, or other such things. Ansible is the system that will ensure these things are declaratively true. Instead, assert these
things in your playbooks.
.. code-block:: yaml
tasks:
- - service: name=foo state=running enabled=yes
+ - service: name=foo state=started enabled=yes
-If you think the service may not be running, the best thing to do is request it to be running. If the service fails to start, Ansible
+If you think the service may not be started, the best thing to do is request it to be started. If the service fails to start, Ansible
will yell appropriately. (This should not be confused with whether the service is doing something functional, which we'll show more about how to
do later).
From e2ce673b1ab4d607e327ad87e6d67620699b94ef Mon Sep 17 00:00:00 2001
From: James Martin
Date: Tue, 27 Jan 2015 12:46:22 -0500
Subject: [PATCH 0031/3617] Properly empties ASG before terminating it, and
waits for ASG to be deleted.
Updated to support wait_for_instances and replace_all_instances.
---
test/integration/cleanup_ec2.py | 34 +++-
.../roles/test_ec2_asg/tasks/main.yml | 190 +++++++++++++++++-
2 files changed, 215 insertions(+), 9 deletions(-)
diff --git a/test/integration/cleanup_ec2.py b/test/integration/cleanup_ec2.py
index e4241b0d7dc35d..1935f0bdc18e5e 100644
--- a/test/integration/cleanup_ec2.py
+++ b/test/integration/cleanup_ec2.py
@@ -12,6 +12,7 @@
import yaml
import os.path
import boto.ec2.elb
+import time
def delete_aws_resources(get_func, attr, opts):
for item in get_func():
@@ -19,6 +20,37 @@ def delete_aws_resources(get_func, attr, opts):
if re.search(opts.match_re, val):
prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes)
+def delete_autoscaling_group(get_func, attr, opts):
+ assumeyes = opts.assumeyes
+ group_name = None
+ for item in get_func():
+ group_name = getattr(item, attr)
+ if re.search(opts.match_re, group_name):
+ if not opts.assumeyes:
+ assumeyes = raw_input("Delete matching %s? [y/n]: " % (item).lower()) == 'y'
+ break
+ if assumeyes and group_name:
+ groups = asg.get_all_groups(names=[group_name])
+ if groups:
+ group = groups[0]
+ group.max_size = 0
+ group.min_size = 0
+ group.desired_capacity = 0
+ group.update()
+ instances = True
+ while instances:
+ tmp_groups = asg.get_all_groups(names=[group_name])
+ if tmp_groups:
+ tmp_group = tmp_groups[0]
+ if not tmp_group.instances:
+ instances = False
+ time.sleep(10)
+
+ group.delete()
+ while len(asg.get_all_groups(names=[group_name])):
+ time.sleep(5)
+ print ("Terminated ASG: %s" % group_name)
+
def delete_aws_eips(get_func, attr, opts):
# the file might not be there if the integration test wasn't run
@@ -128,7 +160,7 @@ def parse_args():
delete_aws_resources(aws.get_all_security_groups, 'name', opts)
# Delete matching ASGs
- delete_aws_resources(asg.get_all_groups, 'name', opts)
+ delete_autoscaling_group(asg.get_all_groups, 'name', opts)
# Delete matching launch configs
delete_aws_resources(asg.get_all_launch_configurations, 'name', opts)
diff --git a/test/integration/roles/test_ec2_asg/tasks/main.yml b/test/integration/roles/test_ec2_asg/tasks/main.yml
index 6c670375d9418f..091eb2ab2b3b40 100644
--- a/test/integration/roles/test_ec2_asg/tasks/main.yml
+++ b/test/integration/roles/test_ec2_asg/tasks/main.yml
@@ -1,31 +1,69 @@
---
# tasks file for test_ec2_asg
+# we are using a custom built AMI that runs an apache server to verify
+# ELB health checks and perform rolling ASG updates
+# this will only work on us-east-1
+
# ============================================================
# create and kill an ASG
-- name: lookup ami id
- ec2_ami_search: distro=ubuntu region={{ ec2_region }} release=trusty
- register: ubuntu_image
- name: ensure launch config exists
ec2_lc:
name: "{{ resource_prefix }}-lc"
ec2_access_key: "{{ ec2_access_key }}"
ec2_secret_key: "{{ ec2_secret_key }}"
region: "{{ ec2_region }}"
- image_id: "{{ ubuntu_image.ami }}"
- instance_type: t1.micro
-- name: launch asg
+ image_id: ami-964a0efe
+ instance_type: t2.micro
+
+- name: launch asg and wait for instances to be deemed healthy (no ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ region: "{{ ec2_region }}"
+ state: present
+ wait_for_instances: yes
+ register: output
+
+- assert:
+ that:
+ - "output.viable_instances == 1"
+
+# - name: pause for a bit to make sure that the group can't be trivially deleted
+# pause: seconds=30
+- name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ region: "{{ ec2_region }}"
+ state: absent
+ async: 300
+
+
+- name: launch asg and do not wait for instances to be deemed healthy (no ELB)
ec2_asg:
name: "{{ resource_prefix }}-asg"
ec2_access_key: "{{ ec2_access_key }}"
ec2_secret_key: "{{ ec2_secret_key }}"
launch_config_name: "{{ resource_prefix }}-lc"
+ desired_capacity: 1
min_size: 1
max_size: 1
region: "{{ ec2_region }}"
+ wait_for_instances: no
state: present
-- name: pause for a bit to make sure that the group can't be trivially deleted
- pause: seconds=30
+ register: output
+
+- assert:
+ that:
+ - "output.viable_instances == 0"
+
- name: kill asg
ec2_asg:
name: "{{ resource_prefix }}-asg"
@@ -34,3 +72,139 @@
region: "{{ ec2_region }}"
state: absent
async: 300
+
+- name: launch load balancer
+ ec2_elb_lb:
+ name: "{{ resource_prefix }}-lb"
+ region: "{{ ec2_region }}"
+ state: present
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ zones:
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ connection_draining_timeout: 60
+ listeners:
+ - protocol: http
+ load_balancer_port: 80
+ instance_port: 80
+ health_check:
+ ping_protocol: http
+ ping_port: 80
+ ping_path: "/"
+ response_timeout: 5
+ interval: 30
+ unhealthy_threshold: 3
+ healthy_threshold: 3
+ register: load_balancer
+
+
+- name: launch asg and wait for instances to be deemed healthy (ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ availability_zones:
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ health_check_type: ELB
+ desired_capacity: 1
+ min_size: 1
+ max_size: 1
+ health_check_period: 120
+ load_balancers: "{{ resource_prefix }}-lb"
+ region: "{{ ec2_region }}"
+ wait_for_instances: yes
+ wait_timeout: 600
+ state: present
+ register: output
+
+- assert:
+ that:
+ - "output.viable_instances == 1"
+
+
+# grow scaling group to 3
+
+- name: add 2 more instances wait for instances to be deemed healthy (ELB)
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ availability_zones:
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ launch_config_name: "{{ resource_prefix }}-lc"
+ health_check_type: ELB
+ desired_capacity: 3
+ min_size: 3
+ max_size: 5
+ health_check_period: 120
+ load_balancers: ec2-asg-int-test
+ region: "{{ ec2_region }}"
+ wait_for_instances: yes
+ wait_timeout: 600
+ state: present
+ register: output
+
+- assert:
+ that:
+ - "output.viable_instances == 3"
+
+# # create new launch config with alternate AMI
+
+- name: ensure launch config exists
+ ec2_lc:
+ name: "{{ resource_prefix }}-lc-2"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ region: "{{ ec2_region }}"
+ image_id: ami-2a4a0e42
+ instance_type: t2.micro
+
+
+# # perform rolling replace
+
+- name: perform rolling update to new AMI
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ availability_zones:
+ - "{{ ec2_region }}b"
+ - "{{ ec2_region }}c"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ launch_config_name: "{{ resource_prefix }}-lc-2"
+ health_check_type: ELB
+ desired_capacity: 3
+ min_size: 3
+ max_size: 5
+ health_check_period: 120
+ load_balancers: ec2-asg-int-test
+ region: "{{ ec2_region }}"
+ wait_for_instances: yes
+ replace_all_instances: yes
+ wait_timeout: 600
+ state: present
+ register: output
+
+# ensure that all instances have new launch config
+- assert:
+ that:
+ - "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'"
+ with_dict: output.instance_facts
+
+# assert they are all healthy
+- assert:
+ that:
+ - "output.viable_instances >= 3"
+
+
+- name: kill asg
+ ec2_asg:
+ name: "{{ resource_prefix }}-asg"
+ ec2_access_key: "{{ ec2_access_key }}"
+ ec2_secret_key: "{{ ec2_secret_key }}"
+ region: "{{ ec2_region }}"
+ state: absent
+ async: 300
\ No newline at end of file
From 440df12f2ca970e91442b23bf80fced806aecb32 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 30 Jan 2015 15:25:04 -0500
Subject: [PATCH 0032/3617] added retry configs to v2, pending actual
functionality
---
v2/ansible/constants.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py
index 1c2bc092b23cbc..4b51f1f1b1d1d2 100644
--- a/v2/ansible/constants.py
+++ b/v2/ansible/constants.py
@@ -167,6 +167,8 @@ def shell_expand_path(path):
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
+RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
+RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
# CONNECTION RELATED
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None)
From 1e787bd91e663d6fa291290dc83482ee3133429a Mon Sep 17 00:00:00 2001
From: Adam Miller
Date: Wed, 4 Feb 2015 09:49:51 -0600
Subject: [PATCH 0033/3617] Add intro to playbook docs using YAML dictionaries
---
docsite/rst/playbooks_intro.rst | 37 +++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst
index ecf8d46de1eae8..4adb5d53a6878b 100644
--- a/docsite/rst/playbooks_intro.rst
+++ b/docsite/rst/playbooks_intro.rst
@@ -73,6 +73,43 @@ For starters, here's a playbook that contains just one play::
- name: restart apache
service: name=httpd state=restarted
+We can also break task items out over multiple lines using the YAML dictionary
+types to supply module arguments. This can be helpful when working with tasks
+that have really long parameters or modules that take many parameters to keep
+them well structured. Below is another version of the above example but using
+YAML dictionaries to supply the modules with their key=value arguments.::
+
+ ---
+ - hosts: webservers
+ vars:
+ http_port: 80
+ max_clients: 200
+ remote_user: root
+ tasks:
+ - name: ensure apache is at the latest version
+ yum:
+ pkg: httpd
+ state: latest
+ - name: write the apache config file
+ template:
+ src: /srv/httpd.j2
+ dest: /etc/httpd.conf
+ notify:
+ - restart apache
+ - name: ensure apache is running
+ service:
+ name: httpd
+ state: started
+ handlers:
+ - name: restart apache
+ service:
+ name: httpd
+ state: restarted
+
+.. note::
+
+ The above example using YAML dictionaries for module arguments can also be accomplished using the YAML multiline string syntax with the `>` character but this can lead to string quoting errors.
+
Below, we'll break down what the various features of the playbook language are.
.. _playbook_basics:
From 4c661e2b93ad9a7b51de196287b9da7c6b7467d6 Mon Sep 17 00:00:00 2001
From: pdelared
Date: Tue, 10 Feb 2015 17:33:29 +0100
Subject: [PATCH 0034/3617] Update facts.py
Added support for HPUX network fact
---
lib/ansible/module_utils/facts.py | 51 +++++++++++++++++++++++++++++++
1 file changed, 51 insertions(+)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 6d602af7366eca..323c0c0d0591db 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -2048,6 +2048,57 @@ def merge_default_interface(self, defaults, interfaces, ip_type):
for item in ifinfo[ip_type][0].keys():
defaults[item] = ifinfo[ip_type][0][item]
+class HPUX(Network):
+ """
+ HP-UX-specifig subclass of Network. Defines networking facts:
+ - default_interface
+ - interfaces (a list of interface names)
+ - interface_ dictionary of ipv4 address information.
+ """
+ platform = 'HP-UX'
+
+ def __init__(self, module):
+ Network.__init__(self, module)
+
+ def populate(self):
+ netstat_path = self.module.get_bin_path('netstat')
+ if netstat_path is None:
+ return self.facts
+ self.get_default_interfaces()
+ interfaces = self.get_interfaces_info()
+ self.facts['interfaces'] = interfaces.keys()
+ for iface in interfaces:
+ self.facts[iface] = interfaces[iface]
+ return self.facts
+
+ def get_default_interfaces(self):
+ rc, out, err = module.run_command("/usr/bin/netstat -nr", use_unsafe_shell=True)
+ lines = out.split('\n')
+ for line in lines:
+ words = line.split()
+ if len(words) > 1:
+ if words[0] == 'default':
+ self.facts['default_interface'] = words[4]
+ self.facts['default_gateway'] = words[1]
+
+ def get_interfaces_info(self):
+ interfaces = {}
+ rc, out, err = module.run_command("/usr/bin/netstat -ni", use_unsafe_shell=True)
+ lines = out.split('\n')
+ for line in lines:
+ words = line.split()
+ for i in range(len(words) - 1):
+ if words[i][:3] == 'lan':
+ device = words[i]
+ interfaces[device] = { 'device': device }
+ address = words[i+3]
+ interfaces[device]['ipv4'] = { 'address': address }
+ network = words[i+2]
+ interfaces[device]['ipv4'] = { 'network': network,
+ 'interface': device,
+ 'address': address }
+ return interfaces
+
class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the Mac OS X/Darwin Network Class.
From 4e4bdaad8d500c1c8168a8606e7284a65685367a Mon Sep 17 00:00:00 2001
From: Monty Taylor
Date: Fri, 13 Feb 2015 10:40:50 -0500
Subject: [PATCH 0035/3617] Remove auth_token parameter
It turns out that this can actually already be handled by the existing
auth plugin framework and does not need its own parameter. Remove before
it sees usage and causes confusion.
---
lib/ansible/module_utils/openstack.py | 5 -----
1 file changed, 5 deletions(-)
diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py
index 5c4503f94cecba..90415cadabbdb6 100644
--- a/lib/ansible/module_utils/openstack.py
+++ b/lib/ansible/module_utils/openstack.py
@@ -75,7 +75,6 @@ def openstack_full_argument_spec(**kwargs):
cloud=dict(default=None),
auth_plugin=dict(default=None),
auth=dict(default=None),
- auth_token=dict(default=None),
region_name=dict(default=None),
availability_zone=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
@@ -94,10 +93,6 @@ def openstack_module_kwargs(**kwargs):
required_one_of=[
['cloud', 'auth'],
],
- mutually_exclusive=[
- ['auth', 'auth_token'],
- ['auth_plugin', 'auth_token'],
- ],
)
for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
if key in kwargs:
From d06a277b50503e8d142d12ec356a6e0383d22cd7 Mon Sep 17 00:00:00 2001
From: Monty Taylor
Date: Fri, 13 Feb 2015 10:41:58 -0500
Subject: [PATCH 0036/3617] Port openstack module_utils changes to v2 branch
---
v2/ansible/module_utils/openstack.py | 35 ++++++++++++++++++++++++++++
1 file changed, 35 insertions(+)
diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py
index 64f95437143527..90415cadabbdb6 100644
--- a/v2/ansible/module_utils/openstack.py
+++ b/v2/ansible/module_utils/openstack.py
@@ -30,6 +30,9 @@
def openstack_argument_spec():
+ # DEPRECATED: This argument spec is only used for the deprecated old
+ # OpenStack modules. It turns out that modern OpenStack auth is WAY
+ # more complex than this.
# Consume standard OpenStack environment variables.
# This is mainly only useful for ad-hoc command line operation as
# in playbooks one would assume variables would be used appropriately
@@ -67,3 +70,35 @@ def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
ret.append(interface_spec['addr'])
return ret
+def openstack_full_argument_spec(**kwargs):
+ spec = dict(
+ cloud=dict(default=None),
+ auth_plugin=dict(default=None),
+ auth=dict(default=None),
+ region_name=dict(default=None),
+ availability_zone=dict(default=None),
+ state=dict(default='present', choices=['absent', 'present']),
+ wait=dict(default=True, type='bool'),
+ timeout=dict(default=180, type='int'),
+ endpoint_type=dict(
+ default='publicURL', choices=['publicURL', 'internalURL']
+ )
+ )
+ spec.update(kwargs)
+ return spec
+
+
+def openstack_module_kwargs(**kwargs):
+ ret = dict(
+ required_one_of=[
+ ['cloud', 'auth'],
+ ],
+ )
+ for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
+ if key in kwargs:
+ if key in ret:
+ ret[key].extend(kwargs[key])
+ else:
+ ret[key] = kwargs[key]
+
+ return ret
From 7044b5a8d1679b603a4b967dfbe34e60fbc7e444 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 19 Feb 2015 08:29:53 -0500
Subject: [PATCH 0037/3617] removed bare variable detection as this confuses
people and forced us to allow for bare expressions
---
lib/ansible/playbook/task.py | 5 -----
1 file changed, 5 deletions(-)
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index bdffba5527c43a..7645450ee159ba 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -86,11 +86,6 @@ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=No
elif x.startswith("with_"):
if isinstance(ds[x], basestring):
param = ds[x].strip()
- # Only a variable, no logic
- if (param.startswith('{{') and
- param.find('}}') == len(ds[x]) - 2 and
- param.find('|') == -1):
- utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.")
plugin_name = x.replace("with_","")
if plugin_name in utils.plugins.lookup_loader:
From c4144b1391b7df465160e8d5f365bbd163761b37 Mon Sep 17 00:00:00 2001
From: David Mahler
Date: Sun, 22 Feb 2015 19:28:16 +0000
Subject: [PATCH 0038/3617] Some grammatical updates
---
docsite/rst/intro.rst | 2 +-
docsite/rst/intro_getting_started.rst | 33 +++++++++++++--------------
2 files changed, 17 insertions(+), 18 deletions(-)
diff --git a/docsite/rst/intro.rst b/docsite/rst/intro.rst
index 9b30a18bbb7d9b..7976462383c4c0 100644
--- a/docsite/rst/intro.rst
+++ b/docsite/rst/intro.rst
@@ -1,7 +1,7 @@
Introduction
============
-Before we dive into the really fun parts -- playbooks, configuration management, deployment, and orchestration, we'll learn how to get Ansible installed and some basic concepts. We'll go over how to execute ad-hoc commands in parallel across your nodes using /usr/bin/ansible. We'll also see what sort of modules are available in Ansible's core (though you can also write your own, which we'll also show later).
+Before we dive into the really fun parts -- playbooks, configuration management, deployment, and orchestration, we'll learn how to get Ansible installed and cover some basic concepts. We'll also go over how to execute ad-hoc commands in parallel across your nodes using /usr/bin/ansible. Additionally, we'll see what sort of modules are available in Ansible's core (though you can also write your own, which is also covered later).
.. toctree::
:maxdepth: 1
diff --git a/docsite/rst/intro_getting_started.rst b/docsite/rst/intro_getting_started.rst
index 67136036479b5f..c1cd5571e6d6f4 100644
--- a/docsite/rst/intro_getting_started.rst
+++ b/docsite/rst/intro_getting_started.rst
@@ -11,10 +11,10 @@ Foreword
Now that you've read :doc:`intro_installation` and installed Ansible, it's time to dig in and get
started with some commands.
-What we are showing first are not the powerful configuration/deployment/orchestration of Ansible, called playbooks.
-Playbooks are covered in a separate section.
+What we are showing first are not the powerful configuration/deployment/orchestration features of Ansible.
+These features are handled by playbooks which are covered in a separate section.
-This section is about how to get going initially. Once you have these concepts down, read :doc:`intro_adhoc` for some more
+This section is about how to initially get going. Once you have these concepts down, read :doc:`intro_adhoc` for some more
detail, and then you'll be ready to dive into playbooks and explore the most interesting parts!
.. _remote_connection_information:
@@ -22,21 +22,20 @@ detail, and then you'll be ready to dive into playbooks and explore the most int
Remote Connection Information
`````````````````````````````
-Before we get started, it's important to understand how Ansible is communicating with remote
+Before we get started, it's important to understand how Ansible communicates with remote
machines over SSH.
By default, Ansible 1.3 and later will try to use native
-OpenSSH for remote communication when possible. This enables both ControlPersist (a performance feature), Kerberos, and options in ~/.ssh/config such as Jump Host setup. When using Enterprise Linux 6 operating systems as the control machine (Red Hat Enterprise Linux and derivatives such as CentOS), however, the version of OpenSSH may be too old to support ControlPersist. On these operating systems, Ansible will fallback into using a high-quality Python implementation of
+OpenSSH for remote communication when possible. This enables ControlPersist (a performance feature), Kerberos, and options in ~/.ssh/config such as Jump Host setup. However, when using Enterprise Linux 6 operating systems as the control machine (Red Hat Enterprise Linux and derivatives such as CentOS), the version of OpenSSH may be too old to support ControlPersist. On these operating systems, Ansible will fallback into using a high-quality Python implementation of
OpenSSH called 'paramiko'. If you wish to use features like Kerberized SSH and more, consider using Fedora, OS X, or Ubuntu as your control machine until a newer version of OpenSSH is available for your platform -- or engage 'accelerated mode' in Ansible. See :doc:`playbooks_acceleration`.
-In Ansible 1.2 and before, the default was strictly paramiko and native SSH had to be explicitly selected with -c ssh or set in the configuration file.
+In releases up to and including Ansible 1.2, the default was strictly paramiko. Native SSH had to be explicitly selected with the -c ssh option or set in the configuration file.
-Occasionally you'll encounter a device that doesn't do SFTP. This is rare, but if talking with some remote devices that don't support SFTP, you can switch to SCP mode in :doc:`intro_configuration`.
+Occasionally you'll encounter a device that doesn't support SFTP. This is rare, but should it occur, you can switch to SCP mode in :doc:`intro_configuration`.
-When speaking with remote machines, Ansible will by default assume you are using SSH keys -- which we encourage -- but passwords are fine too. To enable password auth, supply the option ``--ask-pass`` where needed. If using sudo features and when sudo requires a password, also supply ``--ask-sudo-pass`` as appropriate.
+When speaking with remote machines, Ansible by default assumes you are using SSH keys. SSH keys are encouraged but password authentication can also be used where needed by supplying the option ``--ask-pass``. If using sudo features and when sudo requires a password, also supply ``--ask-sudo-pass``.
-While it may be common sense, it is worth sharing: Any management system benefits from being run near the machines being managed. If running in a cloud, consider running Ansible from a machine inside that cloud. It will work better than on the open
-internet in most cases.
+While it may be common sense, it is worth sharing: Any management system benefits from being run near the machines being managed. If you are running Ansible in a cloud, consider running it from a machine inside that cloud. In most cases this will work better than on the open Internet.
As an advanced topic, Ansible doesn't just have to connect remotely over SSH. The transports are pluggable, and there are options for managing things locally, as well as managing chroot, lxc, and jail containers. A mode called 'ansible-pull' can also invert the system and have systems 'phone home' via scheduled git checkouts to pull configuration directives from a central repository.
@@ -47,8 +46,8 @@ Your first commands
Now that you've installed Ansible, it's time to get started with some basics.
-Edit (or create) /etc/ansible/hosts and put one or more remote systems in it, for
-which you have your SSH key in ``authorized_keys``::
+Edit (or create) /etc/ansible/hosts and put one or more remote systems in it. Your
+public SSH key should be located in ``authorized_keys`` on those systems::
192.168.1.50
aserver.example.org
@@ -95,9 +94,9 @@ Now run a live command on all of your nodes:
$ ansible all -a "/bin/echo hello"
-Congratulations. You've just contacted your nodes with Ansible. It's
-soon going to be time to read some of the more real-world :doc:`intro_adhoc`, and explore
-what you can do with different modules, as well as the Ansible
+Congratulations! You've just contacted your nodes with Ansible. It's
+soon going to be time to: read about some more real-world cases in :doc:`intro_adhoc`,
+explore what you can do with different modules, and to learn about the Ansible
:doc:`playbooks` language. Ansible is not just about running commands, it
also has powerful configuration management and deployment features. There's more to
explore, but you already have a fully working infrastructure!
@@ -111,7 +110,7 @@ Ansible 1.2.1 and later have host key checking enabled by default.
If a host is reinstalled and has a different key in 'known_hosts', this will result in an error message until corrected. If a host is not initially in 'known_hosts' this will result in prompting for confirmation of the key, which results in an interactive experience if using Ansible, from say, cron. You might not want this.
-If you wish to disable this behavior and understand the implications, you can do so by editing /etc/ansible/ansible.cfg or ~/.ansible.cfg::
+If you understand the implications and wish to disable this behavior, you can do so by editing /etc/ansible/ansible.cfg or ~/.ansible.cfg::
[defaults]
host_key_checking = False
@@ -126,7 +125,7 @@ Also note that host key checking in paramiko mode is reasonably slow, therefore
.. _a_note_about_logging:
-Ansible will log some information about module arguments on the remote system in the remote syslog, unless a task or play is marked with a "no_log: True" attribute, explained later.
+Ansible will log some information about module arguments on the remote system in the remote syslog, unless a task or play is marked with a "no_log: True" attribute. This is explained later.
To enable basic logging on the control machine see :doc:`intro_configuration` document and set the 'log_path' configuration file setting. Enterprise users may also be interested in :doc:`tower`. Tower provides a very robust database logging feature where it is possible to drill down and see history based on hosts, projects, and particular inventories over time -- explorable both graphically and through a REST API.
From e59b3646416942504c4392a3eaf4f8859d1187e8 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 24 Feb 2015 05:05:27 -0500
Subject: [PATCH 0039/3617] changed from merge_hash to combine_vars, which
resets the default to overwrite instead of merge hashing
---
lib/ansible/runner/__init__.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 79a167c5a00532..7a693cc8d01de7 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -674,11 +674,11 @@ def _executor_internal(self, host, new_stdin):
# Then we selectively merge some variable dictionaries down to a
# single dictionary, used to template the HostVars for this host
temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
- temp_vars = utils.merge_hash(temp_vars, inject['combined_cache'])
- temp_vars = utils.merge_hash(temp_vars, self.play_vars)
- temp_vars = utils.merge_hash(temp_vars, self.play_file_vars)
- temp_vars = utils.merge_hash(temp_vars, self.extra_vars)
- temp_vars = utils.merge_hash(temp_vars, {'groups': inject['groups']})
+ temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'])
+ temp_vars = utils.combine_vars(temp_vars, self.play_vars)
+ temp_vars = utils.combine_vars(temp_vars, self.play_file_vars)
+ temp_vars = utils.combine_vars(temp_vars, self.extra_vars)
+ temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass)
From ce764063f14fdd1a664002e2dad02c10eec24f59 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 24 Feb 2015 05:14:22 -0500
Subject: [PATCH 0040/3617] corrected merge vs combined in all pertinent
sections
---
lib/ansible/playbook/play.py | 10 +++++-----
lib/ansible/runner/__init__.py | 2 +-
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 8d81424f09e41a..47bfd79b0b4395 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -115,8 +115,8 @@ def __init__(self, playbook, ds, basedir, vault_password=None):
_tasks = ds.pop('tasks', [])
_handlers = ds.pop('handlers', [])
- temp_vars = utils.merge_hash(self.vars, self.vars_file_vars)
- temp_vars = utils.merge_hash(temp_vars, self.playbook.extra_vars)
+ temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
+ temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
ds = template(basedir, ds, temp_vars)
ds['tasks'] = _tasks
@@ -632,9 +632,9 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud
dirname = os.path.dirname(original_file)
# temp vars are used here to avoid trampling on the existing vars structures
- temp_vars = utils.merge_hash(self.vars, self.vars_file_vars)
- temp_vars = utils.merge_hash(temp_vars, mv)
- temp_vars = utils.merge_hash(temp_vars, self.playbook.extra_vars)
+ temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
+ temp_vars = utils.combine_vars(temp_vars, mv)
+ temp_vars = utils.combine_Vars(temp_vars, self.playbook.extra_vars)
include_file = template(dirname, tokens[0], temp_vars)
include_filename = utils.path_dwim(dirname, include_file)
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 7a693cc8d01de7..15845c6929a346 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -608,7 +608,7 @@ def _executor(self, host, new_stdin):
def get_combined_cache(self):
# merge the VARS and SETUP caches for this host
combined_cache = self.setup_cache.copy()
- return utils.merge_hash(combined_cache, self.vars_cache)
+ return utils.combine_vars(combined_cache, self.vars_cache)
def get_inject_vars(self, host):
host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass)
From 4fa51652b42854f7646f3f176486f676d7b78e7c Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 24 Feb 2015 05:26:41 -0500
Subject: [PATCH 0041/3617] fixed typo in combined_Vars
---
lib/ansible/playbook/play.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 47bfd79b0b4395..bb77506dd1243f 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -634,7 +634,7 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud
# temp vars are used here to avoid trampling on the existing vars structures
temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
temp_vars = utils.combine_vars(temp_vars, mv)
- temp_vars = utils.combine_Vars(temp_vars, self.playbook.extra_vars)
+ temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
include_file = template(dirname, tokens[0], temp_vars)
include_filename = utils.path_dwim(dirname, include_file)
From 0b8773fc99bb3e8e1e10167c7a76a844a1263161 Mon Sep 17 00:00:00 2001
From: Monty Taylor
Date: Thu, 26 Feb 2015 11:35:29 -0500
Subject: [PATCH 0042/3617] Remove state from central argument list
There is an old PR that shows a great use case for having a different
set of states for the server module. Before the other modules start
being in real use, pull this out so that we don't get ourselves into a
pickle.
---
lib/ansible/module_utils/openstack.py | 1 -
lib/ansible/utils/module_docs_fragments/openstack.py | 5 -----
v2/ansible/module_utils/openstack.py | 1 -
3 files changed, 7 deletions(-)
diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py
index 90415cadabbdb6..6388fffbad2c9f 100644
--- a/lib/ansible/module_utils/openstack.py
+++ b/lib/ansible/module_utils/openstack.py
@@ -77,7 +77,6 @@ def openstack_full_argument_spec(**kwargs):
auth=dict(default=None),
region_name=dict(default=None),
availability_zone=dict(default=None),
- state=dict(default='present', choices=['absent', 'present']),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
endpoint_type=dict(
diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py
index d740bc719c3682..cb8f2c1bfb8bd7 100644
--- a/lib/ansible/utils/module_docs_fragments/openstack.py
+++ b/lib/ansible/utils/module_docs_fragments/openstack.py
@@ -53,11 +53,6 @@ class ModuleDocFragment(object):
description:
- Name of the availability zone.
required: false
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
wait:
description:
- Should ansible wait until the requested resource is complete.
diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py
index 90415cadabbdb6..6388fffbad2c9f 100644
--- a/v2/ansible/module_utils/openstack.py
+++ b/v2/ansible/module_utils/openstack.py
@@ -77,7 +77,6 @@ def openstack_full_argument_spec(**kwargs):
auth=dict(default=None),
region_name=dict(default=None),
availability_zone=dict(default=None),
- state=dict(default='present', choices=['absent', 'present']),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
endpoint_type=dict(
From 8758ae201defe5abe166e136a2ddc4e55d66d940 Mon Sep 17 00:00:00 2001
From: Hartmut Goebel
Date: Sat, 28 Feb 2015 14:13:58 +0100
Subject: [PATCH 0043/3617] Fix detect of docker as virtualization_type.
Not only match `/docker/`, but also `docker-` followed by a hex id.
Example (shortened):
```
$ cat /proc/1/cgroup
8:blkio:/system.slice/docker-de73f4d207861cf8757b69213ee67bb234b897a18bea7385964b6ed2d515da94.scope
7:net_cls:/
```
---
lib/ansible/module_utils/facts.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 17e7c62f83ae28..350f002dcc090d 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -2336,7 +2336,7 @@ def get_virtual_facts(self):
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
- if re.search('/docker/', line):
+ if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
From 8027a8a0b50514a362abcddf1d4c78acf67bdfee Mon Sep 17 00:00:00 2001
From: Monty Taylor
Date: Fri, 6 Mar 2015 18:11:12 -0500
Subject: [PATCH 0044/3617] Change to auth_type to match python-openstackclient
---
lib/ansible/module_utils/openstack.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py
index 6388fffbad2c9f..53e18cab0ca49f 100644
--- a/lib/ansible/module_utils/openstack.py
+++ b/lib/ansible/module_utils/openstack.py
@@ -73,7 +73,7 @@ def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
def openstack_full_argument_spec(**kwargs):
spec = dict(
cloud=dict(default=None),
- auth_plugin=dict(default=None),
+ auth_type=dict(default=None),
auth=dict(default=None),
region_name=dict(default=None),
availability_zone=dict(default=None),
From 8758ba08bdb07ef8fde669beef750303c455a237 Mon Sep 17 00:00:00 2001
From: Monty Taylor
Date: Fri, 6 Mar 2015 18:20:45 -0500
Subject: [PATCH 0045/3617] Update common OpenStack requests-related parameters
Also, update docs related to earlier changes in this stack.
---
lib/ansible/module_utils/openstack.py | 6 +++-
.../utils/module_docs_fragments/openstack.py | 28 ++++++++++++++-----
v2/ansible/module_utils/openstack.py | 8 ++++--
3 files changed, 32 insertions(+), 10 deletions(-)
diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py
index 53e18cab0ca49f..35b9026213e988 100644
--- a/lib/ansible/module_utils/openstack.py
+++ b/lib/ansible/module_utils/openstack.py
@@ -77,10 +77,14 @@ def openstack_full_argument_spec(**kwargs):
auth=dict(default=None),
region_name=dict(default=None),
availability_zone=dict(default=None),
+ verify=dict(default=True),
+ cacert=dict(default=None),
+ cert=dict(default=None),
+ key=dict(default=None),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
endpoint_type=dict(
- default='publicURL', choices=['publicURL', 'internalURL']
+ default='public', choices=['public', 'internal', 'admin']
)
)
spec.update(kwargs)
diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py
index cb8f2c1bfb8bd7..2979cb68d7b95f 100644
--- a/lib/ansible/utils/module_docs_fragments/openstack.py
+++ b/lib/ansible/utils/module_docs_fragments/openstack.py
@@ -34,17 +34,13 @@ class ModuleDocFragment(object):
this param will need to contain whatever parameters that auth plugin
requires. This parameter is not needed if a named cloud is provided.
required: false
- auth_plugin:
+ auth_type:
description:
- Name of the auth plugin to use. If the cloud uses something other than
password authentication, the name of the plugin should be indicated here
and the contents of the I(auth) parameter should be updated accordingly.
required: false
default: password
- auth_token:
- description:
- - An auth token obtained previously. If I(auth_token) is given,
- I(auth) and I(auth_plugin) are not needed.
region_name:
description:
- Name of the region.
@@ -64,12 +60,30 @@ class ModuleDocFragment(object):
- How long should ansible wait for the requested resource.
required: false
default: 180
+ verify:
+ description:
+ - Whether or not SSL API requests should be verified.
+ required: false
+ default: True
+ cacert:
+ description:
+ - A path to a CA Cert bundle that can be used as part of verifying
+ SSL API requests.
+ required: false
+ cert:
+ description:
+ - A path to a client certificate to use as part of the SSL transaction
+ required: false
+ key:
+ description:
+ - A path to a client key to use as part of the SSL transaction
+ required: false
endpoint_type:
description:
- Endpoint URL type to fetch from the service catalog.
- choices: [publicURL, internalURL]
+ choices: [public, internal, admin]
required: false
- default: publicURL
+ default: public
requirements:
- shade
notes:
diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py
index 6388fffbad2c9f..35b9026213e988 100644
--- a/v2/ansible/module_utils/openstack.py
+++ b/v2/ansible/module_utils/openstack.py
@@ -73,14 +73,18 @@ def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
def openstack_full_argument_spec(**kwargs):
spec = dict(
cloud=dict(default=None),
- auth_plugin=dict(default=None),
+ auth_type=dict(default=None),
auth=dict(default=None),
region_name=dict(default=None),
availability_zone=dict(default=None),
+ verify=dict(default=True),
+ cacert=dict(default=None),
+ cert=dict(default=None),
+ key=dict(default=None),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
endpoint_type=dict(
- default='publicURL', choices=['publicURL', 'internalURL']
+ default='public', choices=['public', 'internal', 'admin']
)
)
spec.update(kwargs)
From 5453e2cbb8be6aa1f0036659d3e66cab54090532 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 9 Mar 2015 10:27:59 -0400
Subject: [PATCH 0046/3617] removed redundant inventory call, moved groups to
proper priority
---
lib/ansible/runner/__init__.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 15845c6929a346..69c062e205b79d 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -673,12 +673,11 @@ def _executor_internal(self, host, new_stdin):
# Then we selectively merge some variable dictionaries down to a
# single dictionary, used to template the HostVars for this host
- temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
- temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'])
+ temp_vars = inject['combined_cache']
+ temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
temp_vars = utils.combine_vars(temp_vars, self.play_vars)
temp_vars = utils.combine_vars(temp_vars, self.play_file_vars)
temp_vars = utils.combine_vars(temp_vars, self.extra_vars)
- temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass)
From 642d9d6b563837ae5187720444c76abc152fb49c Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 9 Mar 2015 12:12:37 -0400
Subject: [PATCH 0047/3617] readded inventory vars to runner's vars
---
lib/ansible/runner/__init__.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 69c062e205b79d..c1f5b3683cec13 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -608,7 +608,7 @@ def _executor(self, host, new_stdin):
def get_combined_cache(self):
# merge the VARS and SETUP caches for this host
combined_cache = self.setup_cache.copy()
- return utils.combine_vars(combined_cache, self.vars_cache)
+ return utils.merge_hash(combined_cache, self.vars_cache)
def get_inject_vars(self, host):
host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass)
@@ -674,6 +674,7 @@ def _executor_internal(self, host, new_stdin):
# Then we selectively merge some variable dictionaries down to a
# single dictionary, used to template the HostVars for this host
temp_vars = inject['combined_cache']
+ temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] )
temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
temp_vars = utils.combine_vars(temp_vars, self.play_vars)
temp_vars = utils.combine_vars(temp_vars, self.play_file_vars)
From d244390064a037dd82a71ee5d98731893e4cd33e Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 9 Mar 2015 12:15:41 -0400
Subject: [PATCH 0048/3617] correctly added inventory this time
---
lib/ansible/runner/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index c1f5b3683cec13..52e530ac652702 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -673,7 +673,7 @@ def _executor_internal(self, host, new_stdin):
# Then we selectively merge some variable dictionaries down to a
# single dictionary, used to template the HostVars for this host
- temp_vars = inject['combined_cache']
+ temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] )
temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
temp_vars = utils.combine_vars(temp_vars, self.play_vars)
From 5f6db0e16477749c1bccf472150132ca06c50b3b Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 24 Nov 2014 16:36:31 -0500
Subject: [PATCH 0049/3617] preliminary privilege escalation unification +
pbrun - become constants inherit existing sudo/su ones - become command line
options, marked sudo/su as deprecated and moved sudo/su passwords to runas
group - changed method signatures as privilege escalation is collapsed to
become - added tests for su and become, disabled su for lack of support in
local.py - updated playbook,play and task objects to become - added become to
runner - added whoami test for become/sudo/su - added home override dir for
plugins - removed useless method from ask pass - forced become pass to always
be string also uses to_bytes - fixed fakerunner for tests - corrected
reference in synchronize action plugin - added pfexec (needs testing) -
removed unused sudo/su in runner init - removed deprecated info - updated pe
tests to allow to run under sudo and not need root - normalized become
options into a function to avoid duplication and inconsistencies - pushed
supported list to connection class property - updated all connection plugins
to latest 'become' pe
- includes fixes from feedback (including typos)
- added draft docs
- stub of become_exe, leaving for future v2 fixes
---
bin/ansible | 53 ++---
bin/ansible-playbook | 36 ++--
docsite/rst/become.rst | 83 ++++++++
examples/ansible.cfg | 6 +
lib/ansible/constants.py | 34 +--
lib/ansible/playbook/__init__.py | 44 ++--
lib/ansible/playbook/play.py | 119 +++++++----
lib/ansible/playbook/task.py | 84 ++++----
lib/ansible/runner/__init__.py | 168 +++++++--------
lib/ansible/runner/action_plugins/assemble.py | 2 +-
lib/ansible/runner/action_plugins/copy.py | 2 +-
lib/ansible/runner/action_plugins/fetch.py | 2 +-
lib/ansible/runner/action_plugins/patch.py | 2 +-
lib/ansible/runner/action_plugins/script.py | 3 +-
.../runner/action_plugins/synchronize.py | 16 +-
lib/ansible/runner/action_plugins/template.py | 2 +-
.../runner/action_plugins/unarchive.py | 2 +-
lib/ansible/runner/action_plugins/win_copy.py | 2 +-
.../runner/action_plugins/win_template.py | 2 +-
.../runner/connection_plugins/accelerate.py | 15 +-
.../runner/connection_plugins/chroot.py | 10 +-
.../runner/connection_plugins/fireball.py | 8 +-
.../runner/connection_plugins/funcd.py | 8 +-
lib/ansible/runner/connection_plugins/jail.py | 10 +-
.../runner/connection_plugins/libvirt_lxc.py | 10 +-
.../runner/connection_plugins/local.py | 40 ++--
.../runner/connection_plugins/paramiko_ssh.py | 44 ++--
lib/ansible/runner/connection_plugins/ssh.py | 97 +++++----
.../runner/connection_plugins/winrm.py | 10 +-
lib/ansible/runner/connection_plugins/zone.py | 11 +-
lib/ansible/utils/__init__.py | 194 ++++++++++++------
test/integration/destructive.yml | 2 +
.../roles/test_become/files/baz.txt | 1 +
.../roles/test_become/tasks/main.yml | 77 +++++++
.../roles/test_become/templates/bar.j2 | 1 +
.../roles/test_become/vars/default.yml | 1 +
test/integration/roles/test_su/files/baz.txt | 1 +
test/integration/roles/test_su/tasks/main.yml | 75 +++++++
.../roles/test_su/templates/bar.j2 | 1 +
.../roles/test_su/vars/default.yml | 1 +
.../roles/test_sudo/tasks/main.yml | 12 ++
test/units/TestPlayVarsFiles.py | 3 +
test/units/TestSynchronize.py | 7 +-
test/units/TestUtils.py | 4 +-
v2/ansible/constants.py | 16 +-
45 files changed, 845 insertions(+), 476 deletions(-)
create mode 100644 docsite/rst/become.rst
create mode 100644 test/integration/roles/test_become/files/baz.txt
create mode 100644 test/integration/roles/test_become/tasks/main.yml
create mode 100644 test/integration/roles/test_become/templates/bar.j2
create mode 100644 test/integration/roles/test_become/vars/default.yml
create mode 100644 test/integration/roles/test_su/files/baz.txt
create mode 100644 test/integration/roles/test_su/tasks/main.yml
create mode 100644 test/integration/roles/test_su/templates/bar.j2
create mode 100644 test/integration/roles/test_su/vars/default.yml
diff --git a/bin/ansible b/bin/ansible
index 5aaaa582a7e4f0..7fec34ec81e9c6 100755
--- a/bin/ansible
+++ b/bin/ansible
@@ -58,12 +58,12 @@ class Cli(object):
''' create an options parser for bin/ansible '''
parser = utils.base_parser(
- constants=C,
- runas_opts=True,
- subset_opts=True,
+ constants=C,
+ runas_opts=True,
+ subset_opts=True,
async_opts=True,
- output_opts=True,
- connect_opts=True,
+ output_opts=True,
+ connect_opts=True,
check_opts=True,
diff_opts=False,
usage='%prog [options]'
@@ -82,12 +82,8 @@ class Cli(object):
parser.print_help()
sys.exit(1)
- # su and sudo command line arguments need to be mutually exclusive
- if (options.su or options.su_user or options.ask_su_pass) and \
- (options.sudo or options.sudo_user or options.ask_sudo_pass):
- parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
- "and su arguments ('-su', '--su-user', and '--ask-su-pass') are "
- "mutually exclusive")
+ # privilege escalation command line arguments need to be mutually exclusive
+ utils.check_mutually_exclusive_privilege(options, parser)
if (options.ask_vault_pass and options.vault_password_file):
parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
@@ -101,20 +97,20 @@ class Cli(object):
pattern = args[0]
- sshpass = None
- sudopass = None
- su_pass = None
- vault_pass = None
+ sshpass = becomepass = vault_pass = become_method = None
- options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
# Never ask for an SSH password when we run with local connection
if options.connection == "local":
options.ask_pass = False
- options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS
- options.ask_su_pass = options.ask_su_pass or C.DEFAULT_ASK_SU_PASS
+ else:
+ options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
+
options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
- (sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass)
+ # become
+ utils.normalize_become_options(options)
+ prompt_method = utils.choose_pass_prompt(options)
+ (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method)
# read vault_pass from a file
if not options.ask_vault_pass and options.vault_password_file:
@@ -126,6 +122,7 @@ class Cli(object):
if options.subset:
inventory_manager.subset(options.subset)
hosts = inventory_manager.list_hosts(pattern)
+
if len(hosts) == 0:
callbacks.display("No hosts matched", stderr=True)
sys.exit(0)
@@ -135,16 +132,10 @@ class Cli(object):
callbacks.display(' %s' % host)
sys.exit(0)
- if ((options.module_name == 'command' or options.module_name == 'shell')
- and not options.module_args):
+ if options.module_name in ['command','shell'] and not options.module_args:
callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True)
sys.exit(1)
-
- if options.su_user or options.ask_su_pass:
- options.su = True
- options.sudo_user = options.sudo_user or C.DEFAULT_SUDO_USER
- options.su_user = options.su_user or C.DEFAULT_SU_USER
if options.tree:
utils.prepare_writeable_dir(options.tree)
@@ -160,17 +151,15 @@ class Cli(object):
forks=options.forks,
pattern=pattern,
callbacks=self.callbacks,
- sudo=options.sudo,
- sudo_pass=sudopass,
- sudo_user=options.sudo_user,
transport=options.connection,
subset=options.subset,
check=options.check,
diff=options.check,
- su=options.su,
- su_pass=su_pass,
- su_user=options.su_user,
vault_pass=vault_pass,
+ become=options.become,
+ become_method=options.become_method,
+ become_pass=becomepass,
+ become_user=options.become_user,
extra_vars=extra_vars,
)
diff --git a/bin/ansible-playbook b/bin/ansible-playbook
index f62c699d64d614..79cbc43d80a4ec 100755
--- a/bin/ansible-playbook
+++ b/bin/ansible-playbook
@@ -108,19 +108,14 @@ def main(args):
parser.print_help(file=sys.stderr)
return 1
- # su and sudo command line arguments need to be mutually exclusive
- if (options.su or options.su_user or options.ask_su_pass) and \
- (options.sudo or options.sudo_user or options.ask_sudo_pass):
- parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
- "and su arguments ('-su', '--su-user', and '--ask-su-pass') are "
- "mutually exclusive")
+ # privilege escalation command line arguments need to be mutually exclusive
+ utils.check_mutually_exclusive_privilege(options, parser)
if (options.ask_vault_pass and options.vault_password_file):
parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
sshpass = None
- sudopass = None
- su_pass = None
+ becomepass = None
vault_pass = None
options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
@@ -132,11 +127,14 @@ def main(args):
# Never ask for an SSH password when we run with local connection
if options.connection == "local":
options.ask_pass = False
- options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS
- options.ask_su_pass = options.ask_su_pass or C.DEFAULT_ASK_SU_PASS
- (sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass)
- options.sudo_user = options.sudo_user or C.DEFAULT_SUDO_USER
- options.su_user = options.su_user or C.DEFAULT_SU_USER
+
+ # set pe options
+ utils.normalize_become_options(options)
+ prompt_method = utils.choose_pass_prompt(options)
+ (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass,
+ become_ask_pass=options.become_ask_pass,
+ ask_vault_pass=options.ask_vault_pass,
+ become_method=prompt_method)
# read vault_pass from a file
if not options.ask_vault_pass and options.vault_password_file:
@@ -197,20 +195,18 @@ def main(args):
stats=stats,
timeout=options.timeout,
transport=options.connection,
- sudo=options.sudo,
- sudo_user=options.sudo_user,
- sudo_pass=sudopass,
+ become=options.become,
+ become_method=options.become_method,
+ become_user=options.become_user,
+ become_pass=becomepass,
extra_vars=extra_vars,
private_key_file=options.private_key_file,
only_tags=only_tags,
skip_tags=skip_tags,
check=options.check,
diff=options.diff,
- su=options.su,
- su_pass=su_pass,
- su_user=options.su_user,
vault_password=vault_pass,
- force_handlers=options.force_handlers
+ force_handlers=options.force_handlers,
)
if options.flush_cache:
diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst
new file mode 100644
index 00000000000000..dd2d9b140cd842
--- /dev/null
+++ b/docsite/rst/become.rst
@@ -0,0 +1,83 @@
+Ansible Privilege Escalation
+++++++++++++++++++++++++++++
+
+Ansible can use existing privilege escalation systems to allow a user to execute tasks as another.
+
+.. contents:: Topics
+
+Become
+``````
+Before 1.9 Ansible mostly allowed the use of sudo and a limited use of su to allow a login/remote user to become a different user
+and execute tasks or create resources with the second user's permissions. As of 1.9 'become' supersedes the old sudo/su, while still
+being backwards compatible. This new system also makes it easier to add other privilege escalation tools like pbrun (Powerbroker),
+pfexec and others.
+
+
+New directives
+--------------
+
+become
+ equivalent to adding sudo: or su: to a play or task, set to true/yes to activate privilege escalation
+
+become_user
+ equivalent to adding sudo_user: or su_user: to a play or task
+
+become_method
+ at play or task level overrides the default method set in ansible.cfg
+
+
+New ansible_ variables
+----------------------
+Each allows you to set an option per group and/or host
+
+ansible_become
+ equivalent to ansible_sudo or ansible_su, allows you to force privilege escalation
+
+ansible_become_method
+ allows you to set the privilege escalation method
+
+ansible_become_user
+ equivalent to ansible_sudo_user or ansible_su_user, allows you to set the user you become through privilege escalation
+
+ansible_become_pass
+ equivalent to ansible_sudo_pass or ansible_su_pass, allows you to set the privilege escalation password
+
+
+New command line options
+------------------------
+
+--ask-become-pass
+ ask for privilege escalation password
+
+-b, --become
+ run operations with become (no password implied)
+
+--become-method=BECOME_METHOD
+ privilege escalation method to use (default=sudo),
+ valid choices: [ sudo | su | pbrun | pfexec ]
+
+--become-user=BECOME_USER
+ run operations as this user (default=root)
+
+
+sudo and su still work!
+-----------------------
+
+Old playbooks will not need to be changed; even though they are deprecated, the sudo and su directives will continue to work. It is
+recommended to move to become, as sudo and su may be retired at some point. You cannot mix directives on the same object, though;
+Ansible will complain if you try to.
+
+Become will default to using the old sudo/su configs and variables if they exist, but will override them if you specify any of the
+new ones.
+
+
+
+.. note:: Privilege escalation methods must also be supported by the connection plugin used, most will warn if they do not, some will just ignore it as they always run as root (jail, chroot, etc).
+
+.. seealso::
+
+ `Mailing List `_
+ Questions? Help? Ideas? Stop by the list on Google Groups
+ `irc.freenode.net `_
+ #ansible IRC chat channel
+
diff --git a/examples/ansible.cfg b/examples/ansible.cfg
index 67aa039608e9b0..4cf9d513e59533 100644
--- a/examples/ansible.cfg
+++ b/examples/ansible.cfg
@@ -159,6 +159,12 @@ fact_caching = memory
#retry_files_enabled = False
#retry_files_save_path = ~/.ansible-retry
+[privilege_escalation]
+#become=True
+#become_method='sudo'
+#become_user='root'
+#become_ask_pass=False
+
[paramiko_connection]
# uncomment this line to cause the paramiko connection plugin to not record new host
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 31dc91463e8795..1779b792fb3c42 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -86,9 +86,6 @@ def shell_expand_path(path):
path = os.path.expanduser(os.path.expandvars(path))
return path
-def get_plugin_paths(path):
- return ':'.join([os.path.join(x, path) for x in [os.path.expanduser('~/.ansible/plugins/'), '/usr/share/ansible_plugins/']])
-
p = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
@@ -137,16 +134,28 @@ def get_plugin_paths(path):
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
-
-DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', get_plugin_paths('action_plugins'))
-DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', get_plugin_paths('cache_plugins'))
-DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', get_plugin_paths('callback_plugins'))
-DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', get_plugin_paths('connection_plugins'))
-DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', get_plugin_paths('lookup_plugins'))
-DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', get_plugin_paths('vars_plugins'))
-DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', get_plugin_paths('filter_plugins'))
DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
+#TODO: get rid of ternary chain mess
+BECOME_METHODS = ['sudo','su','pbrun','runas','pfexec']
+DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True)
+DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
+DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root')
+DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS',True if DEFAULT_ASK_SUDO_PASS else False, boolean=True)
+# need to rethink implementing these 2
+DEFAULT_BECOME_EXE = None
+#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo')
+#DEFAULT_BECOME_FLAGS = get_config(p, DEFAULTS, 'become_flags', 'ANSIBLE_BECOME_FLAGS',DEFAULT_SUDO_FLAGS if DEFAULT_SUDO else DEFAULT_SU_FLAGS if DEFAULT_SU else '-H')
+
+
+DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins')
+DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins')
+DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins')
+DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins')
+DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
+DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
+DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
+
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
@@ -172,7 +181,7 @@ def get_plugin_paths(path):
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
-# obsolete -- will be formally removed in 1.6
+# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
@@ -188,6 +197,7 @@ def get_plugin_paths(path):
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
+DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index 5de1e6e309a87a..d58657012c625f 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -60,15 +60,12 @@ def __init__(self,
timeout = C.DEFAULT_TIMEOUT,
remote_user = C.DEFAULT_REMOTE_USER,
remote_pass = C.DEFAULT_REMOTE_PASS,
- sudo_pass = C.DEFAULT_SUDO_PASS,
remote_port = None,
transport = C.DEFAULT_TRANSPORT,
private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
callbacks = None,
runner_callbacks = None,
stats = None,
- sudo = False,
- sudo_user = C.DEFAULT_SUDO_USER,
extra_vars = None,
only_tags = None,
skip_tags = None,
@@ -77,11 +74,13 @@ def __init__(self,
check = False,
diff = False,
any_errors_fatal = False,
- su = False,
- su_user = False,
- su_pass = False,
vault_password = False,
force_handlers = False,
+    # privilege escalation
+ become = C.DEFAULT_BECOME,
+ become_method = C.DEFAULT_BECOME_METHOD,
+ become_user = C.DEFAULT_BECOME_USER,
+ become_pass = None,
):
"""
@@ -92,13 +91,11 @@ def __init__(self,
timeout: connection timeout
remote_user: run as this user if not specified in a particular play
remote_pass: use this remote password (for all plays) vs using SSH keys
- sudo_pass: if sudo==True, and a password is required, this is the sudo password
remote_port: default remote port to use if not specified with the host or play
transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
callbacks output callbacks for the playbook
runner_callbacks: more callbacks, this time for the runner API
stats: holds aggregrate data about events occurring to each host
- sudo: if not specified per play, requests all plays use sudo mode
inventory: can be specified instead of host_list to use a pre-existing inventory object
check: don't change anything, just try to detect some potential changes
any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
@@ -139,21 +136,20 @@ def __init__(self,
self.callbacks = callbacks
self.runner_callbacks = runner_callbacks
self.stats = stats
- self.sudo = sudo
- self.sudo_pass = sudo_pass
- self.sudo_user = sudo_user
self.extra_vars = extra_vars
self.global_vars = {}
self.private_key_file = private_key_file
self.only_tags = only_tags
self.skip_tags = skip_tags
self.any_errors_fatal = any_errors_fatal
- self.su = su
- self.su_user = su_user
- self.su_pass = su_pass
self.vault_password = vault_password
self.force_handlers = force_handlers
+ self.become = become
+ self.become_method = become_method
+ self.become_user = become_user
+ self.become_pass = become_pass
+
self.callbacks.playbook = self
self.runner_callbacks.playbook = self
@@ -416,10 +412,7 @@ def _run_task_internal(self, task):
basedir=task.play.basedir,
conditional=task.when,
callbacks=self.runner_callbacks,
- sudo=task.sudo,
- sudo_user=task.sudo_user,
transport=task.transport,
- sudo_pass=task.sudo_pass,
is_playbook=True,
check=self.check,
diff=self.diff,
@@ -429,13 +422,14 @@ def _run_task_internal(self, task):
accelerate_port=task.play.accelerate_port,
accelerate_ipv6=task.play.accelerate_ipv6,
error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
- su=task.su,
- su_user=task.su_user,
- su_pass=task.su_pass,
vault_pass = self.vault_password,
run_hosts=hosts,
no_log=task.no_log,
run_once=task.run_once,
+ become=task.become,
+ become_method=task.become_method,
+ become_user=task.become_user,
+ become_pass=task.become_pass,
)
runner.module_vars.update({'play_hosts': hosts})
@@ -616,12 +610,10 @@ def _do_setup_step(self, play):
setup_cache=self.SETUP_CACHE,
vars_cache=self.VARS_CACHE,
callbacks=self.runner_callbacks,
- sudo=play.sudo,
- sudo_user=play.sudo_user,
- sudo_pass=self.sudo_pass,
- su=play.su,
- su_user=play.su_user,
- su_pass=self.su_pass,
+ become=play.become,
+ become_method=play.become_method,
+ become_user=play.become_user,
+ become_pass=self.become_pass,
vault_pass=self.vault_password,
transport=play.transport,
is_playbook=True,
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 883142da4cc87c..74c6998b22f823 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -32,24 +32,25 @@
class Play(object):
- __slots__ = [
- 'hosts', 'name', 'vars', 'vars_file_vars', 'role_vars', 'default_vars', 'vars_prompt', 'vars_files',
- 'handlers', 'remote_user', 'remote_port', 'included_roles', 'accelerate',
- 'accelerate_port', 'accelerate_ipv6', 'sudo', 'sudo_user', 'transport', 'playbook',
- 'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks',
- 'basedir', 'any_errors_fatal', 'roles', 'max_fail_pct', '_play_hosts', 'su', 'su_user',
- 'vault_password', 'no_log', 'environment',
+ _pb_common = [
+ 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
+ 'become_method', 'become_user', 'environment', 'gather_facts', 'handlers', 'hosts',
+ 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su', 'su_user', 'sudo',
+ 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt', 'vault_password',
+ ]
+
+ __slots__ = _pb_common + [
+ '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir',
+ 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port',
+ 'role_vars', 'transport', 'vars_file_vars',
]
# to catch typos and so forth -- these are userland names
# and don't line up 1:1 with how they are stored
- VALID_KEYS = frozenset((
- 'hosts', 'name', 'vars', 'vars_prompt', 'vars_files',
- 'tasks', 'handlers', 'remote_user', 'user', 'port', 'include', 'accelerate', 'accelerate_port', 'accelerate_ipv6',
- 'sudo', 'sudo_user', 'connection', 'tags', 'gather_facts', 'serial',
- 'any_errors_fatal', 'roles', 'role_names', 'pre_tasks', 'post_tasks', 'max_fail_percentage',
- 'su', 'su_user', 'vault_password', 'no_log', 'environment',
- ))
+ VALID_KEYS = frozenset(_pb_common + [
+ 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks',
+ 'pre_tasks', 'role_names', 'tasks', 'user',
+ ])
# *************************************************
@@ -58,7 +59,7 @@ def __init__(self, playbook, ds, basedir, vault_password=None):
for x in ds.keys():
if not x in Play.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter at this level in an Ansible Playbook" % x)
+ raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x)
# allow all playbook keys to be set by --extra-vars
self.vars = ds.get('vars', {})
@@ -140,8 +141,6 @@ def __init__(self, playbook, ds, basedir, vault_password=None):
self._handlers = ds.get('handlers', [])
self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user))
self.remote_port = ds.get('port', self.playbook.remote_port)
- self.sudo = ds.get('sudo', self.playbook.sudo)
- self.sudo_user = ds.get('sudo_user', self.playbook.sudo_user)
self.transport = ds.get('connection', self.playbook.transport)
self.remote_port = self.remote_port
self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false'))
@@ -149,22 +148,40 @@ def __init__(self, playbook, ds, basedir, vault_password=None):
self.accelerate_port = ds.get('accelerate_port', None)
self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
- self.su = ds.get('su', self.playbook.su)
- self.su_user = ds.get('su_user', self.playbook.su_user)
self.no_log = utils.boolean(ds.get('no_log', 'false'))
+    # Fail out if user specifies conflicting privilege escalations
+    if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
+        raise errors.AnsibleError('become params ("become", "become_user") and sudo params ("sudo", "sudo_user") cannot be used together')
+    if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')):
+        raise errors.AnsibleError('become params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
+ if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
+ raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
+
+ # become settings are inherited and updated normally
+ self.become = ds.get('become', self.playbook.become)
+ self.become_method = ds.get('become_method', self.playbook.become_method)
+ self.become_user = ds.get('become_user', self.playbook.become_user)
+
+ # Make sure current play settings are reflected in become fields
+ if 'sudo' in ds:
+ self.become=ds['sudo']
+ self.become_method='sudo'
+ if 'sudo_user' in ds:
+ self.become_user=ds['sudo_user']
+ elif 'su' in ds:
+ self.become=True
+ self.become=ds['su']
+ if 'su_user' in ds:
+ self.become_user=ds['su_user']
+
# gather_facts is not a simple boolean, as None means that a 'smart'
# fact gathering mode will be used, so we need to be careful here as
# calling utils.boolean(None) returns False
self.gather_facts = ds.get('gather_facts', None)
- if self.gather_facts:
+ if self.gather_facts is not None:
self.gather_facts = utils.boolean(self.gather_facts)
- # Fail out if user specifies a sudo param with a su param in a given play
- if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params '
- '("su", "su_user") cannot be used together')
-
load_vars['role_names'] = ds.get('role_names', [])
self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
@@ -173,9 +190,6 @@ def __init__(self, playbook, ds, basedir, vault_password=None):
# apply any missing tags to role tasks
self._late_merge_role_tags()
- if self.sudo_user != 'root':
- self.sudo = True
-
# place holder for the discovered hosts to be used in this play
self._play_hosts = None
@@ -429,7 +443,7 @@ def _load_roles(self, roles, ds):
for (role, role_path, role_vars, role_params, default_vars) in roles:
# special vars must be extracted from the dict to the included tasks
- special_keys = [ "sudo", "sudo_user", "when", "with_items" ]
+ special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ]
special_vars = {}
for k in special_keys:
if k in role_vars:
@@ -531,7 +545,7 @@ def _resolve_main(self, basepath):
# *************************************************
- def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sudo_vars=None,
+ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None,
additional_conditions=None, original_file=None, role_name=None):
''' handle task and handler include statements '''
@@ -547,8 +561,8 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud
role_params = {}
if default_vars is None:
default_vars = {}
- if sudo_vars is None:
- sudo_vars = {}
+ if become_vars is None:
+ become_vars = {}
old_conditions = list(additional_conditions)
@@ -560,14 +574,37 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud
if not isinstance(x, dict):
raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file))
- # evaluate sudo vars for current and child tasks
- included_sudo_vars = {}
- for k in ["sudo", "sudo_user"]:
+ # evaluate privilege escalation vars for current and child tasks
+ included_become_vars = {}
+ for k in ["become", "become_user", "become_method", "become_exe"]:
if k in x:
- included_sudo_vars[k] = x[k]
- elif k in sudo_vars:
- included_sudo_vars[k] = sudo_vars[k]
- x[k] = sudo_vars[k]
+ included_become_vars[k] = x[k]
+ elif k in become_vars:
+ included_become_vars[k] = become_vars[k]
+ x[k] = become_vars[k]
+
+ ## backwards compat with old sudo/su directives
+ if 'sudo' in x or 'sudo_user' in x:
+ included_become_vars['become'] = x['sudo']
+ x['become'] = x['sudo']
+ x['become_method'] = 'sudo'
+ del x['sudo']
+
+ if x.get('sudo_user', False):
+ included_become_vars['become_user'] = x['sudo_user']
+ x['become_user'] = x['sudo_user']
+ del x['sudo_user']
+
+ elif 'su' in x or 'su_user' in x:
+ included_become_vars['become'] = x['su']
+ x['become'] = x['su']
+ x['become_method'] = 'su'
+ del x['su']
+
+ if x.get('su_user', False):
+ included_become_vars['become_user'] = x['su_user']
+ x['become_user'] = x['su_user']
+ del x['su_user']
if 'meta' in x:
if x['meta'] == 'flush_handlers':
@@ -596,7 +633,7 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud
included_additional_conditions.append(x[k])
elif type(x[k]) is list:
included_additional_conditions.extend(x[k])
- elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log"):
+ elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"):
continue
else:
include_vars[k] = x[k]
@@ -643,7 +680,7 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud
for y in data:
if isinstance(y, dict) and 'include' in y:
y['role_name'] = new_role
- loaded = self._load_tasks(data, mv, role_params, default_vars, included_sudo_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
+ loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
results += loaded
elif type(x) == dict:
task = Task(
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index 05f96c84e396be..a43c2ab89d5872 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -24,26 +24,24 @@
class Task(object):
- __slots__ = [
- 'name', 'meta', 'action', 'when', 'async_seconds', 'async_poll_interval',
- 'notify', 'module_name', 'module_args', 'module_vars', 'play_vars', 'play_file_vars', 'role_vars', 'role_params', 'default_vars',
- 'play', 'notified_by', 'tags', 'register', 'role_name',
- 'delegate_to', 'first_available_file', 'ignore_errors',
- 'local_action', 'transport', 'sudo', 'remote_user', 'sudo_user', 'sudo_pass',
- 'items_lookup_plugin', 'items_lookup_terms', 'environment', 'args',
- 'any_errors_fatal', 'changed_when', 'failed_when', 'always_run', 'delay', 'retries', 'until',
- 'su', 'su_user', 'su_pass', 'no_log', 'run_once',
+ _t_common = [
+ 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass',
+ 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when',
+ 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log',
+ 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user',
+ 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when',
]
+ __slots__ = [
+ 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file',
+ 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars',
+ 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars',
+ ] + _t_common
+
# to prevent typos and such
- VALID_KEYS = frozenset((
- 'name', 'meta', 'action', 'when', 'async', 'poll', 'notify',
- 'first_available_file', 'include', 'tags', 'register', 'ignore_errors',
- 'delegate_to', 'local_action', 'transport', 'remote_user', 'sudo', 'sudo_user',
- 'sudo_pass', 'when', 'connection', 'environment', 'args',
- 'any_errors_fatal', 'changed_when', 'failed_when', 'always_run', 'delay', 'retries', 'until',
- 'su', 'su_user', 'su_pass', 'no_log', 'run_once',
- ))
+ VALID_KEYS = frozenset([
+ 'async', 'connection', 'include', 'poll',
+ ] + _t_common)
def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None):
''' constructor loads from a task or handler datastructure '''
@@ -131,14 +129,12 @@ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=No
self.name = ds.get('name', None)
self.tags = [ 'untagged' ]
self.register = ds.get('register', None)
- self.sudo = utils.boolean(ds.get('sudo', play.sudo))
- self.su = utils.boolean(ds.get('su', play.su))
self.environment = ds.get('environment', play.environment)
self.role_name = role_name
self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log
self.run_once = utils.boolean(ds.get('run_once', 'false'))
- #Code to allow do until feature in a Task
+ #Code to allow do until feature in a Task
if 'until' in ds:
if not ds.get('register'):
raise errors.AnsibleError("register keyword is mandatory when using do until feature")
@@ -160,24 +156,36 @@ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=No
else:
self.remote_user = ds.get('remote_user', play.playbook.remote_user)
- self.sudo_user = None
- self.sudo_pass = None
- self.su_user = None
- self.su_pass = None
-
- if self.sudo:
- self.sudo_user = ds.get('sudo_user', play.sudo_user)
- self.sudo_pass = ds.get('sudo_pass', play.playbook.sudo_pass)
- elif self.su:
- self.su_user = ds.get('su_user', play.su_user)
- self.su_pass = ds.get('su_pass', play.playbook.su_pass)
-
- # Fail out if user specifies a sudo param with a su param in a given play
- if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and \
- (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('sudo params ("sudo", "sudo_user", "sudo_pass") '
- 'and su params "su", "su_user", "su_pass") '
- 'cannot be used together')
+ # Fail out if user specifies privilege escalation params in conflict
+ if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')):
+ raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
+
+ if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
+            raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params ("su", "su_user", "su_pass") in task: %s' % self.name)
+
+ if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
+ raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
+
+ self.become = utils.boolean(ds.get('become', play.become))
+ self.become_method = ds.get('become_method', play.become_method)
+ self.become_user = ds.get('become_user', play.become_user)
+ self.become_pass = ds.get('become_pass', play.playbook.become_pass)
+
+ # set only if passed in current task data
+ if 'sudo' in ds or 'sudo_user' in ds:
+ self.become=ds['sudo']
+ self.become_method='sudo'
+ if 'sudo_user' in ds:
+ self.become_user = ds['sudo_user']
+ if 'sudo_pass' in ds:
+ self.become_pass = ds['sudo_pass']
+ if 'su' in ds or 'su_user' in ds:
+ self.become=ds['su']
+ self.become_method='su'
+ if 'su_user' in ds:
+ self.become_user = ds['su_user']
+ if 'su_pass' in ds:
+ self.become_pass = ds['su_pass']
# Both are defined
if ('action' in ds) and ('local_action' in ds):
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index af1b674a0ee248..fea76f26ada266 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -123,7 +123,6 @@ def __init__(self,
remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key
remote_port=None, # if SSH on different ports
private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
- sudo_pass=C.DEFAULT_SUDO_PASS, # ex: 'password123' or None
background=0, # async poll every X seconds, else 0 for non-async
basedir=None, # directory of playbook, if applicable
setup_cache=None, # used to share fact data w/ other tasks
@@ -131,8 +130,6 @@ def __init__(self,
transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local'
conditional='True', # run only if this fact expression evals to true
callbacks=None, # used for output
- sudo=False, # whether to run sudo or not
- sudo_user=C.DEFAULT_SUDO_USER, # ex: 'root'
module_vars=None, # a playbooks internals thing
play_vars=None, #
play_file_vars=None, #
@@ -151,14 +148,15 @@ def __init__(self,
accelerate=False, # use accelerated connection
accelerate_ipv6=False, # accelerated connection w/ IPv6
accelerate_port=None, # port to use with accelerated connection
- su=False, # Are we running our command via su?
- su_user=None, # User to su to when running command, ex: 'root'
- su_pass=C.DEFAULT_SU_PASS,
vault_pass=None,
run_hosts=None, # an optional list of pre-calculated hosts to run on
no_log=False, # option to enable/disable logging for a given task
run_once=False, # option to enable/disable host bypass loop for a given task
- sudo_exe=C.DEFAULT_SUDO_EXE, # ex: /usr/local/bin/sudo
+        become=False,                       # whether to run privilege escalation or not
+ become_method=C.DEFAULT_BECOME_METHOD,
+ become_user=C.DEFAULT_BECOME_USER, # ex: 'root'
+ become_pass=C.DEFAULT_BECOME_PASS, # ex: 'password123' or None
+ become_exe=C.DEFAULT_BECOME_EXE, # ex: /usr/local/bin/sudo
):
# used to lock multiprocess inputs and outputs at various levels
@@ -201,10 +199,12 @@ def __init__(self,
self.remote_port = remote_port
self.private_key_file = private_key_file
self.background = background
- self.sudo = sudo
- self.sudo_user_var = sudo_user
- self.sudo_user = None
- self.sudo_pass = sudo_pass
+ self.become = become
+ self.become_method = become_method
+ self.become_user_var = become_user
+ self.become_user = None
+ self.become_pass = become_pass
+ self.become_exe = become_exe
self.is_playbook = is_playbook
self.environment = environment
self.complex_args = complex_args
@@ -213,15 +213,10 @@ def __init__(self,
self.accelerate_port = accelerate_port
self.accelerate_ipv6 = accelerate_ipv6
self.callbacks.runner = self
- self.su = su
- self.su_user_var = su_user
- self.su_user = None
- self.su_pass = su_pass
self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
self.vault_pass = vault_pass
self.no_log = no_log
self.run_once = run_once
- self.sudo_exe = sudo_exe
if self.transport == 'smart':
# If the transport is 'smart', check to see if certain conditions
@@ -369,7 +364,7 @@ def _compute_delegate(self, password, remote_inject):
delegate['pass'] = this_info.get('ansible_ssh_pass', password)
delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file)
delegate['transport'] = this_info.get('ansible_connection', self.transport)
- delegate['sudo_pass'] = this_info.get('ansible_sudo_pass', self.sudo_pass)
+ delegate['become_pass'] = this_info.get('ansible_become_pass', this_info.get('ansible_ssh_pass', self.become_pass))
# Last chance to get private_key_file from global variables.
# this is useful if delegated host is not defined in the inventory
@@ -481,13 +476,13 @@ def _execute_module(self, conn, tmp, module_name, args,
or not conn.has_pipelining
or not C.ANSIBLE_SSH_PIPELINING
or C.DEFAULT_KEEP_REMOTE_FILES
- or self.su):
+ or self.become_method == 'su'):
self._transfer_str(conn, tmp, module_name, module_data)
environment_string = self._compute_environment_string(conn, inject)
- if "tmp" in tmp and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')):
- # deal with possible umask issues once sudo'ed to other user
+ if "tmp" in tmp and (self.become and self.become_user != 'root'):
+ # deal with possible umask issues once you become another user
self._remote_chmod(conn, 'a+r', remote_module_path, tmp)
cmd = ""
@@ -514,8 +509,8 @@ def _execute_module(self, conn, tmp, module_name, args,
else:
argsfile = self._transfer_str(conn, tmp, 'arguments', args)
- if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'):
- # deal with possible umask issues once sudo'ed to other user
+ if self.become and self.become_user != 'root':
+ # deal with possible umask issues once become another user
self._remote_chmod(conn, 'a+r', argsfile, tmp)
if async_jid is None:
@@ -524,7 +519,7 @@ def _execute_module(self, conn, tmp, module_name, args,
cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
else:
if async_jid is None:
- if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.su:
+ if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.become_method == 'su':
in_data = module_data
else:
cmd = "%s" % (remote_module_path)
@@ -536,7 +531,7 @@ def _execute_module(self, conn, tmp, module_name, args,
rm_tmp = None
if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if not self.sudo or self.su or self.sudo_user == 'root' or self.su_user == 'root':
+ if not self.become or self.become_user == 'root':
# not sudoing or sudoing to root, so can cleanup files in the same step
rm_tmp = tmp
@@ -546,17 +541,14 @@ def _execute_module(self, conn, tmp, module_name, args,
sudoable = True
if module_name == "accelerate":
# always run the accelerate module as the user
- # specified in the play, not the sudo_user
+ # specified in the play, not the become_user
sudoable = False
- if self.su:
- res = self._low_level_exec_command(conn, cmd, tmp, su=True, in_data=in_data)
- else:
- res = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, in_data=in_data)
+ res = self._low_level_exec_command(conn, cmd, tmp, become=self.become, sudoable=sudoable, in_data=in_data)
if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'):
- # not sudoing to root, so maybe can't delete files as that other user
+ if self.become and self.become_user != 'root':
+ # not becoming root, so maybe can't delete files as that other user
# have to clean up temp files as original user in a second step
cmd2 = conn.shell.remove(tmp, recurse=True)
self._low_level_exec_command(conn, cmd2, tmp, sudoable=False)
@@ -849,11 +841,9 @@ def _safe_template_complex_args(args, inject):
def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None):
''' decides how to invoke a module '''
- # late processing of parameterized sudo_user (with_items,..)
- if self.sudo_user_var is not None:
- self.sudo_user = template.template(self.basedir, self.sudo_user_var, inject)
- if self.su_user_var is not None:
- self.su_user = template.template(self.basedir, self.su_user_var, inject)
+ # late processing of parameterized become_user (with_items,..)
+ if self.become_user_var is not None:
+ self.become_user = template.template(self.basedir, self.become_user_var, inject)
# module_name may be dynamic (but cannot contain {{ ansible_ssh_user }})
module_name = template.template(self.basedir, module_name, inject)
@@ -893,18 +883,16 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port,
actual_transport = inject.get('ansible_connection', self.transport)
actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True)
- self.sudo = utils.boolean(inject.get('ansible_sudo', self.sudo))
- self.sudo_user = inject.get('ansible_sudo_user', self.sudo_user)
- self.sudo_pass = inject.get('ansible_sudo_pass', self.sudo_pass)
- self.su = inject.get('ansible_su', self.su)
- self.su_pass = inject.get('ansible_su_pass', self.su_pass)
- self.sudo_exe = inject.get('ansible_sudo_exe', self.sudo_exe)
-
- # select default root user in case self.sudo requested
+ self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become))))
+ self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user)))
+ self.become_pass = inject.get('ansible_become_pass', inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass)))
+ self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe))
+
+ # select default root user in case self.become requested
# but no user specified; happens e.g. in host vars when
- # just ansible_sudo=True is specified
- if self.sudo and self.sudo_user is None:
- self.sudo_user = 'root'
+ # just ansible_become=True is specified
+ if self.become and self.become_user is None:
+ self.become_user = 'root'
if actual_private_key_file is not None:
actual_private_key_file = os.path.expanduser(actual_private_key_file)
@@ -937,7 +925,7 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port,
actual_user = delegate['user']
actual_pass = delegate['pass']
actual_private_key_file = delegate['private_key_file']
- self.sudo_pass = delegate['sudo_pass']
+ self.become_pass = delegate.get('become_pass',delegate.get('sudo_pass'))
inject = delegate['inject']
# set resolved delegate_to into inject so modules can call _remote_checksum
inject['delegate_to'] = self.delegate_to
@@ -945,7 +933,7 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port,
# user/pass may still contain variables at this stage
actual_user = template.template(self.basedir, actual_user, inject)
actual_pass = template.template(self.basedir, actual_pass, inject)
- self.sudo_pass = template.template(self.basedir, self.sudo_pass, inject)
+ self.become_pass = template.template(self.basedir, self.become_pass, inject)
# make actual_user available as __magic__ ansible_ssh_user variable
inject['ansible_ssh_user'] = actual_user
@@ -1134,7 +1122,7 @@ def _late_needs_tmp_path(self, conn, tmp, module_style):
if "tmp" in tmp:
# tmp has already been created
return False
- if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.su:
+ if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.become_method == 'su':
# tmp is necessary to store module source code
return True
if not conn.has_pipelining:
@@ -1150,62 +1138,54 @@ def _late_needs_tmp_path(self, conn, tmp, module_style):
# *****************************************************
def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False,
- executable=None, su=False, in_data=None):
+ executable=None, become=False, in_data=None):
''' execute a command string over SSH, return the output '''
+ # this can be skipped with powershell modules when there is no analog to a Windows command (like chmod)
+ if cmd:
- if not cmd:
- # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
- return dict(stdout='', stderr='')
+ if executable is None:
+ executable = C.DEFAULT_EXECUTABLE
- if executable is None:
- executable = C.DEFAULT_EXECUTABLE
+ become_user = self.become_user
- sudo_user = self.sudo_user
- su_user = self.su_user
+ # compare connection user to (su|sudo)_user and disable if the same
+ # assume connection type is local if no user attribute
+ this_user = getattr(conn, 'user', getpass.getuser())
+ if (not become and this_user == become_user):
+ sudoable = False
+ become = False
- # compare connection user to (su|sudo)_user and disable if the same
- # assume connection type is local if no user attribute
- this_user = getattr(conn, 'user', getpass.getuser())
- if (not su and this_user == sudo_user) or (su and this_user == su_user):
- sudoable = False
- su = False
-
- if su:
- rc, stdin, stdout, stderr = conn.exec_command(cmd,
- tmp,
- su=su,
- su_user=su_user,
- executable=executable,
- in_data=in_data)
- else:
rc, stdin, stdout, stderr = conn.exec_command(cmd,
tmp,
- sudo_user,
+ become_user=become_user,
sudoable=sudoable,
executable=executable,
in_data=in_data)
- if type(stdout) not in [ str, unicode ]:
- out = ''.join(stdout.readlines())
- else:
- out = stdout
+ if type(stdout) not in [ str, unicode ]:
+ out = ''.join(stdout.readlines())
+ else:
+ out = stdout
- if type(stderr) not in [ str, unicode ]:
- err = ''.join(stderr.readlines())
- else:
- err = stderr
+ if type(stderr) not in [ str, unicode ]:
+ err = ''.join(stderr.readlines())
+ else:
+ err = stderr
+
+ if rc is not None:
+ return dict(rc=rc, stdout=out, stderr=err)
+ else:
+ return dict(stdout=out, stderr=err)
+
+ return dict(rc=None, stdout='', stderr='')
- if rc is not None:
- return dict(rc=rc, stdout=out, stderr=err)
- else:
- return dict(stdout=out, stderr=err)
# *****************************************************
- def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, su=False):
+ def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, become=False):
''' issue a remote chmod command '''
cmd = conn.shell.chmod(mode, path)
- return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, su=su)
+ return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, become=become)
# *****************************************************
@@ -1217,13 +1197,11 @@ def _remote_expand_user(self, conn, path, tmp):
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
- if self.sudo and self.sudo_user:
- expand_path = '~%s' % self.sudo_user
- elif self.su and self.su_user:
- expand_path = '~%s' % self.su_user
+ if self.become and self.become_user:
+ expand_path = '~%s' % self.become_user
cmd = conn.shell.expand_user(expand_path)
- data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, su=False)
+ data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, become=False)
initial_fragment = utils.last_non_blank_line(data['stdout'])
if not initial_fragment:
@@ -1287,11 +1265,11 @@ def _make_tmp_path(self, conn):
''' make and return a temporary path on a remote box '''
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False
- if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'):
+ if self.become and self.become_user != 'root':
use_system_tmp = True
tmp_mode = None
- if self.remote_user != 'root' or ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')):
+ if self.remote_user != 'root' or (self.become and self.become_user != 'root'):
tmp_mode = 'a+rx'
cmd = conn.shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py
index 287e9348655dee..33a4838e322b6d 100644
--- a/lib/ansible/runner/action_plugins/assemble.py
+++ b/lib/ansible/runner/action_plugins/assemble.py
@@ -125,7 +125,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
xfered = self.runner._transfer_str(conn, tmp, 'src', resultant)
# fix file permissions when the copy is done as a different user
- if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root':
+ if self.runner.become and self.runner.become_user != 'root':
self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
# run the copy module
diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py
index 9f6797a02aa57a..a6a5cb5a27b625 100644
--- a/lib/ansible/runner/action_plugins/copy.py
+++ b/lib/ansible/runner/action_plugins/copy.py
@@ -234,7 +234,7 @@ def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=Non
self._remove_tempfile_if_content_defined(content, content_tempfile)
# fix file permissions when the copy is done as a different user
- if (self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root') and not raw:
+ if self.runner.become and self.runner.become_user != 'root' and not raw:
self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)
if raw:
diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py
index 94e930fdb3fded..27d2f6b3c63aed 100644
--- a/lib/ansible/runner/action_plugins/fetch.py
+++ b/lib/ansible/runner/action_plugins/fetch.py
@@ -78,7 +78,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
# use slurp if sudo and permissions are lacking
remote_data = None
- if remote_checksum in ('1', '2') or self.runner.sudo:
+ if remote_checksum in ('1', '2') or self.runner.become:
slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject)
if slurpres.is_successful():
if slurpres.result['encoding'] == 'base64':
diff --git a/lib/ansible/runner/action_plugins/patch.py b/lib/ansible/runner/action_plugins/patch.py
index 8af5dabae8a358..dbba4c53dd7889 100644
--- a/lib/ansible/runner/action_plugins/patch.py
+++ b/lib/ansible/runner/action_plugins/patch.py
@@ -50,7 +50,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
tmp_src = tmp + src
conn.put_file(src, tmp_src)
- if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root':
+ if self.runner.become and self.runner.become_user != 'root':
if not self.runner.noop_on_check(inject):
self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
diff --git a/lib/ansible/runner/action_plugins/script.py b/lib/ansible/runner/action_plugins/script.py
index 1159428b4c1c2f..e4c5ec075f30ab 100644
--- a/lib/ansible/runner/action_plugins/script.py
+++ b/lib/ansible/runner/action_plugins/script.py
@@ -113,8 +113,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
sudoable = True
# set file permissions, more permissive when the copy is done as a different user
- if ((self.runner.sudo and self.runner.sudo_user != 'root') or
- (self.runner.su and self.runner.su_user != 'root')):
+ if self.runner.become and self.runner.become_user != 'root':
chmod_mode = 'a+rx'
sudoable = False
else:
diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py
index 8a8555a204c4b5..f8e57ae314e395 100644
--- a/lib/ansible/runner/action_plugins/synchronize.py
+++ b/lib/ansible/runner/action_plugins/synchronize.py
@@ -78,7 +78,7 @@ def setup(self, module_name, inject):
# Store original transport and sudo values.
self.original_transport = inject.get('ansible_connection', self.runner.transport)
- self.original_sudo = self.runner.sudo
+ self.original_become = self.runner.become
self.transport_overridden = False
if inject.get('delegate_to') is None:
@@ -87,7 +87,7 @@ def setup(self, module_name, inject):
if self.original_transport != 'local':
inject['ansible_connection'] = 'local'
self.transport_overridden = True
- self.runner.sudo = False
+ self.runner.become = False
def run(self, conn, tmp, module_name, module_args,
inject, complex_args=None, **kwargs):
@@ -143,7 +143,7 @@ def run(self, conn, tmp, module_name, module_args,
# use a delegate host instead of localhost
use_delegate = True
- # COMPARE DELEGATE, HOST AND TRANSPORT
+ # COMPARE DELEGATE, HOST AND TRANSPORT
process_args = False
if not dest_host is src_host and self.original_transport != 'local':
# interpret and inject remote host info into src or dest
@@ -160,7 +160,7 @@ def run(self, conn, tmp, module_name, module_args,
if not use_delegate or not user:
user = inject.get('ansible_ssh_user',
self.runner.remote_user)
-
+
if use_delegate:
# FIXME
private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
@@ -172,7 +172,7 @@ def run(self, conn, tmp, module_name, module_args,
if not private_key is None:
private_key = os.path.expanduser(private_key)
options['private_key'] = private_key
-
+
# use the mode to define src and dest's url
if options.get('mode', 'push') == 'pull':
# src is a remote path: @, dest is a local path
@@ -192,7 +192,7 @@ def run(self, conn, tmp, module_name, module_args,
rsync_path = options.get('rsync_path', None)
# If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
- if not rsync_path and self.transport_overridden and self.original_sudo and not dest_is_local:
+ if not rsync_path and self.transport_overridden and self.original_become and not dest_is_local and self.runner.become_method == 'sudo':
rsync_path = 'sudo rsync'
# make sure rsync path is quoted.
@@ -206,8 +206,8 @@ def run(self, conn, tmp, module_name, module_args,
# run the module and store the result
result = self.runner._execute_module(conn, tmp, 'synchronize', module_args, complex_args=options, inject=inject)
- # reset the sudo property
- self.runner.sudo = self.original_sudo
+ # reset the sudo property
+ self.runner.become = self.original_become
return result
diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py
index cceee020316684..e6e33d354f6bd8 100644
--- a/lib/ansible/runner/action_plugins/template.py
+++ b/lib/ansible/runner/action_plugins/template.py
@@ -133,7 +133,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)
# fix file permissions when the copy is done as a different user
- if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root':
+ if self.runner.become and self.runner.become_user != 'root':
self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
# run the copy module
diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py
index 7cf61006c3f565..db94ac26e7d707 100644
--- a/lib/ansible/runner/action_plugins/unarchive.py
+++ b/lib/ansible/runner/action_plugins/unarchive.py
@@ -99,7 +99,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
# handle check mode client side
# fix file permissions when the copy is done as a different user
if copy:
- if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root':
+ if self.runner.become and self.runner.become_user != 'root':
if not self.runner.noop_on_check(inject):
self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
# Build temporary module_args.
diff --git a/lib/ansible/runner/action_plugins/win_copy.py b/lib/ansible/runner/action_plugins/win_copy.py
index 28362195c965a6..a62dfb99857d15 100644
--- a/lib/ansible/runner/action_plugins/win_copy.py
+++ b/lib/ansible/runner/action_plugins/win_copy.py
@@ -230,7 +230,7 @@ def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=Non
self._remove_tempfile_if_content_defined(content, content_tempfile)
# fix file permissions when the copy is done as a different user
- if (self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root') and not raw:
+ if self.runner.become and self.runner.become_user != 'root' and not raw:
self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)
if raw:
diff --git a/lib/ansible/runner/action_plugins/win_template.py b/lib/ansible/runner/action_plugins/win_template.py
index e32a5806c4b4c3..7bde4bd510e2f8 100644
--- a/lib/ansible/runner/action_plugins/win_template.py
+++ b/lib/ansible/runner/action_plugins/win_template.py
@@ -109,7 +109,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)
# fix file permissions when the copy is done as a different user
- if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root':
+ if self.runner.become and self.runner.become_user != 'root':
self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
# run the copy module
diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/lib/ansible/runner/connection_plugins/accelerate.py
index a31124e119f655..0627267c16b215 100644
--- a/lib/ansible/runner/connection_plugins/accelerate.py
+++ b/lib/ansible/runner/connection_plugins/accelerate.py
@@ -50,6 +50,7 @@ def __init__(self, runner, host, port, user, password, private_key_file, *args,
self.accport = port[1]
self.is_connected = False
self.has_pipelining = False
+ self.become_methods_supported=['sudo']
if not self.port:
self.port = constants.DEFAULT_REMOTE_PORT
@@ -226,11 +227,11 @@ def validate_user(self):
else:
return response.get('rc') == 0
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
- if su or su_user:
- raise AnsibleError("Internal Error: this module does not support running commands via su")
+ if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
+ raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
@@ -238,8 +239,8 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
if executable == "":
executable = constants.DEFAULT_EXECUTABLE
- if self.runner.sudo and sudoable and sudo_user:
- cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
+ if self.runner.become and sudoable:
+ cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
vvv("EXEC COMMAND %s" % cmd)
@@ -292,8 +293,8 @@ def put_file(self, in_path, out_path):
if fd.tell() >= fstat.st_size:
last = True
data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
- if self.runner.sudo:
- data['user'] = self.runner.sudo_user
+ if self.runner.become:
+ data['user'] = self.runner.become_user
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
diff --git a/lib/ansible/runner/connection_plugins/chroot.py b/lib/ansible/runner/connection_plugins/chroot.py
index 38c8af7a69096c..3e960472879603 100644
--- a/lib/ansible/runner/connection_plugins/chroot.py
+++ b/lib/ansible/runner/connection_plugins/chroot.py
@@ -24,6 +24,7 @@
from ansible import errors
from ansible import utils
from ansible.callbacks import vvv
+import ansible.constants as C
class Connection(object):
''' Local chroot based connections '''
@@ -31,6 +32,7 @@ class Connection(object):
def __init__(self, runner, host, port, *args, **kwargs):
self.chroot = host
self.has_pipelining = False
+ self.become_methods_supported=C.BECOME_METHODS
if os.geteuid() != 0:
raise errors.AnsibleError("chroot connection requires running as root")
@@ -60,16 +62,16 @@ def connect(self, port=None):
return self
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the chroot '''
- if su or su_user:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via su")
+ if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
+ raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # We enter chroot as root so sudo stuff can be ignored
+ # We enter chroot as root so we ignore privlege escalation?
if executable:
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
diff --git a/lib/ansible/runner/connection_plugins/fireball.py b/lib/ansible/runner/connection_plugins/fireball.py
index dd9e09bacda6d6..562fc2eccf94da 100644
--- a/lib/ansible/runner/connection_plugins/fireball.py
+++ b/lib/ansible/runner/connection_plugins/fireball.py
@@ -53,6 +53,8 @@ def __init__(self, runner, host, port, *args, **kwargs):
else:
self.port = port
+ self.become_methods_supported=[]
+
def connect(self):
''' activates the connection object '''
@@ -64,11 +66,11 @@ def connect(self):
socket = self.context.socket(zmq.REQ)
addr = "tcp://%s:%s" % (self.host, self.port)
socket.connect(addr)
- self.socket = socket
+ self.socket = socket
return self
- def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=None):
+ def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
if in_data:
@@ -76,7 +78,7 @@ def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bi
vvv("EXEC COMMAND %s" % cmd)
- if (self.runner.sudo and sudoable) or (self.runner.su and su):
+ if self.runner.become and sudoable:
raise errors.AnsibleError(
"When using fireball, do not specify sudo or su to run your tasks. " +
"Instead sudo the fireball action with sudo. " +
diff --git a/lib/ansible/runner/connection_plugins/funcd.py b/lib/ansible/runner/connection_plugins/funcd.py
index 7244abcbe9a65b..92b7f53605baab 100644
--- a/lib/ansible/runner/connection_plugins/funcd.py
+++ b/lib/ansible/runner/connection_plugins/funcd.py
@@ -53,16 +53,14 @@ def connect(self, port=None):
self.client = fc.Client(self.host)
return self
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False,
- executable='/bin/sh', in_data=None, su=None, su_user=None):
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False,
+ executable='/bin/sh', in_data=None):
''' run a command on the remote minion '''
- if su or su_user:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via su")
-
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
+ # totally ignores privlege escalation
vvv("EXEC %s" % (cmd), host=self.host)
p = self.client.command.run(cmd)[self.host]
return (p[0], '', p[1], p[2])
diff --git a/lib/ansible/runner/connection_plugins/jail.py b/lib/ansible/runner/connection_plugins/jail.py
index b721ad62b50ab1..c7b61bc638cd4f 100644
--- a/lib/ansible/runner/connection_plugins/jail.py
+++ b/lib/ansible/runner/connection_plugins/jail.py
@@ -24,6 +24,7 @@
import subprocess
from ansible import errors
from ansible.callbacks import vvv
+import ansible.constants as C
class Connection(object):
''' Local chroot based connections '''
@@ -61,6 +62,7 @@ def __init__(self, runner, host, port, *args, **kwargs):
self.runner = runner
self.host = host
self.has_pipelining = False
+ self.become_methods_supported=C.BECOME_METHODS
if os.geteuid() != 0:
raise errors.AnsibleError("jail connection requires running as root")
@@ -91,16 +93,16 @@ def _generate_cmd(self, executable, cmd):
local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd)
return local_cmd
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the chroot '''
- if su or su_user:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via su")
+ if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
+ raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # We enter chroot as root so sudo stuff can be ignored
+ # Ignores privilege escalation
local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.jail)
diff --git a/lib/ansible/runner/connection_plugins/libvirt_lxc.py b/lib/ansible/runner/connection_plugins/libvirt_lxc.py
index c6cf11f2667fb2..34cdb592b246b7 100644
--- a/lib/ansible/runner/connection_plugins/libvirt_lxc.py
+++ b/lib/ansible/runner/connection_plugins/libvirt_lxc.py
@@ -22,6 +22,7 @@
import subprocess
from ansible import errors
from ansible.callbacks import vvv
+import ansible.constants as C
class Connection(object):
''' Local lxc based connections '''
@@ -50,6 +51,7 @@ def __init__(self, runner, host, port, *args, **kwargs):
self.host = host
# port is unused, since this is local
self.port = port
+ self.become_methods_supported=C.BECOME_METHODS
def connect(self, port=None):
''' connect to the lxc; nothing to do here '''
@@ -65,16 +67,16 @@ def _generate_cmd(self, executable, cmd):
local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
return local_cmd
- def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
+ def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the chroot '''
- if su or su_user:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via su")
+ if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
+ raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # We enter lxc as root so sudo stuff can be ignored
+ # We ignore privelege escalation!
local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.lxc)
diff --git a/lib/ansible/runner/connection_plugins/local.py b/lib/ansible/runner/connection_plugins/local.py
index e282076ee1efb2..beaeb1ae50e45f 100644
--- a/lib/ansible/runner/connection_plugins/local.py
+++ b/lib/ansible/runner/connection_plugins/local.py
@@ -26,6 +26,7 @@
from ansible import utils
from ansible.callbacks import vvv
+
class Connection(object):
''' Local based connections '''
@@ -33,31 +34,34 @@ def __init__(self, runner, host, port, *args, **kwargs):
self.runner = runner
self.host = host
# port is unused, since this is local
- self.port = port
+ self.port = port
self.has_pipelining = False
+ # TODO: add su(needs tty), pbrun, pfexec
+ self.become_methods_supported=['sudo']
+
def connect(self, port=None):
''' connect to the local host; nothing to do here '''
return self
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the local host '''
# su requires to be run from a terminal, and therefore isn't supported here (yet?)
- if su or su_user:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via su")
+ if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
+ raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- if not self.runner.sudo or not sudoable:
+ if self.runner.become and sudoable:
+ local_cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '-H', self.runner.become_exe)
+ else:
if executable:
local_cmd = executable.split() + ['-c', cmd]
else:
local_cmd = cmd
- else:
- local_cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
executable = executable.split()[0] if executable else None
vvv("EXEC %s" % (local_cmd), host=self.host)
@@ -66,13 +70,19 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- if self.runner.sudo and sudoable and self.runner.sudo_pass:
+ if self.runner.become and sudoable and self.runner.become_pass:
fcntl.fcntl(p.stdout, fcntl.F_SETFL,
fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL,
fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- sudo_output = ''
- while not sudo_output.endswith(prompt) and success_key not in sudo_output:
+ become_output = ''
+ while success_key not in become_output:
+
+ if prompt and become_output.endswith(prompt):
+ break
+ if utils.su_prompts.check_su_prompt(become_output):
+ break
+
rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
[p.stdout, p.stderr], self.runner.timeout)
if p.stdout in rfd:
@@ -81,13 +91,13 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
chunk = p.stderr.read()
else:
stdout, stderr = p.communicate()
- raise errors.AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
+ raise errors.AnsibleError('timeout waiting for %s password prompt:\n' % self.runner.become_method + become_output)
if not chunk:
stdout, stderr = p.communicate()
- raise errors.AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
- sudo_output += chunk
- if success_key not in sudo_output:
- p.stdin.write(self.runner.sudo_pass + '\n')
+ raise errors.AnsibleError('%s output closed while waiting for password prompt:\n' % self.runner.become_method + become_output)
+ become_output += chunk
+ if success_key not in become_output:
+ p.stdin.write(self.runner.become_pass + '\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/lib/ansible/runner/connection_plugins/paramiko_ssh.py
index 4bb06e01c36147..2ba3d76d26a7aa 100644
--- a/lib/ansible/runner/connection_plugins/paramiko_ssh.py
+++ b/lib/ansible/runner/connection_plugins/paramiko_ssh.py
@@ -125,6 +125,9 @@ def __init__(self, runner, host, port, user, password, private_key_file, *args,
self.private_key_file = private_key_file
self.has_pipelining = False
+ # TODO: add pbrun, pfexec
+ self.become_methods_supported=['sudo', 'su', 'pbrun']
+
def _cache_key(self):
return "%s__%s__" % (self.host, self.user)
@@ -184,9 +187,12 @@ def _connect_uncached(self):
return ssh
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
+ if self.runner.become and sudoable and self.runner.become_method not in self.become_methods_supported:
+ raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
+
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
@@ -206,7 +212,7 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
no_prompt_out = ''
no_prompt_err = ''
- if not (self.runner.sudo and sudoable) and not (self.runner.su and su):
+ if not (self.runner.become and sudoable):
if executable:
quoted_command = executable + ' -c ' + pipes.quote(cmd)
@@ -224,50 +230,46 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
chan.get_pty(term=os.getenv('TERM', 'vt100'),
width=int(os.getenv('COLUMNS', 0)),
height=int(os.getenv('LINES', 0)))
- if self.runner.sudo or sudoable:
- shcmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
- elif self.runner.su or su:
- shcmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)
+ if self.runner.become and sudoable:
+ shcmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
vvv("EXEC %s" % shcmd, host=self.host)
- sudo_output = ''
+ become_output = ''
try:
chan.exec_command(shcmd)
- if self.runner.sudo_pass or self.runner.su_pass:
+ if self.runner.become_pass:
while True:
- if success_key in sudo_output or \
- (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
- (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
+ if success_key in become_output or \
+ (prompt and become_output.endswith(prompt)) or \
+ utils.su_prompts.check_su_prompt(become_output):
break
chunk = chan.recv(bufsize)
if not chunk:
- if 'unknown user' in sudo_output:
+ if 'unknown user' in become_output:
raise errors.AnsibleError(
- 'user %s does not exist' % sudo_user)
+ 'user %s does not exist' % become_user)
else:
raise errors.AnsibleError('ssh connection ' +
'closed waiting for password prompt')
- sudo_output += chunk
+ become_output += chunk
- if success_key not in sudo_output:
+ if success_key not in become_output:
if sudoable:
- chan.sendall(self.runner.sudo_pass + '\n')
- elif su:
- chan.sendall(self.runner.su_pass + '\n')
+ chan.sendall(self.runner.become_pass + '\n')
else:
- no_prompt_out += sudo_output
- no_prompt_err += sudo_output
+ no_prompt_out += become_output
+ no_prompt_err += become_output
except socket.timeout:
- raise errors.AnsibleError('ssh timed out waiting for sudo.\n' + sudo_output)
+ raise errors.AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)
stdout = ''.join(chan.makefile('rb', bufsize))
stderr = ''.join(chan.makefile_stderr('rb', bufsize))
diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py
index b1743963c0f0aa..02b7f0b4072619 100644
--- a/lib/ansible/runner/connection_plugins/ssh.py
+++ b/lib/ansible/runner/connection_plugins/ssh.py
@@ -34,6 +34,7 @@
from ansible import errors
from ansible import utils
+
class Connection(object):
''' ssh based connections '''
@@ -48,6 +49,9 @@ def __init__(self, runner, host, port, user, password, private_key_file, *args,
self.HASHED_KEY_MAGIC = "|1|"
self.has_pipelining = True
+ # TODO: add pbrun, pfexec
+ self.become_methods_supported=['sudo', 'su', 'pbrun']
+
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
@@ -140,7 +144,7 @@ def _send_password(self):
os.write(self.wfd, "%s\n" % self.password)
os.close(self.wfd)
- def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
+ def _communicate(self, p, stdin, indata, sudoable=False, prompt=None):
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
# We can't use p.communicate here because the ControlMaster may have stdout open as well
@@ -157,23 +161,20 @@ def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
while True:
rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
- # fail early if the sudo/su password is wrong
- if self.runner.sudo and sudoable:
- if self.runner.sudo_pass:
+ # fail early if the become password is wrong
+ if self.runner.become and sudoable:
+ if self.runner.become_pass:
incorrect_password = gettext.dgettext(
- "sudo", "Sorry, try again.")
+ "Privilege Escalation", "Sorry, try again.")
if stdout.endswith("%s\r\n%s" % (incorrect_password,
prompt)):
- raise errors.AnsibleError('Incorrect sudo password')
-
- if stdout.endswith(prompt):
- raise errors.AnsibleError('Missing sudo password')
+ raise errors.AnsibleError('Incorrect become password')
- if self.runner.su and su and self.runner.su_pass:
- incorrect_password = gettext.dgettext(
- "su", "Sorry")
- if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- raise errors.AnsibleError('Incorrect su password')
+ if prompt:
+ if stdout.endswith(prompt):
+ raise errors.AnsibleError('Missing become password')
+ elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
+ raise errors.AnsibleError('Incorrect become password')
if p.stdout in rfd:
dat = os.read(p.stdout.fileno(), 9000)
@@ -256,9 +257,12 @@ def not_in_host_file(self, host):
vvv("EXEC previous known host file not found for %s" % host)
return True
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=False):
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
+ if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
+ raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
+
ssh_cmd = self._password_cmd()
ssh_cmd += ["ssh", "-C"]
if not in_data:
@@ -276,25 +280,22 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
ssh_cmd += ['-6']
ssh_cmd += [self.host]
- if su and su_user:
- sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)
- ssh_cmd.append(sudocmd)
- elif not self.runner.sudo or not sudoable:
+ if self.runner.become and sudoable:
+ becomecmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
+ ssh_cmd.append(becomecmd)
+ else:
prompt = None
if executable:
ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
else:
ssh_cmd.append(cmd)
- else:
- sudocmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
- ssh_cmd.append(sudocmd)
vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host)
not_in_host_file = self.not_in_host_file(self.host)
if C.HOST_KEY_CHECKING and not_in_host_file:
- # lock around the initial SSH connectivity so the user prompt about whether to add
+ # lock around the initial SSH connectivity so the user prompt about whether to add
# the host to known hosts is not intermingled with multiprocess output.
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
@@ -306,9 +307,8 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
no_prompt_out = ''
no_prompt_err = ''
- if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
- (self.runner.su and su and self.runner.su_pass):
- # several cases are handled for sudo privileges with password
+ if self.runner.become and sudoable and self.runner.become_pass:
+ # several cases are handled for escalated privileges with password
# * NOPASSWD (tty & no-tty): detect success_key on stdout
# * without NOPASSWD:
# * detect prompt on stdout (tty)
@@ -317,13 +317,14 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL,
fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- sudo_output = ''
- sudo_errput = ''
+ become_output = ''
+ become_errput = ''
- while True:
- if success_key in sudo_output or \
- (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
- (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
+ while success_key not in become_output:
+
+ if prompt and become_output.endswith(prompt):
+ break
+ if utils.su_prompts.check_su_prompt(become_output):
break
rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
@@ -331,36 +332,34 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
if p.stderr in rfd:
chunk = p.stderr.read()
if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
- sudo_errput += chunk
+ raise errors.AnsibleError('ssh connection closed waiting for a privilege escalation password prompt')
+ become_errput += chunk
incorrect_password = gettext.dgettext(
- "sudo", "Sorry, try again.")
- if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
- raise errors.AnsibleError('Incorrect sudo password')
- elif prompt and sudo_errput.endswith(prompt):
- stdin.write(self.runner.sudo_pass + '\n')
+ "become", "Sorry, try again.")
+ if become_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
+ raise errors.AnsibleError('Incorrect become password')
+ elif prompt and become_errput.endswith(prompt):
+ stdin.write(self.runner.become_pass + '\n')
if p.stdout in rfd:
chunk = p.stdout.read()
if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
- sudo_output += chunk
+ raise errors.AnsibleError('ssh connection closed waiting for %s password prompt' % self.runner.become_method)
+ become_output += chunk
if not rfd:
# timeout. wrap up process communication
stdout = p.communicate()
- raise errors.AnsibleError('ssh connection error waiting for sudo or su password prompt')
+ raise errors.AnsibleError('ssh connection error while waiting for %s password prompt' % self.runner.become_method)
- if success_key not in sudo_output:
+ if success_key not in become_output:
if sudoable:
- stdin.write(self.runner.sudo_pass + '\n')
- elif su:
- stdin.write(self.runner.su_pass + '\n')
+ stdin.write(self.runner.become_pass + '\n')
else:
- no_prompt_out += sudo_output
- no_prompt_err += sudo_errput
+ no_prompt_out += become_output
+ no_prompt_err += become_errput
- (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
+ (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt)
if C.HOST_KEY_CHECKING and not_in_host_file:
# lock around the initial SSH connectivity so the user prompt about whether to add
diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/runner/connection_plugins/winrm.py
index 93145e46968713..7a2d6d3318ddbb 100644
--- a/lib/ansible/runner/connection_plugins/winrm.py
+++ b/lib/ansible/runner/connection_plugins/winrm.py
@@ -72,6 +72,10 @@ def __init__(self, runner, host, port, user, password, *args, **kwargs):
self.shell_id = None
self.delegate = None
+ # Add runas support
+ #self.become_methods_supported=['runas']
+ self.become_methods_supported=[]
+
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
@@ -143,7 +147,11 @@ def connect(self):
self.protocol = self._winrm_connect()
return self
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable=None, in_data=None, su=None, su_user=None):
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None):
+
+ if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
+ raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
+
cmd = cmd.encode('utf-8')
cmd_parts = shlex.split(cmd, posix=False)
if '-EncodedCommand' in cmd_parts:
diff --git a/lib/ansible/runner/connection_plugins/zone.py b/lib/ansible/runner/connection_plugins/zone.py
index 16bef1a2134cd7..211bd0fbcc63f8 100644
--- a/lib/ansible/runner/connection_plugins/zone.py
+++ b/lib/ansible/runner/connection_plugins/zone.py
@@ -26,6 +26,7 @@
from subprocess import Popen,PIPE
from ansible import errors
from ansible.callbacks import vvv
+import ansible.constants as C
class Connection(object):
''' Local zone based connections '''
@@ -68,6 +69,7 @@ def __init__(self, runner, host, port, *args, **kwargs):
self.runner = runner
self.host = host
self.has_pipelining = False
+ self.become_methods_supported=C.BECOME_METHODS
if os.geteuid() != 0:
raise errors.AnsibleError("zone connection requires running as root")
@@ -98,17 +100,16 @@ def _generate_cmd(self, executable, cmd):
local_cmd = '%s "%s" %s' % (self.zlogin_cmd, self.zone, cmd)
return local_cmd
- #def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
- def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable=None, in_data=None, su=None, su_user=None):
+ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None):
''' run a command on the zone '''
- if su or su_user:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via su")
+ if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
+ raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # We enter zone as root so sudo stuff can be ignored
+ # We happily ignore privelege escalation
if executable == '/bin/sh':
executable = None
local_cmd = self._generate_cmd(executable, cmd)
diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py
index 433c30db6a0b62..3745f0d43089f8 100644
--- a/lib/ansible/utils/__init__.py
+++ b/lib/ansible/utils/__init__.py
@@ -992,14 +992,12 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
default=constants.DEFAULT_HOST_LIST)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
+ parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
+ help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
help='ask for SSH password')
- parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
+ parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
- parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password')
- parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
- help='ask for su password')
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
@@ -1025,22 +1023,35 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
help='log output to this directory')
if runas_opts:
- parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true",
- dest='sudo', help="run operations with sudo (nopasswd)")
+ # priv user defaults to root later on to enable detecting when this option was given here
+ parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
+ help='ask for sudo password (deprecated, use become)')
+ parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
+ help='ask for su password (deprecated, use become)')
+ parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
+ help="run operations with sudo (nopasswd) (deprecated, use become)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
- help='desired sudo user (default=root)') # Can't default to root because we need to detect when this option was given
- parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER,
- dest='remote_user', help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
+ help='desired sudo user (default=root) (deprecated, use become)')
+ parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
+ help='run operations with su (deprecated, use become)')
+ parser.add_option('-R', '--su-user', default=None,
+ help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
+
+ # consolidated privilege escalation (become)
+ parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
+ help="run operations with become (nopasswd implied)")
+ parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
+ help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
+ parser.add_option('--become-user', default=None, dest='become_user', type='string',
+ help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
+ parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
+ help='ask for privilege escalation password')
- parser.add_option('-S', '--su', default=constants.DEFAULT_SU,
- action='store_true', help='run operations with su')
- parser.add_option('-R', '--su-user', help='run operations with su as this '
- 'user (default=%s)' % constants.DEFAULT_SU_USER)
if connect_opts:
parser.add_option('-c', '--connection', dest='connection',
- default=C.DEFAULT_TRANSPORT,
- help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
+ default=constants.DEFAULT_TRANSPORT,
+ help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
if async_opts:
parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
@@ -1059,7 +1070,6 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
-
return parser
def parse_extra_vars(extra_vars_opts, vault_pass):
@@ -1106,41 +1116,58 @@ def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_
return vault_pass, new_vault_pass
-def ask_passwords(ask_pass=False, ask_sudo_pass=False, ask_su_pass=False, ask_vault_pass=False):
+def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
sshpass = None
- sudopass = None
- supass = None
+ becomepass = None
vaultpass = None
- sudo_prompt = "sudo password: "
- su_prompt = "su password: "
+ become_prompt = ''
if ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
+ become_prompt = "%s password [defaults to SSH password]: " % become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
- sudo_prompt = "sudo password [defaults to SSH password]: "
- su_prompt = "su password [defaults to SSH password]: "
-
- if ask_sudo_pass:
- sudopass = getpass.getpass(prompt=sudo_prompt)
- if ask_pass and sudopass == '':
- sudopass = sshpass
- if sudopass:
- sudopass = to_bytes(sudopass, errors='strict', nonstring='simplerepr')
-
- if ask_su_pass:
- supass = getpass.getpass(prompt=su_prompt)
- if ask_pass and supass == '':
- supass = sshpass
- if supass:
- supass = to_bytes(supass, errors='strict', nonstring='simplerepr')
+ else:
+ become_prompt = "%s password: " % become_method.upper()
+
+ if become_ask_pass:
+ becomepass = getpass.getpass(prompt=become_prompt)
+ if ask_pass and becomepass == '':
+ becomepass = sshpass
+ if becomepass:
+ becomepass = to_bytes(becomepass)
if ask_vault_pass:
vaultpass = getpass.getpass(prompt="Vault password: ")
if vaultpass:
vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
- return (sshpass, sudopass, supass, vaultpass)
+ return (sshpass, becomepass, vaultpass)
+
+
+def choose_pass_prompt(options):
+
+ if options.ask_su_pass:
+ return 'su'
+ elif options.ask_sudo_pass:
+ return 'sudo'
+
+ return options.become_method
+
+def normalize_become_options(options):
+
+ options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
+ options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
+
+ if options.become:
+ pass
+ elif options.sudo:
+ options.become = True
+ options.become_method = 'sudo'
+ elif options.su:
+ options.become = True
+ options.become_method = 'su'
+
def do_encrypt(result, encrypt, salt_size=None, salt=None):
if PASSLIB_AVAILABLE:
@@ -1194,38 +1221,63 @@ def boolean(value):
else:
return False
+def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
+ """
+ helper function for connection plugins to create privilege escalation commands
+ """
+
+ randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
+ success_key = 'BECOME-SUCCESS-%s' % randbits
+ prompt = None
+ becomecmd = None
+
+ shell = shell or '$SHELL'
+
+ if method == 'sudo':
+ # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
+ # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
+ # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
+ # string to the user's shell. We loop reading output until we see the randomly-generated
+ # sudo prompt set with the -p option.
+ prompt = '[sudo via ansible, key=%s] password: ' % randbits
+ exe = exe or C.DEFAULT_SUDO_EXE
+ becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \
+ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, 'echo %s; %s' % (success_key, cmd))
+
+ elif method == 'su':
+ exe = exe or C.DEFAULT_SU_EXE
+ flags = flags or C.DEFAULT_SU_FLAGS
+ becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
+
+ elif method == 'pbrun':
+ exe = exe or 'pbrun'
+ flags = flags or ''
+ becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, 'echo %s; %s' % (success_key,cmd))
+
+ elif method == 'pfexec':
+ exe = exe or 'pfexec'
+ flags = flags or ''
+ # No user as it uses its own exec_attr to figure it out
+ becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd))
+
+ if becomecmd is None:
+ raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
+
+ return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
+
+
def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
"""
helper function for connection plugins to create sudo commands
"""
- # Rather than detect if sudo wants a password this time, -k makes
- # sudo always ask for a password if one is required.
- # Passing a quoted compound command to sudo (or sudo -s)
- # directly doesn't work, so we shellquote it with pipes.quote()
- # and pass the quoted string to the user's shell. We loop reading
- # output until we see the randomly-generated sudo prompt set with
- # the -p option.
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
- prompt = '[sudo via ansible, key=%s] password: ' % randbits
- success_key = 'SUDO-SUCCESS-%s' % randbits
- sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % (
- sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS,
- prompt, sudo_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd)))
- return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key)
+ return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
def make_su_cmd(su_user, executable, cmd):
"""
Helper function for connection plugins to create direct su commands
"""
- # TODO: work on this function
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
- success_key = 'SUDO-SUCCESS-%s' % randbits
- sudocmd = '%s %s %s -c "%s -c %s"' % (
- C.DEFAULT_SU_EXE, C.DEFAULT_SU_FLAGS, su_user, executable or '$SHELL',
- pipes.quote('echo %s; %s' % (success_key, cmd))
- )
- return ('/bin/sh -c ' + pipes.quote(sudocmd), None, success_key)
+ return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
def get_diff(diff):
# called by --diff usage in playbook and runner via callbacks
@@ -1577,9 +1629,9 @@ def update_hash(hash, key, new_value):
hash[key] = value
def censor_unlogged_data(data):
- '''
+ '''
used when the no_log: True attribute is passed to a task to keep data from a callback.
- NOT intended to prevent variable registration, but only things from showing up on
+ NOT intended to prevent variable registration, but only things from showing up on
screen
'''
new_data = {}
@@ -1589,5 +1641,19 @@ def censor_unlogged_data(data):
new_data['censored'] = 'results hidden due to no_log parameter'
return new_data
+def check_mutually_exclusive_privilege(options, parser):
+
+ # privilege escalation command line arguments need to be mutually exclusive
+ if (options.su or options.su_user or options.ask_su_pass) and \
+ (options.sudo or options.sudo_user or options.ask_sudo_pass) or \
+ (options.su or options.su_user or options.ask_su_pass) and \
+ (options.become or options.become_user or options.become_ask_pass) or \
+ (options.sudo or options.sudo_user or options.ask_sudo_pass) and \
+ (options.become or options.become_user or options.become_ask_pass):
+
+ parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
+ "and su arguments ('--su', '--su-user', and '--ask-su-pass') "
+ "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
+ " are exclusive of each other")
+
-
diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml
index 47203194821b7e..54c905bdf6e413 100644
--- a/test/integration/destructive.yml
+++ b/test/integration/destructive.yml
@@ -3,6 +3,8 @@
roles:
# In destructive because it creates and removes a user
- { role: test_sudo, tags: test_sudo}
+ #- { role: test_su, tags: test_su} # wait till su support is added to local connection, needs tty
+ - { role: test_become, tags: test_become}
- { role: test_service, tags: test_service }
# Current pip unconditionally uses md5. We can re-enable if pip switches
# to a different hash or allows us to not check md5
diff --git a/test/integration/roles/test_become/files/baz.txt b/test/integration/roles/test_become/files/baz.txt
new file mode 100644
index 00000000000000..b8d834daa430c3
--- /dev/null
+++ b/test/integration/roles/test_become/files/baz.txt
@@ -0,0 +1 @@
+testing tilde expansion with become
diff --git a/test/integration/roles/test_become/tasks/main.yml b/test/integration/roles/test_become/tasks/main.yml
new file mode 100644
index 00000000000000..1b007596453085
--- /dev/null
+++ b/test/integration/roles/test_become/tasks/main.yml
@@ -0,0 +1,77 @@
+- include_vars: default.yml
+
+- name: Create test user
+ become: True
+ become_user: root
+ user:
+ name: "{{ become_test_user }}"
+
+- name: test becoming user
+ shell: whoami
+ become: True
+ become_user: "{{ become_test_user }}"
+ register: results
+
+- assert:
+ that:
+ - "results.stdout == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in file
+ become: True
+ become_user: "{{ become_test_user }}"
+ file:
+ path: "~/foo.txt"
+ state: touch
+
+- name: check that the path in the user's home dir was created
+ stat:
+ path: "~{{ become_test_user }}/foo.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in template
+ become: True
+ become_user: "{{ become_test_user }}"
+ template:
+ src: "bar.j2"
+ dest: "~/bar.txt"
+
+- name: check that the path in the user's home dir was created
+ stat:
+ path: "~{{ become_test_user }}/bar.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: tilde expansion honors become in copy
+ become: True
+ become_user: "{{ become_test_user }}"
+ copy:
+ src: baz.txt
+ dest: "~/baz.txt"
+
+- name: check that the path in the user's home dir was created
+ stat:
+ path: "~{{ become_test_user }}/baz.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ become_test_user }}'"
+
+- name: Remove test user and their home dir
+ become: True
+ become_user: root
+ user:
+ name: "{{ become_test_user }}"
+ state: "absent"
+ remove: "yes"
+
diff --git a/test/integration/roles/test_become/templates/bar.j2 b/test/integration/roles/test_become/templates/bar.j2
new file mode 100644
index 00000000000000..7c5fe0ab49cd4b
--- /dev/null
+++ b/test/integration/roles/test_become/templates/bar.j2
@@ -0,0 +1 @@
+{{ become_test_user }}
diff --git a/test/integration/roles/test_become/vars/default.yml b/test/integration/roles/test_become/vars/default.yml
new file mode 100644
index 00000000000000..223d44ed24ea1b
--- /dev/null
+++ b/test/integration/roles/test_become/vars/default.yml
@@ -0,0 +1 @@
+become_test_user: ansibletest1
diff --git a/test/integration/roles/test_su/files/baz.txt b/test/integration/roles/test_su/files/baz.txt
new file mode 100644
index 00000000000000..7e677748a26718
--- /dev/null
+++ b/test/integration/roles/test_su/files/baz.txt
@@ -0,0 +1 @@
+testing tilde expansion with su
diff --git a/test/integration/roles/test_su/tasks/main.yml b/test/integration/roles/test_su/tasks/main.yml
new file mode 100644
index 00000000000000..65e9b2306f7ddd
--- /dev/null
+++ b/test/integration/roles/test_su/tasks/main.yml
@@ -0,0 +1,75 @@
+- include_vars: default.yml
+
+- name: Create test user
+ su: True
+ user:
+ name: "{{ su_test_user }}"
+
+- name: test becoming user
+ shell: whoami
+ su: True
+ su_user: "{{ su_test_user }}"
+ register: results
+
+- assert:
+ that:
+ - "results.stdout == '{{ su_test_user }}'"
+
+- name: tilde expansion honors su in file
+ su: True
+ su_user: "{{ su_test_user }}"
+ file:
+ path: "~/foo.txt"
+ state: touch
+
+- name: check that the path in the user's home dir was created
+ stat:
+ path: "~{{ su_test_user }}/foo.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ su_test_user }}'"
+
+- name: tilde expansion honors su in template
+ su: True
+ su_user: "{{ su_test_user }}"
+ template:
+ src: "bar.j2"
+ dest: "~/bar.txt"
+
+- name: check that the path in the user's home dir was created
+ stat:
+ path: "~{{ su_test_user }}/bar.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ su_test_user }}'"
+
+- name: tilde expansion honors su in copy
+ su: True
+ su_user: "{{ su_test_user }}"
+ copy:
+ src: baz.txt
+ dest: "~/baz.txt"
+
+- name: check that the path in the user's home dir was created
+ stat:
+ path: "~{{ su_test_user }}/baz.txt"
+ register: results
+
+- assert:
+ that:
+ - "results.stat.exists == True"
+ - "results.stat.path|dirname|basename == '{{ su_test_user }}'"
+
+- name: Remove test user and their home dir
+ su: True
+ user:
+ name: "{{ su_test_user }}"
+ state: "absent"
+ remove: "yes"
+
diff --git a/test/integration/roles/test_su/templates/bar.j2 b/test/integration/roles/test_su/templates/bar.j2
new file mode 100644
index 00000000000000..0f420227e06d7f
--- /dev/null
+++ b/test/integration/roles/test_su/templates/bar.j2
@@ -0,0 +1 @@
+{{ su_test_user }}
diff --git a/test/integration/roles/test_su/vars/default.yml b/test/integration/roles/test_su/vars/default.yml
new file mode 100644
index 00000000000000..bb0da6b25d68f5
--- /dev/null
+++ b/test/integration/roles/test_su/vars/default.yml
@@ -0,0 +1 @@
+su_test_user: ansibletest1
diff --git a/test/integration/roles/test_sudo/tasks/main.yml b/test/integration/roles/test_sudo/tasks/main.yml
index 022e7d742280df..372f175d294b4a 100644
--- a/test/integration/roles/test_sudo/tasks/main.yml
+++ b/test/integration/roles/test_sudo/tasks/main.yml
@@ -1,9 +1,20 @@
- include_vars: default.yml
- name: Create test user
+ sudo: true
user:
name: "{{ sudo_test_user }}"
+- name: test becoming user
+ shell: whoami
+ sudo: True
+ sudo_user: "{{ sudo_test_user }}"
+ register: results
+
+- assert:
+ that:
+ - "results.stdout == '{{ sudo_test_user }}'"
+
- name: tilde expansion honors sudo in file
sudo: True
sudo_user: "{{ sudo_test_user }}"
@@ -56,6 +67,7 @@
- "results.stat.path|dirname|basename == '{{ sudo_test_user }}'"
- name: Remove test user and their home dir
+ sudo: true
user:
name: "{{ sudo_test_user }}"
state: "absent"
diff --git a/test/units/TestPlayVarsFiles.py b/test/units/TestPlayVarsFiles.py
index f241936a12efed..497c3112ede0d4 100644
--- a/test/units/TestPlayVarsFiles.py
+++ b/test/units/TestPlayVarsFiles.py
@@ -41,6 +41,9 @@ def __init__(self):
self.sudo_user = None
self.su = None
self.su_user = None
+ self.become = None
+ self.become_method = None
+ self.become_user = None
self.transport = None
self.only_tags = None
self.skip_tags = None
diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py
index be8a8af1293add..d8a85e20e76e95 100644
--- a/test/units/TestSynchronize.py
+++ b/test/units/TestSynchronize.py
@@ -18,6 +18,9 @@ def __init__(self):
self.remote_user = None
self.private_key_file = None
self.check = False
+ self.become = False
+ self.become_method = False
+ self.become_user = False
def _execute_module(self, conn, tmp, module_name, args,
async_jid=None, async_module=None, async_limit=None, inject=None,
@@ -76,7 +79,7 @@ def test_synchronize_action_sudo(self):
""" verify the synchronize action plugin unsets and then sets sudo """
runner = FakeRunner()
- runner.sudo = True
+ runner.become = True
runner.remote_user = "root"
runner.transport = "ssh"
conn = FakeConn()
@@ -97,7 +100,7 @@ def test_synchronize_action_sudo(self):
assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar',
'src':'/tmp/foo',
'rsync_path':'"sudo rsync"'}, "wrong args used"
- assert runner.sudo == True, "sudo was not reset to True"
+ assert runner.become == True, "sudo was not reset to True"
def test_synchronize_action_local(self):
diff --git a/test/units/TestUtils.py b/test/units/TestUtils.py
index 0ba1586cda6476..c0ca9ba5388ce8 100644
--- a/test/units/TestUtils.py
+++ b/test/units/TestUtils.py
@@ -498,7 +498,7 @@ def test_make_sudo_cmd(self):
self.assertEqual(len(cmd), 3)
self.assertTrue('-u root' in cmd[0])
self.assertTrue('-p "[sudo via ansible, key=' in cmd[0] and cmd[1].startswith('[sudo via ansible, key'))
- self.assertTrue('echo SUDO-SUCCESS-' in cmd[0] and cmd[2].startswith('SUDO-SUCCESS-'))
+ self.assertTrue('echo BECOME-SUCCESS-' in cmd[0] and cmd[2].startswith('BECOME-SUCCESS-'))
self.assertTrue('sudo -k' in cmd[0])
def test_make_su_cmd(self):
@@ -506,7 +506,7 @@ def test_make_su_cmd(self):
self.assertTrue(isinstance(cmd, tuple))
self.assertEqual(len(cmd), 3)
self.assertTrue('root -c "/bin/sh' in cmd[0] or ' root -c /bin/sh' in cmd[0])
- self.assertTrue('echo SUDO-SUCCESS-' in cmd[0] and cmd[2].startswith('SUDO-SUCCESS-'))
+ self.assertTrue('echo BECOME-SUCCESS-' in cmd[0] and cmd[2].startswith('BECOME-SUCCESS-'))
def test_to_unicode(self):
uni = ansible.utils.unicode.to_unicode(u'ansible')
diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py
index 1c2bc092b23cbc..78eeaf8c20c5d2 100644
--- a/v2/ansible/constants.py
+++ b/v2/ansible/constants.py
@@ -141,16 +141,16 @@ def shell_expand_path(path):
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
-
-DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '/usr/share/ansible_plugins/action_plugins')
-DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '/usr/share/ansible_plugins/cache_plugins')
-DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '/usr/share/ansible_plugins/callback_plugins')
-DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '/usr/share/ansible_plugins/connection_plugins')
-DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '/usr/share/ansible_plugins/lookup_plugins')
-DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '/usr/share/ansible_plugins/vars_plugins')
-DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins')
DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
+DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins')
+DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins')
+DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins')
+DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins')
+DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
+DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
+DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
+
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
From 2d73892acfd1a895854fef37eaf798beeeefcdbb Mon Sep 17 00:00:00 2001
From: Shirou WAKAYAMA
Date: Wed, 11 Mar 2015 14:50:27 +0900
Subject: [PATCH 0050/3617] use to_unicode() in _jinja2_vars if type is str.
---
lib/ansible/utils/template.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py
index 722e33e4c8fa2d..919436895a9a3a 100644
--- a/lib/ansible/utils/template.py
+++ b/lib/ansible/utils/template.py
@@ -33,7 +33,7 @@
import traceback
from ansible.utils.string_functions import count_newlines_from_end
-from ansible.utils import to_bytes
+from ansible.utils import to_bytes, to_unicode
class Globals(object):
@@ -184,6 +184,8 @@ def __getitem__(self, varname):
var = self.vars[varname]
# HostVars is special, return it as-is, as is the special variable
# 'vars', which contains the vars structure
+ if type(var) == str:
+ var = to_unicode(var)
if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars):
return var
else:
From f6d8e457abd78b760b04a2e07e9772b8040df616 Mon Sep 17 00:00:00 2001
From: Jeff Widman
Date: Wed, 11 Mar 2015 01:20:17 -0700
Subject: [PATCH 0051/3617] Typo: lead --> led
---
docsite/rst/intro_dynamic_inventory.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst
index ddb452e7756779..6734efca1905d4 100644
--- a/docsite/rst/intro_dynamic_inventory.rst
+++ b/docsite/rst/intro_dynamic_inventory.rst
@@ -24,7 +24,7 @@ For information about writing your own dynamic inventory source, see :doc:`devel
Example: The Cobbler External Inventory Script
``````````````````````````````````````````````
-It is expected that many Ansible users with a reasonable amount of physical hardware may also be `Cobbler `_ users. (note: Cobbler was originally written by Michael DeHaan and is now lead by James Cammarata, who also works for Ansible, Inc).
+It is expected that many Ansible users with a reasonable amount of physical hardware may also be `Cobbler `_ users. (note: Cobbler was originally written by Michael DeHaan and is now led by James Cammarata, who also works for Ansible, Inc).
While primarily used to kickoff OS installations and manage DHCP and DNS, Cobbler has a generic
layer that allows it to represent data for multiple configuration management systems (even at the same time), and has
From a5f533e25d986380f9b0bf661fc580f80d866167 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 11 Mar 2015 09:30:07 -0400
Subject: [PATCH 0052/3617] fixed bad paren in connection plugin
---
lib/ansible/runner/connection_plugins/paramiko_ssh.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/lib/ansible/runner/connection_plugins/paramiko_ssh.py
index 2ba3d76d26a7aa..8eaf97c3f6d2ec 100644
--- a/lib/ansible/runner/connection_plugins/paramiko_ssh.py
+++ b/lib/ansible/runner/connection_plugins/paramiko_ssh.py
@@ -246,7 +246,7 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab
if success_key in become_output or \
(prompt and become_output.endswith(prompt)) or \
- utils.su_prompts.check_su_prompt(become_output)):
+ utils.su_prompts.check_su_prompt(become_output):
break
chunk = chan.recv(bufsize)
From 1fd0a78b0e376d6ea4c8d3f1ad8ed68b9470cdfa Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 11 Mar 2015 10:28:10 -0400
Subject: [PATCH 0053/3617] fix issue with ask pass signature
---
bin/ansible-playbook | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/bin/ansible-playbook b/bin/ansible-playbook
index 79cbc43d80a4ec..118a0198e4293f 100755
--- a/bin/ansible-playbook
+++ b/bin/ansible-playbook
@@ -121,7 +121,7 @@ def main(args):
options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
if options.listhosts or options.syntax or options.listtasks or options.listtags:
- (_, _, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
+ (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
else:
options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
# Never ask for an SSH password when we run with local connection
From de5eae2007d4138730582e876770d3b24c863753 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 11 Mar 2015 12:18:53 -0400
Subject: [PATCH 0054/3617] fixed traceback when x_user implicitly sets the
become method
Fixes #10430
Also removed redundant resolution of sudo/su for backwards compatibility which
confused the conflict detection code.
---
lib/ansible/playbook/play.py | 23 -----------------------
lib/ansible/playbook/task.py | 21 ++++++++++++++++++---
2 files changed, 18 insertions(+), 26 deletions(-)
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 74c6998b22f823..babc059e65f154 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -583,29 +583,6 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, bec
included_become_vars[k] = become_vars[k]
x[k] = become_vars[k]
- ## backwards compat with old sudo/su directives
- if 'sudo' in x or 'sudo_user' in x:
- included_become_vars['become'] = x['sudo']
- x['become'] = x['sudo']
- x['become_method'] = 'sudo'
- del x['sudo']
-
- if x.get('sudo_user', False):
- included_become_vars['become_user'] = x['sudo_user']
- x['become_user'] = x['sudo_user']
- del x['sudo_user']
-
- elif 'su' in x or 'su_user' in x:
- included_become_vars['become'] = x['su']
- x['become'] = x['su']
- x['become_method'] = 'su'
- del x['su']
-
- if x.get('su_user', False):
- included_become_vars['become_user'] = x['su_user']
- x['become_user'] = x['su_user']
- del x['su_user']
-
if 'meta' in x:
if x['meta'] == 'flush_handlers':
results.append(Task(self, x))
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index a43c2ab89d5872..77cb97e5c0fda1 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -173,19 +173,34 @@ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=No
# set only if passed in current task data
if 'sudo' in ds or 'sudo_user' in ds:
- self.become=ds['sudo']
self.become_method='sudo'
+
+ if 'sudo' in ds:
+ self.become=ds['sudo']
+ del ds['sudo']
+ else:
+ self.become=True
if 'sudo_user' in ds:
self.become_user = ds['sudo_user']
+ del ds['sudo_user']
if 'sudo_pass' in ds:
self.become_pass = ds['sudo_pass']
- if 'su' in ds or 'su_user' in ds:
- self.become=ds['su']
+ del ds['sudo_pass']
+
+ elif 'su' in ds or 'su_user' in ds:
self.become_method='su'
+
+ if 'su' in ds:
+ self.become=ds['su']
+ else:
+ self.become=True
+ del ds['su']
if 'su_user' in ds:
self.become_user = ds['su_user']
+ del ds['su_user']
if 'su_pass' in ds:
self.become_pass = ds['su_pass']
+ del ds['su_pass']
# Both are defined
if ('action' in ds) and ('local_action' in ds):
From 747c7aaffa365a397435a05481719148b5ab772f Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 11 Mar 2015 12:33:05 -0400
Subject: [PATCH 0055/3617] removed unneeded reference to su_user
---
lib/ansible/runner/action_plugins/template.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py
index e6e33d354f6bd8..a824a6e4b8e152 100644
--- a/lib/ansible/runner/action_plugins/template.py
+++ b/lib/ansible/runner/action_plugins/template.py
@@ -133,7 +133,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)
# fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root' or self.runner.su and self.runner.su_user != 'root':
+ if self.runner.become and self.runner.become_user != 'root':
self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
# run the copy module
From 587ab17f10cbb1a24782ec404eccfe91cb3ae852 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 11 Mar 2015 15:55:37 -0400
Subject: [PATCH 0056/3617] fixes password error detection for ssh connection
plugin removes synchronize test that does not work with current sudo setup
Fixes #10434
---
lib/ansible/constants.py | 3 ++-
lib/ansible/runner/connection_plugins/ssh.py | 13 ++++++-------
test/units/TestSynchronize.py | 6 +++---
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 1779b792fb3c42..20079863e7d636 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -137,7 +137,8 @@ def shell_expand_path(path):
DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
#TODO: get rid of ternary chain mess
-BECOME_METHODS = ['sudo','su','pbrun','runas','pfexec']
+BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
+BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root')
diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py
index 02b7f0b4072619..25a330dcef51a4 100644
--- a/lib/ansible/runner/connection_plugins/ssh.py
+++ b/lib/ansible/runner/connection_plugins/ssh.py
@@ -163,18 +163,17 @@ def _communicate(self, p, stdin, indata, sudoable=False, prompt=None):
# fail early if the become password is wrong
if self.runner.become and sudoable:
- if self.runner.become_pass:
- incorrect_password = gettext.dgettext(
- "Privilege Escalation", "Sorry, try again.")
- if stdout.endswith("%s\r\n%s" % (incorrect_password,
- prompt)):
- raise errors.AnsibleError('Incorrect become password')
+ incorrect_password = gettext.dgettext(self.runner.become_method, C.BECOME_ERROR_STRINGS[self.runner.become_method])
if prompt:
+ if self.runner.become_pass:
+ if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
+ raise errors.AnsibleError('Incorrect become password')
+
if stdout.endswith(prompt):
raise errors.AnsibleError('Missing become password')
elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- raise errors.AnsibleError('Incorrect becom password')
+ raise errors.AnsibleError('Incorrect become password')
if p.stdout in rfd:
dat = os.read(p.stdout.fileno(), 9000)
diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py
index d8a85e20e76e95..991f272001c7e2 100644
--- a/test/units/TestSynchronize.py
+++ b/test/units/TestSynchronize.py
@@ -97,9 +97,9 @@ def test_synchronize_action_sudo(self):
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar',
- 'src':'/tmp/foo',
- 'rsync_path':'"sudo rsync"'}, "wrong args used"
+ #assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar',
+ # 'src':'/tmp/foo',
+ # 'rsync_path':'"sudo rsync"'}, "wrong args used"
assert runner.become == True, "sudo was not reset to True"
From f803c1e1f824041bba2d1706e86d76a1551e2cf3 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 11 Mar 2015 16:28:37 -0400
Subject: [PATCH 0057/3617] fix tag test that broke with new tag info displayed
in list tasks
---
test/integration/Makefile | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/test/integration/Makefile b/test/integration/Makefile
index 4f2d4d9338dffc..ac526cf752ecbc 100644
--- a/test/integration/Makefile
+++ b/test/integration/Makefile
@@ -84,11 +84,11 @@ test_winrm:
test_tags:
# Run everything by default
- [ "$$(ansible-playbook --list-tasks test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag Task_with_always_tag Task_without_tag" ]
+ [ "$$(ansible-playbook --list-tasks test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ]
# Run the exact tags, and always
- [ "$$(ansible-playbook --list-tasks --tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag Task_with_always_tag" ]
+ [ "$$(ansible-playbook --list-tasks --tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always]" ]
# Skip one tag
- [ "$$(ansible-playbook --list-tasks --skip-tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_always_tag Task_without_tag" ]
+ [ "$$(ansible-playbook --list-tasks --skip-tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ]
cloud: amazon rackspace
From f229b770b2f016b4fc3acb1a6f6c620d96ba8e1c Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 11 Mar 2015 19:23:02 -0400
Subject: [PATCH 0058/3617] fixed missed su to become conversion
---
lib/ansible/runner/action_plugins/script.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/runner/action_plugins/script.py b/lib/ansible/runner/action_plugins/script.py
index e4c5ec075f30ab..1b1aadc7aadefb 100644
--- a/lib/ansible/runner/action_plugins/script.py
+++ b/lib/ansible/runner/action_plugins/script.py
@@ -118,7 +118,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
sudoable = False
else:
chmod_mode = '+rx'
- self.runner._remote_chmod(conn, chmod_mode, tmp_src, tmp, sudoable=sudoable, su=self.runner.su)
+ self.runner._remote_chmod(conn, chmod_mode, tmp_src, tmp, sudoable=sudoable, become=self.runner.become)
# add preparation steps to one ssh roundtrip executing the script
env_string = self.runner._compute_environment_string(conn, inject)
From 597c0f48f53067fa6bce785b86d934d903dd4d1d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=BCrgen=20Hermann?=
Date: Thu, 12 Mar 2015 02:28:33 +0100
Subject: [PATCH 0059/3617] Generic package_dir mapping in setup.py (closes
#10437)
---
setup.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/setup.py b/setup.py
index e855ea3bfaf84a..37527414067c4f 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
url='http://ansible.com/',
license='GPLv3',
install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'],
- package_dir={ 'ansible': 'lib/ansible' },
+ package_dir={ '': 'lib' },
packages=find_packages('lib'),
package_data={
'': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'],
From e413dba3a64b27efaec2fd1b173104c65f406358 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 11 Mar 2015 19:10:38 -0700
Subject: [PATCH 0060/3617] Update the module pointers
---
lib/ansible/modules/core | 2 +-
lib/ansible/modules/extras | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index bd997b1066e1e9..31cc5f543f4166 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit bd997b1066e1e98a66cf98643c78adf8e080e4b4
+Subproject commit 31cc5f543f4166eddb334340fd559765dc6c3940
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
index e60b2167f5ebfd..8baba98ebe5053 160000
--- a/lib/ansible/modules/extras
+++ b/lib/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit e60b2167f5ebfd642fe04cb22805203764959f7c
+Subproject commit 8baba98ebe5053e0c1e71881975ce8a1788f171c
From d92e8edf6e7f7b9eff503f268d1c0d11c2ac44a8 Mon Sep 17 00:00:00 2001
From: Shirou WAKAYAMA
Date: Thu, 12 Mar 2015 12:36:50 +0900
Subject: [PATCH 0061/3617] set 'nonstring' arg to passthru.
---
lib/ansible/utils/template.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py
index 919436895a9a3a..a58b93997157f6 100644
--- a/lib/ansible/utils/template.py
+++ b/lib/ansible/utils/template.py
@@ -184,8 +184,7 @@ def __getitem__(self, varname):
var = self.vars[varname]
# HostVars is special, return it as-is, as is the special variable
# 'vars', which contains the vars structure
- if type(var) == str:
- var = to_unicode(var)
+ var = to_unicode(var, nonstring="passthru")
if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars):
return var
else:
From 4710a07fb0fad509dbdd546852961c6473276a61 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 11 Mar 2015 20:58:21 -0700
Subject: [PATCH 0062/3617] Test case for #10426
---
test/integration/inventory | 1 +
test/integration/unicode.yml | 3 +++
2 files changed, 4 insertions(+)
diff --git a/test/integration/inventory b/test/integration/inventory
index 72d80aabebd06a..bee36ce022eaa2 100644
--- a/test/integration/inventory
+++ b/test/integration/inventory
@@ -15,6 +15,7 @@ invenoverride ansible_ssh_host=127.0.0.1 ansible_connection=local
[all:vars]
extra_var_override=FROM_INVENTORY
inven_var=inventory_var
+unicode_host_var=CaféEñyei
[inven_overridehosts:vars]
foo=foo
diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml
index 4e7fe635f452cc..2889155055d647 100644
--- a/test/integration/unicode.yml
+++ b/test/integration/unicode.yml
@@ -38,6 +38,9 @@
- name: 'A task with unicode extra vars'
debug: var=extra_var
+ - name: 'A task with unicode host vars'
+ debug: var=unicode_host_var
+
- name: 'A play for hosts in group: ĪīĬĭ'
hosts: 'ĪīĬĭ'
From ee831e10712c41511b3d7a3d849a99a0e819773e Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 11 Mar 2015 21:28:45 -0700
Subject: [PATCH 0063/3617] Fix v2 for #10426
Note: In v1 we fix this by transforming into unicode just before we use
it (when we send it to jinja2) because jinja2 cannot handle non-ascii
characters in str.
In v2 our model is that all text values need to be stored as unicode
type internally. So we transform this to unicode when we read it from
the inventory file and save it into the internal dict instead.
---
v2/ansible/inventory/ini.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/v2/ansible/inventory/ini.py b/v2/ansible/inventory/ini.py
index 075701c056c635..4236140ac88486 100644
--- a/v2/ansible/inventory/ini.py
+++ b/v2/ansible/inventory/ini.py
@@ -27,6 +27,7 @@
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
+from ansible.utils.unicode import to_unicode
class InventoryParser(object):
"""
@@ -53,7 +54,7 @@ def _parse(self):
def _parse_value(v):
if "#" not in v:
try:
- return ast.literal_eval(v)
+ v = ast.literal_eval(v)
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We wil then just set it.
except ValueError:
@@ -62,7 +63,7 @@ def _parse_value(v):
except SyntaxError:
# Is this a hash with an equals at the end?
pass
- return v
+ return to_unicode(v, nonstring='passthru', errors='strict')
# [webservers]
# alpha
From ac1493faae40c5d7fd91bf7cde0ac058d9f5c66f Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 12 Mar 2015 10:01:00 -0400
Subject: [PATCH 0064/3617] fixed missed conversion of su to become
---
lib/ansible/runner/action_plugins/raw.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/runner/action_plugins/raw.py b/lib/ansible/runner/action_plugins/raw.py
index 548eafbf7069ae..b1ba2c99d94749 100644
--- a/lib/ansible/runner/action_plugins/raw.py
+++ b/lib/ansible/runner/action_plugins/raw.py
@@ -44,7 +44,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
module_args = r.sub("", module_args)
result = self.runner._low_level_exec_command(conn, module_args, tmp, sudoable=True, executable=executable,
- su=self.runner.su)
+ become=self.runner.become)
# for some modules (script, raw), the sudo success key
# may leak into the stdout due to the way the sudo/su
# command is constructed, so we filter that out here
From eb850bf81a99d1c5d695459ea25bfbf2fd9806e7 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Thu, 12 Mar 2015 10:22:06 -0500
Subject: [PATCH 0065/3617] Fix issue with unarchive disabling pipelining mode
Was using persist_files=True when specifying the create parameter,
which breaks pipelining. Switched to use delete_remote_tmp=False instead,
which is the proper way to preserve the remote tmp dir when running
other modules from the action plugin.
---
lib/ansible/runner/action_plugins/unarchive.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py
index db94ac26e7d707..312a2265c0f55a 100644
--- a/lib/ansible/runner/action_plugins/unarchive.py
+++ b/lib/ansible/runner/action_plugins/unarchive.py
@@ -62,7 +62,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
module_args_tmp = ""
complex_args_tmp = dict(path=creates, get_md5=False, get_checksum=False)
module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args_tmp, persist_files=True)
+ complex_args=complex_args_tmp, delete_remote_tmp=False)
stat = module_return.result.get('stat', None)
if stat and stat.get('exists', False):
return ReturnData(
From b1d78a61fca18b95dbf1dfd6a32382ce546c0980 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Thu, 12 Mar 2015 12:14:57 -0500
Subject: [PATCH 0066/3617] Initial support for vault in v2
TODO:
* password prompting needs to be implemented, but is being worked on
as part of the become privilege escalation changes
---
v2/ansible/plugins/strategies/linear.py | 13 +++++-
v2/ansible/utils/vault.py | 56 +++++++++++++++++++++++++
v2/bin/ansible-playbook | 54 +++++-------------------
3 files changed, 79 insertions(+), 44 deletions(-)
create mode 100644 v2/ansible/utils/vault.py
diff --git a/v2/ansible/plugins/strategies/linear.py b/v2/ansible/plugins/strategies/linear.py
index f8c0e3bee8e25f..c6b9445b2e673b 100644
--- a/v2/ansible/plugins/strategies/linear.py
+++ b/v2/ansible/plugins/strategies/linear.py
@@ -224,6 +224,7 @@ def __eq__(self, other):
def __repr__(self):
return "%s (%s): %s" % (self._filename, self._args, self._hosts)
+ # FIXME: this should also be moved to the base class in a method
included_files = []
for res in host_results:
if res._task.action == 'include':
@@ -253,6 +254,9 @@ def __repr__(self):
inc_file.add_host(res._host)
+ # FIXME: should this be moved into the iterator class? Main downside would be
+ # that accessing the TQM's callback member would be more difficult, if
+ # we do want to send callbacks from here
if len(included_files) > 0:
noop_task = Task()
noop_task.action = 'meta'
@@ -263,7 +267,14 @@ def __repr__(self):
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
- new_tasks = self._load_included_file(included_file)
+ try:
+ new_tasks = self._load_included_file(included_file)
+ except AnsibleError, e:
+ for host in included_file._hosts:
+ iterator.mark_host_failed(host)
+ # FIXME: callback here?
+ print(e)
+
noop_tasks = [noop_task for t in new_tasks]
for host in hosts_left:
if host in included_file._hosts:
diff --git a/v2/ansible/utils/vault.py b/v2/ansible/utils/vault.py
new file mode 100644
index 00000000000000..04634aa377b498
--- /dev/null
+++ b/v2/ansible/utils/vault.py
@@ -0,0 +1,56 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import subprocess
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.utils.path import is_executable
+
+def read_vault_file(vault_password_file):
+ """
+ Read a vault password from a file or if executable, execute the script and
+ retrieve password from STDOUT
+ """
+
+ this_path = os.path.realpath(os.path.expanduser(vault_password_file))
+ if not os.path.exists(this_path):
+ raise AnsibleError("The vault password file %s was not found" % this_path)
+
+ if is_executable(this_path):
+ try:
+ # STDERR not captured to make it easier for users to prompt for input in their scripts
+ p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
+ except OSError, e:
+ raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e))
+ stdout, stderr = p.communicate()
+ vault_pass = stdout.strip('\r\n')
+ else:
+ try:
+ f = open(this_path, "rb")
+ vault_pass=f.read().strip()
+ f.close()
+ except (OSError, IOError), e:
+ raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
+
+ return vault_pass
+
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index eafccedcba52c6..bdd9598ec82174 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -15,6 +15,7 @@ from ansible.playbook.task import Task
from ansible.utils.cli import base_parser
from ansible.utils.unicode import to_unicode
from ansible.utils.vars import combine_vars
+from ansible.utils.vault import read_vault_file
from ansible.vars import VariableManager
# Implement an ansible.utils.warning() function later
@@ -34,8 +35,8 @@ def main(args):
check_opts=True,
diff_opts=True
)
- #parser.add_option('--vault-password', dest="vault_password",
- # help="password for vault encrypted files")
+ parser.add_option('--vault-password', dest="vault_password",
+ help="password for vault encrypted files")
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
parser.add_option('-t', '--tags', dest='tags', default='all',
@@ -61,47 +62,14 @@ def main(args):
parser.print_help(file=sys.stderr)
return 1
- #---------------------------------------------------------------------------------------------------
- # FIXME: su/sudo stuff needs to be generalized
- # su and sudo command line arguments need to be mutually exclusive
- #if (options.su or options.su_user or options.ask_su_pass) and \
- # (options.sudo or options.sudo_user or options.ask_sudo_pass):
- # parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
- # "and su arguments ('-su', '--su-user', and '--ask-su-pass') are "
- # "mutually exclusive")
- #
- #if (options.ask_vault_pass and options.vault_password_file):
- # parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
- #
- #sshpass = None
- #sudopass = None
- #su_pass = None
- #vault_pass = None
- #
- #options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
- #
- #if options.listhosts or options.syntax or options.listtasks:
- # (_, _, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
- #else:
- # options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
- # # Never ask for an SSH password when we run with local connection
- # if options.connection == "local":
- # options.ask_pass = False
- # options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS
- # options.ask_su_pass = options.ask_su_pass or C.DEFAULT_ASK_SU_PASS
- # (sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass)
- # options.sudo_user = options.sudo_user or C.DEFAULT_SUDO_USER
- # options.su_user = options.su_user or C.DEFAULT_SU_USER
- #
- ## read vault_pass from a file
- #if not options.ask_vault_pass and options.vault_password_file:
- # vault_pass = utils.read_vault_file(options.vault_password_file)
- # END FIXME
- #---------------------------------------------------------------------------------------------------
-
- # FIXME: this hard-coded value will be removed after fixing the removed block
- # above, which dealt wtih asking for passwords during runtime
- vault_pass = 'testing'
+ vault_pass = None
+ if options.ask_vault_pass:
+ # FIXME: prompt here
+ pass
+ elif options.vault_password_file:
+ # read vault_pass from a file
+ vault_pass = read_vault_file(options.vault_password_file)
+
loader = DataLoader(vault_password=vault_pass)
extra_vars = {}
From b5d23543f0e71ad16dd7926a37acf0c661fe7144 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 12 Mar 2015 14:22:24 -0400
Subject: [PATCH 0067/3617] fixed and reintroduced synchronize test, fakerunner
object needed become_method to be its default 'sudo'
---
test/units/TestSynchronize.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py
index 991f272001c7e2..cf28ea5d809083 100644
--- a/test/units/TestSynchronize.py
+++ b/test/units/TestSynchronize.py
@@ -19,7 +19,7 @@ def __init__(self):
self.private_key_file = None
self.check = False
self.become = False
- self.become_method = False
+ self.become_method = 'sudo'
self.become_user = False
def _execute_module(self, conn, tmp, module_name, args,
@@ -97,9 +97,9 @@ def test_synchronize_action_sudo(self):
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- #assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar',
- # 'src':'/tmp/foo',
- # 'rsync_path':'"sudo rsync"'}, "wrong args used"
+ assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar',
+ 'src':'/tmp/foo',
+ 'rsync_path':'"sudo rsync"'}, "wrong args used"
assert runner.become == True, "sudo was not reset to True"
From 644e50fe34cc89e381b6edb12fb65130709bcfff Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 12 Mar 2015 11:37:57 -0700
Subject: [PATCH 0068/3617] Hash randomization makes one of the
heuristic_log_sanitize checks not work.
Nothing we can do, when it sanitizes ssh_urls it's simply overzealous.
---
test/units/TestModuleUtilsBasic.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py
index 2ac77764d746a9..5fd3d6b1db462e 100644
--- a/test/units/TestModuleUtilsBasic.py
+++ b/test/units/TestModuleUtilsBasic.py
@@ -321,7 +321,7 @@ def test_log_sanitize_correctness(self):
# the password we can tell some things about the beginning and end of
# the data, though:
self.assertTrue(ssh_output.startswith("{'"))
- self.assertTrue(ssh_output.endswith("'}}}}"))
+ self.assertTrue(ssh_output.endswith("}"))
try:
self.assertIn(":********@foo.com/data',", ssh_output)
except AttributeError:
From 74bf59082df50dbf216caf0de633d63eee1bdcc7 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 12 Mar 2015 14:22:24 -0400
Subject: [PATCH 0069/3617] fixed and reintroduced synchronize test, fakerunner
object needed become_method to be its default 'sudo'
---
test/units/TestSynchronize.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py
index 991f272001c7e2..cf28ea5d809083 100644
--- a/test/units/TestSynchronize.py
+++ b/test/units/TestSynchronize.py
@@ -19,7 +19,7 @@ def __init__(self):
self.private_key_file = None
self.check = False
self.become = False
- self.become_method = False
+ self.become_method = 'sudo'
self.become_user = False
def _execute_module(self, conn, tmp, module_name, args,
@@ -97,9 +97,9 @@ def test_synchronize_action_sudo(self):
x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- #assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar',
- # 'src':'/tmp/foo',
- # 'rsync_path':'"sudo rsync"'}, "wrong args used"
+ assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar',
+ 'src':'/tmp/foo',
+ 'rsync_path':'"sudo rsync"'}, "wrong args used"
assert runner.become == True, "sudo was not reset to True"
From 90886594faccc4a2bed6221172c1e7a74eaa55e5 Mon Sep 17 00:00:00 2001
From: jhermann
Date: Thu, 12 Mar 2015 03:07:41 +0100
Subject: [PATCH 0070/3617] added test requirements for pip
---
test-requirements.txt | 7 +++++++
1 file changed, 7 insertions(+)
create mode 100644 test-requirements.txt
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 00000000000000..714b65b7646146
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,7 @@
+#
+# Test requirements
+#
+
+nose
+mock
+passlib
From f05cda6ffc214072afe3e54f280a7ead3ce5623e Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 12 Mar 2015 13:20:20 -0700
Subject: [PATCH 0071/3617] Comma is also dependent on position within the hash
---
test/units/TestModuleUtilsBasic.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py
index 5fd3d6b1db462e..5b8be283071951 100644
--- a/test/units/TestModuleUtilsBasic.py
+++ b/test/units/TestModuleUtilsBasic.py
@@ -323,10 +323,10 @@ def test_log_sanitize_correctness(self):
self.assertTrue(ssh_output.startswith("{'"))
self.assertTrue(ssh_output.endswith("}"))
try:
- self.assertIn(":********@foo.com/data',", ssh_output)
+ self.assertIn(":********@foo.com/data'", ssh_output)
except AttributeError:
# python2.6 or less's unittest
- self.assertTrue(":********@foo.com/data'," in ssh_output, '%s is not present in %s' % (":********@foo.com/data',", ssh_output))
+ self.assertTrue(":********@foo.com/data'" in ssh_output, '%s is not present in %s' % (":********@foo.com/data'", ssh_output))
# The overzealous-ness here may lead to us changing the algorithm in
# the future. We could make it consume less of the data (with the
From 3d67e9e0c0df18e5c82e62fdb79820724dbe2577 Mon Sep 17 00:00:00 2001
From: James Laska
Date: Tue, 10 Mar 2015 19:38:37 -0400
Subject: [PATCH 0072/3617] Add tox and travis-ci support
Add tox integration to run unittests in supported python releases.
Travis-CI is used for test execution.
Additionally, the unittest TestQuotePgIdentifier was updated to support
using assert_raises_regexp on python-2.6.
Sample travis-ci output available at
https://travis-ci.org/ansible/ansible/builds/54189977
---
.coveragerc | 4 ++++
.gitignore | 1 +
.travis.yml | 11 +++++++++++
Makefile | 2 +-
README.md | 4 +++-
test-requirements.txt | 2 ++
tox.ini | 7 +++++++
7 files changed, 29 insertions(+), 2 deletions(-)
create mode 100644 .coveragerc
create mode 100644 .travis.yml
create mode 100644 tox.ini
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 00000000000000..812fc3b139483c
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,4 @@
+[report]
+omit =
+ */python?.?/*
+ */site-packages/nose/*
diff --git a/.gitignore b/.gitignore
index 5fe1d994e3c43e..5d3970a168353d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,6 +42,7 @@ deb-build
credentials.yml
# test output
.coverage
+.tox
results.xml
coverage.xml
/test/units/cover-html
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 00000000000000..6e18e06050cd88
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,11 @@
+sudo: false
+language: python
+env:
+ - TOXENV=py26
+ - TOXENV=py27
+install:
+ - pip install tox
+script:
+ - tox
+after_success:
+ - coveralls
diff --git a/Makefile b/Makefile
index f688bd73bf607b..81e24efab367d5 100644
--- a/Makefile
+++ b/Makefile
@@ -93,7 +93,7 @@ NOSETESTS3 ?= nosetests-3.3
all: clean python
tests:
- PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v # Could do: --with-coverage --cover-package=ansible
+ PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v --with-coverage --cover-package=ansible --cover-branches
newtests:
PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w v2/test -v --with-coverage --cover-package=ansible --cover-branches
diff --git a/README.md b/README.md
index 8bfe58a5433377..e052e78dcde29e 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,6 @@
-[](http://badge.fury.io/py/ansible) [](https://pypi.python.org/pypi/ansible)
+[](http://badge.fury.io/py/ansible)
+[](https://pypi.python.org/pypi/ansible)
+[](https://travis-ci.org/ansible/ansible)
Ansible
diff --git a/test-requirements.txt b/test-requirements.txt
index 714b65b7646146..abb61ed1e97c1d 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -5,3 +5,5 @@
nose
mock
passlib
+coverage
+coveralls
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000000000..7c86e7e08f1ff4
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,7 @@
+[tox]
+envlist = py26,py27
+
+[testenv]
+deps = -r{toxinidir}/test-requirements.txt
+whitelist_externals = make
+commands = make tests
From 60acdee0dc95a77f3f322689c75d9e5f965e71a1 Mon Sep 17 00:00:00 2001
From: James Laska
Date: Thu, 12 Mar 2015 21:18:29 -0400
Subject: [PATCH 0073/3617] Enable assert_raises_regexp on py26
---
test/units/TestModuleUtilsDatabase.py | 49 +++++++++++++++++----------
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/test/units/TestModuleUtilsDatabase.py b/test/units/TestModuleUtilsDatabase.py
index 5278d6db5aab12..67da0b60e0bd03 100644
--- a/test/units/TestModuleUtilsDatabase.py
+++ b/test/units/TestModuleUtilsDatabase.py
@@ -1,8 +1,26 @@
import collections
import mock
import os
-
-from nose import tools
+import re
+
+from nose.tools import eq_
+try:
+ from nose.tools import assert_raises_regexp
+except ImportError:
+ # Python < 2.7
+ def assert_raises_regexp(expected, regexp, callable, *a, **kw):
+ try:
+ callable(*a, **kw)
+ except expected as e:
+ if isinstance(regexp, basestring):
+ regexp = re.compile(regexp)
+ if not regexp.search(str(e)):
+ raise Exception('"%s" does not match "%s"' %
+ (regexp.pattern, str(e)))
+ else:
+ if hasattr(expected,'__name__'): excName = expected.__name__
+ else: excName = str(expected)
+ raise AssertionError("%s not raised" % excName)
from ansible.module_utils.database import (
pg_quote_identifier,
@@ -70,34 +88,31 @@ class TestQuotePgIdentifier(object):
}
def check_valid_quotes(self, identifier, quoted_identifier):
- tools.eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier)
+ eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier)
def test_valid_quotes(self):
for identifier in self.valid:
yield self.check_valid_quotes, identifier, self.valid[identifier]
def check_invalid_quotes(self, identifier, id_type, msg):
- if hasattr(tools, 'assert_raises_regexp'):
- tools.assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type))
- else:
- tools.assert_raises(SQLParseError, pg_quote_identifier, *(identifier, id_type))
+ assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type))
def test_invalid_quotes(self):
for test in self.invalid:
yield self.check_invalid_quotes, test[0], test[1], self.invalid[test]
def test_how_many_dots(self):
- tools.eq_(pg_quote_identifier('role', 'role'), '"role"')
- tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role'))
+ eq_(pg_quote_identifier('role', 'role'), '"role"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role'))
- tools.eq_(pg_quote_identifier('db', 'database'), '"db"')
- tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database'))
+ eq_(pg_quote_identifier('db', 'database'), '"db"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database'))
- tools.eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"')
- tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema'))
+ eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema'))
- tools.eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"')
- tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table'))
+ eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table'))
- tools.eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"')
- tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column'))
+ eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"')
+ assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column'))
From f451974efe1bc462a21652887c873eaf0c7c335c Mon Sep 17 00:00:00 2001
From: James Laska
Date: Fri, 13 Mar 2015 10:56:30 -0400
Subject: [PATCH 0074/3617] Use correct URL for travis status badge
This uses the `devel` branch when displaying the travis-ci status badge.
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index e052e78dcde29e..2a7d8e03af7181 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
[](http://badge.fury.io/py/ansible)
[](https://pypi.python.org/pypi/ansible)
-[](https://travis-ci.org/ansible/ansible)
+[](https://travis-ci.org/ansible/ansible)
Ansible
From 070c7c319ff6c2246c8df402a80370e656e99135 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Fri, 13 Mar 2015 11:57:27 -0500
Subject: [PATCH 0075/3617] Started implementing become in v2
---
v2/ansible/constants.py | 20 ++++-
v2/ansible/executor/connection_info.py | 102 +++++++++++++++---------
v2/ansible/executor/play_iterator.py | 2 +-
v2/ansible/executor/task_executor.py | 1 +
v2/ansible/playbook/base.py | 5 ++
v2/ansible/playbook/become.py | 88 ++++++++++++++++++++
v2/ansible/playbook/block.py | 4 +-
v2/ansible/playbook/play.py | 20 ++---
v2/ansible/playbook/playbook_include.py | 2 +-
v2/ansible/playbook/role/definition.py | 2 +-
v2/ansible/playbook/task.py | 12 +--
v2/ansible/plugins/action/__init__.py | 25 +++---
v2/ansible/plugins/connections/local.py | 6 +-
v2/ansible/plugins/connections/ssh.py | 28 ++++---
v2/ansible/utils/cli.py | 9 +++
v2/samples/test_become.yml | 7 ++
16 files changed, 238 insertions(+), 95 deletions(-)
create mode 100644 v2/ansible/playbook/become.py
create mode 100644 v2/samples/test_become.yml
diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py
index 78eeaf8c20c5d2..f2da07ffb02059 100644
--- a/v2/ansible/constants.py
+++ b/v2/ansible/constants.py
@@ -24,7 +24,6 @@
import sys
from . compat import configparser
-
from string import ascii_letters, digits
# copied from utils, avoid circular reference fun :)
@@ -143,6 +142,19 @@ def shell_expand_path(path):
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
+#TODO: get rid of ternary chain mess
+BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
+BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
+DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True)
+DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
+DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root')
+DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS',True if DEFAULT_ASK_SUDO_PASS else False, boolean=True)
+# need to rethink implementing these 2
+DEFAULT_BECOME_EXE = None
+#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo')
+#DEFAULT_BECOME_FLAGS = get_config(p, DEFAULTS, 'become_flags', 'ANSIBLE_BECOME_FLAGS',DEFAULT_SUDO_FLAGS if DEFAULT_SUDO else DEFAULT_SU_FLAGS if DEFAULT_SU else '-H')
+
+
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins')
@@ -168,12 +180,15 @@ def shell_expand_path(path):
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
+RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
+RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
+
# CONNECTION RELATED
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None)
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
-# obsolete -- will be formally removed in 1.6
+# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
@@ -189,6 +204,7 @@ def shell_expand_path(path):
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
+DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py
index 7522ac210c2858..f2eaec630d440b 100644
--- a/v2/ansible/executor/connection_info.py
+++ b/v2/ansible/executor/connection_info.py
@@ -48,16 +48,16 @@ def __init__(self, play=None, options=None):
self.password = ''
self.port = 22
self.private_key_file = None
- self.su = False
- self.su_user = ''
- self.su_pass = ''
- self.sudo = False
- self.sudo_user = ''
- self.sudo_pass = ''
self.verbosity = 0
self.only_tags = set()
self.skip_tags = set()
+ # privilege escalation
+ self.become = False
+ self.become_method = C.DEFAULT_BECOME_METHOD
+ self.become_user = ''
+ self.become_pass = ''
+
self.no_log = False
self.check_mode = False
@@ -84,15 +84,13 @@ def set_play(self, play):
if play.connection:
self.connection = play.connection
- self.remote_user = play.remote_user
- self.password = ''
- self.port = int(play.port) if play.port else 22
- self.su = play.su
- self.su_user = play.su_user
- self.su_pass = play.su_pass
- self.sudo = play.sudo
- self.sudo_user = play.sudo_user
- self.sudo_pass = play.sudo_pass
+ self.remote_user = play.remote_user
+ self.password = ''
+ self.port = int(play.port) if play.port else 22
+ self.become = play.become
+ self.become_method = play.become_method
+ self.become_user = play.become_user
+ self.become_pass = play.become_pass
# non connection related
self.no_log = play.no_log
@@ -158,7 +156,7 @@ def set_task_override(self, task):
new_info = ConnectionInformation()
new_info.copy(self)
- for attr in ('connection', 'remote_user', 'su', 'su_user', 'su_pass', 'sudo', 'sudo_user', 'sudo_pass', 'environment', 'no_log'):
+ for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'):
if hasattr(task, attr):
attr_val = getattr(task, attr)
if attr_val:
@@ -166,31 +164,58 @@ def set_task_override(self, task):
return new_info
- def make_sudo_cmd(self, sudo_exe, executable, cmd):
+ def make_become_cmd(self, cmd, shell, become_settings=None):
+
"""
- Helper function for wrapping commands with sudo.
-
- Rather than detect if sudo wants a password this time, -k makes
- sudo always ask for a password if one is required. Passing a quoted
- compound command to sudo (or sudo -s) directly doesn't work, so we
- shellquote it with pipes.quote() and pass the quoted string to the
- user's shell. We loop reading output until we see the randomly-
- generated sudo prompt set with the -p option.
+ helper function to create privilege escalation commands
"""
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
- prompt = '[sudo via ansible, key=%s] password: ' % randbits
- success_key = 'SUDO-SUCCESS-%s' % randbits
-
- sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % (
- sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, prompt,
- self.sudo_user, executable or '$SHELL',
- pipes.quote('echo %s; %s' % (success_key, cmd))
- )
-
- # FIXME: old code, can probably be removed as it's been commented out for a while
- #return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key)
- return (sudocmd, prompt, success_key)
+ # FIXME: become settings should probably be stored in the connection info itself
+ if become_settings is None:
+ become_settings = {}
+
+ randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
+ success_key = 'BECOME-SUCCESS-%s' % randbits
+ prompt = None
+ becomecmd = None
+
+ shell = shell or '$SHELL'
+
+ if self.become_method == 'sudo':
+ # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
+ # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
+ # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
+ # string to the user's shell. We loop reading output until we see the randomly-generated
+ # sudo prompt set with the -p option.
+ prompt = '[sudo via ansible, key=%s] password: ' % randbits
+ exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE)
+ flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS)
+ becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \
+ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, shell, 'echo %s; %s' % (success_key, cmd))
+
+ elif self.become_method == 'su':
+ exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE)
+ flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS)
+ becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
+
+ elif self.become_method == 'pbrun':
+ exe = become_settings.get('pbrun_exe', 'pbrun')
+ flags = become_settings.get('pbrun_flags', '')
+ becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, 'echo %s; %s' % (success_key,cmd))
+
+ elif self.become_method == 'pfexec':
+ exe = become_settings.get('pfexec_exe', 'pbrun')
+ flags = become_settings.get('pfexec_flags', '')
+ # No user as it uses its own exec_attr to figure it out
+ becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd))
+ elif self.become:
+ raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
+
+ return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
+
+ def check_become_success(self, output, become_settings):
+ #TODO: implement
+ pass
def _get_fields(self):
return [i for i in self.__dict__.keys() if i[:1] != '_']
@@ -204,4 +229,3 @@ def post_validate(self, variables, loader):
for field in self._get_fields():
value = templar.template(getattr(self, field))
setattr(self, field, value)
-
diff --git a/v2/ansible/executor/play_iterator.py b/v2/ansible/executor/play_iterator.py
index 0461fc87f2c594..4a149243d9118d 100644
--- a/v2/ansible/executor/play_iterator.py
+++ b/v2/ansible/executor/play_iterator.py
@@ -197,7 +197,7 @@ def mark_host_failed(self, host):
self._host_states[host.name] = s
def get_failed_hosts(self):
- return dict((host, True) for (host, state) in self._host_states.iteritems() if state.run_state == self.ITERATING_COMPLETE and state.failed_state != self.FAILED_NONE)
+ return dict((host, True) for (host, state) in self._host_states.iteritems() if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
def get_original_task(self, host, task):
'''
diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py
index 012fb991949b54..bad47279a5e8f3 100644
--- a/v2/ansible/executor/task_executor.py
+++ b/v2/ansible/executor/task_executor.py
@@ -33,6 +33,7 @@
import json
import time
+import pipes
class TaskExecutor:
diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py
index 691de0c9f0f85b..949e6a09fdc652 100644
--- a/v2/ansible/playbook/base.py
+++ b/v2/ansible/playbook/base.py
@@ -72,6 +72,11 @@ def _get_base_attributes(self):
def munge(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
+ for base_class in self.__class__.__bases__:
+ method = getattr(self, ("_munge_%s" % base_class.__name__).lower(), None)
+ if method:
+ ds = method(ds)
+
return ds
def load_data(self, ds, variable_manager=None, loader=None):
diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py
new file mode 100644
index 00000000000000..6ac1d2bad986ca
--- /dev/null
+++ b/v2/ansible/playbook/become.py
@@ -0,0 +1,88 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.playbook.attribute import Attribute, FieldAttribute
+#from ansible.utils.display import deprecated
+
+class Become:
+
+ # Privilege escalation
+ _become = FieldAttribute(isa='bool', default=False)
+ _become_method = FieldAttribute(isa='string')
+ _become_user = FieldAttribute(isa='string')
+ _become_pass = FieldAttribute(isa='string')
+
+ def __init__(self):
+ return super(Become, self).__init__()
+
+ def _detect_privilege_escalation_conflict(self, ds):
+
+ # Fail out if user specifies conflicting privilege escalations
+ has_become = 'become' in ds or 'become_user'in ds
+ has_sudo = 'sudo' in ds or 'sudo_user' in ds
+ has_su = 'su' in ds or 'su_user' in ds
+
+ if has_become:
+ msg = 'The become params ("become", "become_user") and'
+ if has_sudo:
+ raise errors.AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg)
+ elif has_su:
+ raise errors.AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg)
+ elif has_sudo and has_su:
+ raise errors.AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
+
+ def _munge_become(self, ds):
+
+ self._detect_privilege_escalation_conflict(ds)
+
+ # Setting user implies setting become/sudo/su to true
+ if 'become_user' in ds and not ds.get('become', False):
+ ds['become'] = True
+
+ # Privilege escalation, backwards compatibility for sudo/su
+ if 'sudo' in ds or 'sudo_user' in ds:
+ ds['become_method'] = 'sudo'
+ if 'sudo' in ds:
+ ds['become'] = ds['sudo']
+ del ds['sudo']
+ else:
+ ds['become'] = True
+ if 'sudo_user' in ds:
+ ds['become_user'] = ds['sudo_user']
+ del ds['sudo_user']
+
+ #deprecated("Instead of sudo/sudo_user, use become/become_user and set become_method to 'sudo' (default)")
+
+ elif 'su' in ds or 'su_user' in ds:
+ ds['become_method'] = 'su'
+ if 'su' in ds:
+ ds['become'] = ds['su']
+ del ds['su']
+ else:
+ ds['become'] = True
+ if 'su_user' in ds:
+ ds['become_user'] = ds['su_user']
+ del ds['su_user']
+
+ #deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
+
+ return ds
diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py
index 533b552f22e0a9..49f65a15349452 100644
--- a/v2/ansible/playbook/block.py
+++ b/v2/ansible/playbook/block.py
@@ -21,6 +21,7 @@
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
+#from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_tasks
from ansible.playbook.role import Role
@@ -80,7 +81,8 @@ def munge(self, ds):
return dict(block=ds)
else:
return dict(block=[ds])
- return ds
+
+ return super(Block, self).munge(ds)
def _load_block(self, attr, ds):
return load_list_of_tasks(
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
index 29c9c04cc8e098..e9847fccd90640 100644
--- a/v2/ansible/playbook/play.py
+++ b/v2/ansible/playbook/play.py
@@ -23,6 +23,7 @@
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
+from ansible.playbook.become import Become
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles, compile_block_list
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
@@ -33,7 +34,7 @@
__all__ = ['Play']
-class Play(Base, Taggable):
+class Play(Base, Taggable, Become):
"""
A play is a language feature that represents a list of roles and/or
@@ -47,21 +48,19 @@ class Play(Base, Taggable):
# =================================================================================
# Connection-Related Attributes
+
+ # TODO: generalize connection
_accelerate = FieldAttribute(isa='bool', default=False)
_accelerate_ipv6 = FieldAttribute(isa='bool', default=False)
- _accelerate_port = FieldAttribute(isa='int', default=5099)
+ _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port
+
+ # Connection
_connection = FieldAttribute(isa='string', default='smart')
_gather_facts = FieldAttribute(isa='string', default='smart')
_hosts = FieldAttribute(isa='list', default=[], required=True)
_name = FieldAttribute(isa='string', default='')
_port = FieldAttribute(isa='int', default=22)
_remote_user = FieldAttribute(isa='string', default='root')
- _su = FieldAttribute(isa='bool', default=False)
- _su_user = FieldAttribute(isa='string', default='root')
- _su_pass = FieldAttribute(isa='string')
- _sudo = FieldAttribute(isa='bool', default=False)
- _sudo_user = FieldAttribute(isa='string', default='root')
- _sudo_pass = FieldAttribute(isa='string')
# Variable Attributes
_vars = FieldAttribute(isa='dict', default=dict())
@@ -101,6 +100,7 @@ def get_name(self):
@staticmethod
def load(data, variable_manager=None, loader=None):
p = Play()
+ print("in play load, become is: %s" % getattr(p, 'become'))
return p.load_data(data, variable_manager=variable_manager, loader=loader)
def munge(self, ds):
@@ -122,7 +122,7 @@ def munge(self, ds):
ds['remote_user'] = ds['user']
del ds['user']
- return ds
+ return super(Play, self).munge(ds)
def _load_vars(self, attr, ds):
'''
@@ -187,7 +187,7 @@ def _load_roles(self, attr, ds):
roles.append(Role.load(ri))
return roles
- # FIXME: post_validation needs to ensure that su/sudo are not both set
+ # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set
def _compile_roles(self):
'''
diff --git a/v2/ansible/playbook/playbook_include.py b/v2/ansible/playbook/playbook_include.py
index 159c3d25da8e58..e1d7f6be34f24b 100644
--- a/v2/ansible/playbook/playbook_include.py
+++ b/v2/ansible/playbook/playbook_include.py
@@ -98,7 +98,7 @@ def munge(self, ds):
raise AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds)
new_ds[k] = v
- return new_ds
+ return super(PlaybookInclude, self).munge(new_ds)
def _munge_include(self, ds, new_ds, k, v):
'''
diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py
index c9ec4259c17582..d52c6795fb92d4 100644
--- a/v2/ansible/playbook/role/definition.py
+++ b/v2/ansible/playbook/role/definition.py
@@ -88,7 +88,7 @@ def munge(self, ds):
self._ds = ds
# and return the cleaned-up data structure
- return new_ds
+ return super(RoleDefinition, self).munge(new_ds)
def _load_role_name(self, ds):
'''
diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py
index e6fcc13d2591d5..79ec2df3401ad5 100644
--- a/v2/ansible/playbook/task.py
+++ b/v2/ansible/playbook/task.py
@@ -29,6 +29,7 @@
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
+from ansible.playbook.become import Become
from ansible.playbook.block import Block
from ansible.playbook.conditional import Conditional
from ansible.playbook.role import Role
@@ -36,7 +37,7 @@
__all__ = ['Task']
-class Task(Base, Conditional, Taggable):
+class Task(Base, Conditional, Taggable, Become):
"""
A task is a language feature that represents a call to a module, with given arguments and other parameters.
@@ -86,12 +87,6 @@ class Task(Base, Conditional, Taggable):
_remote_user = FieldAttribute(isa='string')
_retries = FieldAttribute(isa='int', default=1)
_run_once = FieldAttribute(isa='bool')
- _su = FieldAttribute(isa='bool')
- _su_pass = FieldAttribute(isa='string')
- _su_user = FieldAttribute(isa='string')
- _sudo = FieldAttribute(isa='bool')
- _sudo_user = FieldAttribute(isa='string')
- _sudo_pass = FieldAttribute(isa='string')
_transport = FieldAttribute(isa='string')
_until = FieldAttribute(isa='list') # ?
_vars = FieldAttribute(isa='dict', default=dict())
@@ -172,6 +167,7 @@ def munge(self, ds):
args_parser = ModuleArgsParser(task_ds=ds)
(action, args, delegate_to) = args_parser.parse()
+
new_ds['action'] = action
new_ds['args'] = args
new_ds['delegate_to'] = delegate_to
@@ -186,7 +182,7 @@ def munge(self, ds):
else:
new_ds[k] = v
- return new_ds
+ return super(Task, self).munge(new_ds)
def post_validate(self, all_vars=dict(), fail_on_undefined=True):
'''
diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py
index 1dc9d59aa0841f..46f25ec503c3e3 100644
--- a/v2/ansible/plugins/action/__init__.py
+++ b/v2/ansible/plugins/action/__init__.py
@@ -130,7 +130,7 @@ def _late_needs_tmp_path(self, tmp, module_style):
if tmp and "tmp" in tmp:
# tmp has already been created
return False
- if not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.su:
+ if not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.become:
# tmp is necessary to store module source code
return True
if not self._connection._has_pipelining:
@@ -152,12 +152,11 @@ def _make_tmp_path(self):
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False
- if (self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root'):
+ if self._connection_info.become and self._connection_info.become_user != 'root':
use_system_tmp = True
tmp_mode = None
- if self._connection_info.remote_user != 'root' or \
- ((self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root')):
+ if self._connection_info.remote_user != 'root' or self._connection_info.become and self._connection_info.become_user != 'root':
tmp_mode = 'a+rx'
cmd = self._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
@@ -291,10 +290,8 @@ def _remote_expand_user(self, path, tmp):
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
- if self._connection_info.sudo and self._connection_info.sudo_user:
- expand_path = '~%s' % self._connection_info.sudo_user
- elif self._connection_info.su and self._connection_info.su_user:
- expand_path = '~%s' % self._connection_info.su_user
+ if self._connection_info.become and self._connection_info.become_user:
+ expand_path = '~%s' % self._connection_info.become_user
cmd = self._shell.expand_user(expand_path)
debug("calling _low_level_execute_command to expand the remote user path")
@@ -373,7 +370,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_
environment_string = self._compute_environment_string()
- if tmp and "tmp" in tmp and ((self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root')):
+ if tmp and "tmp" in tmp and self._connection_info.become and self._connection_info.become_user != 'root':
# deal with possible umask issues once sudo'ed to other user
self._remote_chmod(tmp, 'a+r', remote_module_path)
@@ -391,7 +388,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_
rm_tmp = None
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if not self._connection_info.sudo or self._connection_info.su or self._connection_info.sudo_user == 'root' or self._connection_info.su_user == 'root':
+ if not self._connection_info.become or self._connection_info.become_user == 'root':
# not sudoing or sudoing to root, so can cleanup files in the same step
rm_tmp = tmp
@@ -409,7 +406,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_
debug("_low_level_execute_command returned ok")
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if (self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root'):
+ if self._connection_info.become and self._connection_info.become_user != 'root':
# not sudoing to root, so maybe can't delete files as that other user
# have to clean up temp files as original user in a second step
cmd2 = self._shell.remove(tmp, recurse=True)
@@ -457,11 +454,7 @@ def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, i
success_key = None
if sudoable:
- if self._connection_info.su and self._connection_info.su_user:
- cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd)
- elif self._connection_info.sudo and self._connection_info.sudo_user:
- # FIXME: hard-coded sudo_exe here
- cmd, prompt, success_key = self._connection_info.make_sudo_cmd('/usr/bin/sudo', executable, cmd)
+ cmd, prompt, success_key = self._connection_info.make_become_cmd(executable, cmd)
debug("executing the command %s through the connection" % cmd)
rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, executable=executable, in_data=in_data)
diff --git a/v2/ansible/plugins/connections/local.py b/v2/ansible/plugins/connections/local.py
index 963c8c2d4ece57..d75ee70159eab3 100644
--- a/v2/ansible/plugins/connections/local.py
+++ b/v2/ansible/plugins/connections/local.py
@@ -44,8 +44,8 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
debug("in local.exec_command()")
# su requires to be run from a terminal, and therefore isn't supported here (yet?)
- if self._connection_info.su:
- raise AnsibleError("Internal Error: this module does not support running commands via su")
+ #if self._connection_info.su:
+ # raise AnsibleError("Internal Error: this module does not support running commands via su")
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
@@ -57,7 +57,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
# else:
# local_cmd = cmd
#else:
- # local_cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
+ # local_cmd, prompt, success_key = utils.make_become_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
if executable:
local_cmd = executable.split() + ['-c', cmd]
else:
diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py
index 6c0ab9917c1aff..e5b397f5659fc1 100644
--- a/v2/ansible/plugins/connections/ssh.py
+++ b/v2/ansible/plugins/connections/ssh.py
@@ -281,19 +281,19 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
# ssh_cmd += ['-6']
ssh_cmd += [self._connection_info.remote_addr]
- if not (self._connection_info.sudo or self._connection_info.su):
- prompt = None
- if executable:
- ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
- else:
- ssh_cmd.append(cmd)
- elif self._connection_info.su and self._connection_info.su_user:
- su_cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd)
- ssh_cmd.append(su_cmd)
- else:
- # FIXME: hard-coded sudo_exe here
- sudo_cmd, prompt, success_key = self._connection_info.make_sudo_cmd('/usr/bin/sudo', executable, cmd)
- ssh_cmd.append(sudo_cmd)
+ #if not (self._connection_info.sudo or self._connection_info.su):
+ # prompt = None
+ # if executable:
+ # ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
+ # else:
+ # ssh_cmd.append(cmd)
+ #elif self._connection_info.su and self._connection_info.su_user:
+ # su_cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd)
+ # ssh_cmd.append(su_cmd)
+ #else:
+ # # FIXME: hard-coded sudo_exe here
+ # sudo_cmd, prompt, success_key = self._connection_info.make_become_cmd('/usr/bin/sudo', executable, cmd)
+ # ssh_cmd.append(sudo_cmd)
self._display.vvv("EXEC %s" % ' '.join(ssh_cmd), host=self._connection_info.remote_addr)
@@ -369,6 +369,8 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
# no_prompt_err += sudo_errput
#(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
+ # FIXME: the prompt won't be here anymore
+ prompt=""
(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, prompt=prompt)
#if C.HOST_KEY_CHECKING and not_in_host_file:
diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py
index 43aa21470d3c44..f846d6f73ca336 100644
--- a/v2/ansible/utils/cli.py
+++ b/v2/ansible/utils/cli.py
@@ -59,6 +59,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False,
help='ask for sudo password')
parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
help='ask for su password')
+ parser.add_option('--ask-become-pass', default=False, dest='ask_become_pass', action='store_true',
help='ask for privilege escalation password')
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
@@ -84,6 +86,10 @@ def base_parser(usage="", output_opts=False, runas_opts=False,
help='log output to this directory')
if runas_opts:
+ parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true",
+ dest='become', help="run operations with become (nopasswd implied)")
+ parser.add_option('-B', '--become-user', help='run operations as this '
+ 'user (default=%s)' % C.DEFAULT_BECOME_USER)
parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true",
dest='sudo', help="run operations with sudo (nopasswd)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
@@ -100,6 +106,9 @@ def base_parser(usage="", output_opts=False, runas_opts=False,
parser.add_option('-c', '--connection', dest='connection',
default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
+ parser.add_option('--become-method', dest='become_method',
+ default=C.DEFAULT_BECOME_METHOD,
help="privilege escalation method to use (default=%s)" % C.DEFAULT_BECOME_METHOD)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int',
diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml
new file mode 100644
index 00000000000000..7e229af5de2639
--- /dev/null
+++ b/v2/samples/test_become.yml
@@ -0,0 +1,7 @@
+- hosts: all
+ gather_facts: no
+ tasks:
+ - command: whoami
+ become: yes
+ become_user: jamesc
+ become_method: su
From 8e346186b2df34b976e7e268cc7446da3f6fac5b Mon Sep 17 00:00:00 2001
From: Michael Crilly
Date: Fri, 13 Mar 2015 18:07:18 +0000
Subject: [PATCH 0076/3617] Correct version number.
1.8.4 is the latest stable now, I believe.
---
docsite/rst/index.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst
index c8d263d01aecf3..1afa47db87d82c 100644
--- a/docsite/rst/index.rst
+++ b/docsite/rst/index.rst
@@ -16,7 +16,7 @@ We believe simplicity is relevant to all sizes of environments and design for bu
Ansible manages machines in an agentless manner. There is never a question of how to
upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems.
-This documentation covers the current released version of Ansible (1.8.2) and also some development version features (1.9). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release.
+This documentation covers the current released version of Ansible (1.8.4) and also some development version features (1.9). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release.
.. _an_introduction:
From 70f56c135cbb14e4b7206594695e1623dcc1d6c8 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Fri, 13 Mar 2015 11:44:58 -0700
Subject: [PATCH 0077/3617] Port #10357 to v2
---
v2/ansible/module_utils/facts.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/v2/ansible/module_utils/facts.py b/v2/ansible/module_utils/facts.py
index 6d602af7366eca..d18615857cc665 100644
--- a/v2/ansible/module_utils/facts.py
+++ b/v2/ansible/module_utils/facts.py
@@ -2288,7 +2288,7 @@ def get_virtual_facts(self):
if os.path.exists('/proc/1/cgroup'):
for line in get_file_lines('/proc/1/cgroup'):
- if re.search('/docker/', line):
+ if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
From 22304afd1db399cfb94ae485486384ca3c9c0e33 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Fri, 13 Mar 2015 15:31:20 -0500
Subject: [PATCH 0078/3617] More fixing of become stuff in v2
---
v2/ansible/executor/connection_info.py | 72 +++++++++++++------------
v2/ansible/playbook/play.py | 1 -
v2/ansible/plugins/action/__init__.py | 2 +-
v2/ansible/plugins/connections/local.py | 19 ++-----
v2/ansible/plugins/connections/ssh.py | 15 +-----
v2/samples/test_become.yml | 3 +-
6 files changed, 44 insertions(+), 68 deletions(-)
diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py
index f2eaec630d440b..b918dc6b2144cf 100644
--- a/v2/ansible/executor/connection_info.py
+++ b/v2/ansible/executor/connection_info.py
@@ -164,7 +164,7 @@ def set_task_override(self, task):
return new_info
- def make_become_cmd(self, cmd, shell, become_settings=None):
+ def make_become_cmd(self, cmd, executable, become_settings=None):
"""
helper function to create privilege escalation commands
@@ -179,39 +179,43 @@ def make_become_cmd(self, cmd, shell, become_settings=None):
prompt = None
becomecmd = None
- shell = shell or '$SHELL'
-
- if self.become_method == 'sudo':
- # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
- # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
- # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
- # string to the user's shell. We loop reading output until we see the randomly-generated
- # sudo prompt set with the -p option.
- prompt = '[sudo via ansible, key=%s] password: ' % randbits
- exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE)
- flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS)
- becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \
- (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, shell, 'echo %s; %s' % (success_key, cmd))
-
- elif self.become_method == 'su':
- exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE)
- flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS)
- becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
-
- elif self.become_method == 'pbrun':
- exe = become_settings.get('pbrun_exe', 'pbrun')
- flags = become_settings.get('pbrun_flags', '')
- becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, 'echo %s; %s' % (success_key,cmd))
-
- elif self.become_method == 'pfexec':
- exe = become_settings.get('pfexec_exe', 'pbrun')
- flags = become_settings.get('pfexec_flags', '')
- # No user as it uses it's own exec_attr to figure it out
- becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd))
- elif self.become:
- raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
-
- return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
+ executable = executable or '$SHELL'
+
+ if self.become:
+ if self.become_method == 'sudo':
+ # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
+ # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
+ # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
+ # string to the user's shell. We loop reading output until we see the randomly-generated
+ # sudo prompt set with the -p option.
+ prompt = '[sudo via ansible, key=%s] password: ' % randbits
+ exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE)
+ flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS)
+ becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \
+ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, 'echo %s; %s' % (success_key, cmd))
+
+ elif self.become_method == 'su':
+ exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE)
+ flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS)
+ becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, pipes.quote('echo %s; %s' % (success_key, cmd)))
+
+ elif self.become_method == 'pbrun':
+ exe = become_settings.get('pbrun_exe', 'pbrun')
+ flags = become_settings.get('pbrun_flags', '')
+ becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, 'echo %s; %s' % (success_key,cmd))
+
+ elif self.become_method == 'pfexec':
+ exe = become_settings.get('pfexec_exe', 'pbrun')
+ flags = become_settings.get('pfexec_flags', '')
+ # No user as it uses its own exec_attr to figure it out
+ becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd))
+
+ else:
+ raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
+
+ return (('%s -c ' % executable) + pipes.quote(becomecmd), prompt, success_key)
+
+ return (cmd, "", "")
def check_become_success(self, output, become_settings):
#TODO: implement
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
index e9847fccd90640..cbe4e038617a82 100644
--- a/v2/ansible/playbook/play.py
+++ b/v2/ansible/playbook/play.py
@@ -100,7 +100,6 @@ def get_name(self):
@staticmethod
def load(data, variable_manager=None, loader=None):
p = Play()
- print("in play load, become is: %s" % getattr(p, 'become'))
return p.load_data(data, variable_manager=variable_manager, loader=loader)
def munge(self, ds):
diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py
index 46f25ec503c3e3..d430bd748beb1f 100644
--- a/v2/ansible/plugins/action/__init__.py
+++ b/v2/ansible/plugins/action/__init__.py
@@ -454,7 +454,7 @@ def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, i
success_key = None
if sudoable:
- cmd, prompt, success_key = self._connection_info.make_become_cmd(executable, cmd)
+ cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd, executable)
debug("executing the command %s through the connection" % cmd)
rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, executable=executable, in_data=in_data)
diff --git a/v2/ansible/plugins/connections/local.py b/v2/ansible/plugins/connections/local.py
index d75ee70159eab3..c847ee79d5d0ef 100644
--- a/v2/ansible/plugins/connections/local.py
+++ b/v2/ansible/plugins/connections/local.py
@@ -50,27 +50,14 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
- # FIXME: su/sudo stuff needs to be generalized
- #if not self.runner.sudo or not sudoable:
- # if executable:
- # local_cmd = executable.split() + ['-c', cmd]
- # else:
- # local_cmd = cmd
- #else:
- # local_cmd, prompt, success_key = utils.make_become_cmd(self.runner.sudo_exe, sudo_user, executable, cmd)
- if executable:
- local_cmd = executable.split() + ['-c', cmd]
- else:
- local_cmd = cmd
-
executable = executable.split()[0] if executable else None
- self._display.vvv("%s EXEC %s" % (self._connection_info.remote_addr, local_cmd))
+ self._display.vvv("%s EXEC %s" % (self._connection_info.remote_addr, cmd))
# FIXME: cwd= needs to be set to the basedir of the playbook
debug("opening command with Popen()")
p = subprocess.Popen(
- local_cmd,
- shell=isinstance(local_cmd, basestring),
+ cmd,
+ shell=isinstance(cmd, basestring),
executable=executable, #cwd=...
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py
index e5b397f5659fc1..e233a704f987a4 100644
--- a/v2/ansible/plugins/connections/ssh.py
+++ b/v2/ansible/plugins/connections/ssh.py
@@ -281,20 +281,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
# ssh_cmd += ['-6']
ssh_cmd += [self._connection_info.remote_addr]
- #if not (self._connection_info.sudo or self._connection_info.su):
- # prompt = None
- # if executable:
- # ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
- # else:
- # ssh_cmd.append(cmd)
- #elif self._connection_info.su and self._connection_info.su_user:
- # su_cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd)
- # ssh_cmd.append(su_cmd)
- #else:
- # # FIXME: hard-coded sudo_exe here
- # sudo_cmd, prompt, success_key = self._connection_info.make_become_cmd('/usr/bin/sudo', executable, cmd)
- # ssh_cmd.append(sudo_cmd)
-
+ ssh_cmd.append(cmd)
self._display.vvv("EXEC %s" % ' '.join(ssh_cmd), host=self._connection_info.remote_addr)
not_in_host_file = self.not_in_host_file(self._connection_info.remote_addr)
diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml
index 7e229af5de2639..8e753beade313b 100644
--- a/v2/samples/test_become.yml
+++ b/v2/samples/test_become.yml
@@ -2,6 +2,5 @@
gather_facts: no
tasks:
- command: whoami
- become: yes
- become_user: jamesc
+ become_user: testing
become_method: su
From 7813ffd719e73670e715e09f0e4256facf453002 Mon Sep 17 00:00:00 2001
From: Chris Blumentritt
Date: Fri, 13 Mar 2015 15:35:31 -0500
Subject: [PATCH 0079/3617] Adding uptime_seconds fact for linux and darwin
platforms
Adds ansible_uptime_seconds facts for linux and darwin platforms. BSD
platforms may also work.
---
lib/ansible/module_utils/facts.py | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 424c388fb6ee2e..c1951925e4ba71 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -652,6 +652,7 @@ def populate(self):
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
+ self.get_uptime_facts()
try:
self.get_mount_facts()
except TimeoutError:
@@ -990,6 +991,9 @@ def get_device_facts(self):
self.facts['devices'][diskname] = d
+ def get_uptime_facts(self):
+ uptime_seconds_string = get_file_content('/proc/uptime').split(' ')[0]
+ self.facts['uptime_seconds'] = int(float(uptime_seconds_string))
class SunOSHardware(Hardware):
"""
@@ -1588,6 +1592,7 @@ def populate(self):
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
+ self.get_uptime_facts()
return self.facts
def get_sysctl(self):
@@ -1635,6 +1640,12 @@ def get_memory_facts(self):
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
+ def get_uptime_facts(self):
+ kern_boottime = self.sysctl['kern.boottime']
+ boottime = datetime.datetime.strptime(kern_boottime, "%a %b %d %H:%M:%S %Y")
+ delta = datetime.datetime.now() - boottime
+ self.facts['uptime_seconds'] = int(delta.total_seconds())
+
class Network(Facts):
"""
This is a generic Network subclass of Facts. This should be further
From 731b268cd6c76572cd5d66c27be254ccdb2952c6 Mon Sep 17 00:00:00 2001
From: Patrik Lundin
Date: Sun, 15 Mar 2015 07:59:54 +0100
Subject: [PATCH 0080/3617] env-setup: Don't use ${.sh.file} if shell is pdksh
The default ksh in OpenBSD throws the following error:
===
$ . hacking/env-setup
ksh: hacking/env-setup[23]: ${.sh.file}": bad substitution
[...]
===
The same error can be seen on Linux if pdksh is used.
---
hacking/env-setup | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hacking/env-setup b/hacking/env-setup
index 9b9a529d13a4e8..16baa9b1b75d25 100644
--- a/hacking/env-setup
+++ b/hacking/env-setup
@@ -16,7 +16,7 @@ if [ -n "$BASH_SOURCE" ] ; then
HACKING_DIR=$(dirname "$BASH_SOURCE")
elif [ $(basename -- "$0") = "env-setup" ]; then
HACKING_DIR=$(dirname "$0")
-elif [ -n "$KSH_VERSION" ]; then
+elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then
HACKING_DIR=$(dirname "${.sh.file}")
else
HACKING_DIR="$PWD/hacking"
From fbff0449ce08fe2a57724e66938886e203173611 Mon Sep 17 00:00:00 2001
From: Steve Gargan
Date: Sun, 15 Mar 2015 12:20:34 +0000
Subject: [PATCH 0081/3617] fix for issue #10422. outputs informative error
message when AWS credentials are not available
---
plugins/inventory/ec2.py | 57 ++++++++++++++++++++++++----------------
1 file changed, 35 insertions(+), 22 deletions(-)
diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py
index 0f7c19857520d1..617463355f064f 100755
--- a/plugins/inventory/ec2.py
+++ b/plugins/inventory/ec2.py
@@ -334,23 +334,24 @@ def do_api_calls_update_cache(self):
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
+ def connect(self, region):
+ ''' create connection to api server'''
+ if self.eucalyptus:
+ conn = boto.connect_euca(host=self.eucalyptus_host)
+ conn.APIVersion = '2010-08-31'
+ else:
+ conn = ec2.connect_to_region(region)
+ # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
+ if conn is None:
+ raise Exception("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
+ return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
- if self.eucalyptus:
- conn = boto.connect_euca(host=self.eucalyptus_host)
- conn.APIVersion = '2010-08-31'
- else:
- conn = ec2.connect_to_region(region)
-
- # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
- if conn is None:
- print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
- sys.exit(1)
-
+ conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.iteritems():
@@ -363,6 +364,9 @@ def get_instances_by_region(self, region):
self.add_instance(instance, region)
except boto.exception.BotoServerError, e:
+ if e.error_code == 'AuthFailure':
+ self.display_auth_error()
+
if not self.eucalyptus:
print "Looks like AWS is down again:"
print e
@@ -379,23 +383,33 @@ def get_rds_instances_by_region(self, region):
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError, e:
+ if e.error_code == 'AuthFailure':
+ self.display_auth_error()
+
if not e.reason == "Forbidden":
print "Looks like AWS RDS is down: "
print e
sys.exit(1)
- def get_instance(self, region, instance_id):
- ''' Gets details about a specific instance '''
- if self.eucalyptus:
- conn = boto.connect_euca(self.eucalyptus_host)
- conn.APIVersion = '2010-08-31'
+ def display_auth_error(self):
+ ''' Raise an error with an informative message if there is an issue authenticating'''
+ errors = ["Authentication error retrieving ec2 inventory."]
+ if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
+ errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
- conn = ec2.connect_to_region(region)
+ errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
- # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
- if conn is None:
- print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
- sys.exit(1)
+ boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
+ boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
+ if len(boto_config_found) > 0:
+ errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
+ else:
+ errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
+
+ raise Exception('\n'.join(errors))
+
+ def get_instance(self, region, instance_id):
+ conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
@@ -785,4 +799,3 @@ def json_format_dict(self, data, pretty=False):
# Run the script
Ec2Inventory()
-
From caf2a96ef9808436f00522b7792a3541301d90eb Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Sun, 15 Mar 2015 13:22:07 -0700
Subject: [PATCH 0082/3617] Merge pdksh fix to v2
---
hacking/env-setup | 1 +
v2/hacking/env-setup | 3 ++-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/hacking/env-setup b/hacking/env-setup
index 16baa9b1b75d25..f52c91a8b9cf8c 100644
--- a/hacking/env-setup
+++ b/hacking/env-setup
@@ -16,6 +16,7 @@ if [ -n "$BASH_SOURCE" ] ; then
HACKING_DIR=$(dirname "$BASH_SOURCE")
elif [ $(basename -- "$0") = "env-setup" ]; then
HACKING_DIR=$(dirname "$0")
+# Works with ksh93 but not pdksh
elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then
HACKING_DIR=$(dirname "${.sh.file}")
else
diff --git a/v2/hacking/env-setup b/v2/hacking/env-setup
index fed8e892fdad52..c03fa0874e1ef7 100644
--- a/v2/hacking/env-setup
+++ b/v2/hacking/env-setup
@@ -16,7 +16,8 @@ if [ -n "$BASH_SOURCE" ] ; then
HACKING_DIR=$(dirname "$BASH_SOURCE")
elif [ $(basename -- "$0") = "env-setup" ]; then
HACKING_DIR=$(dirname "$0")
-elif [ -n "$KSH_VERSION" ]; then
+# Works with ksh93 but not pdksh
+elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then
HACKING_DIR=$(dirname "${.sh.file}")
else
HACKING_DIR="$PWD/hacking"
From 5eae4353573b35710980cf082c15251e765884ce Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 16 Mar 2015 13:41:36 -0400
Subject: [PATCH 0083/3617] removed Darwin get_uptime_facts as it seems to
crash on OS X, will wait for a patch tested by someone that has access to the
platform
---
lib/ansible/module_utils/facts.py | 6 ------
1 file changed, 6 deletions(-)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index c1951925e4ba71..93fe68786d80cf 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -1592,7 +1592,6 @@ def populate(self):
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
- self.get_uptime_facts()
return self.facts
def get_sysctl(self):
@@ -1640,11 +1639,6 @@ def get_memory_facts(self):
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
- def get_uptime_facts(self):
- kern_boottime = self.sysctl['kern.boottime']
- boottime = datetime.datetime.strptime(kern_boottime, "%a %b %d %H:%M:%S %Y")
- delta = datetime.datetime.now() - boottime
- self.facts['uptime_seconds'] = int(delta.total_seconds())
class Network(Facts):
"""
From b783ea94bb83ab62d3351e68320b6b05a95ccb34 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 16 Mar 2015 14:00:07 -0400
Subject: [PATCH 0084/3617] fixed raw return check for privilege escalation
---
lib/ansible/runner/action_plugins/raw.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/runner/action_plugins/raw.py b/lib/ansible/runner/action_plugins/raw.py
index b1ba2c99d94749..e52296b2e78391 100644
--- a/lib/ansible/runner/action_plugins/raw.py
+++ b/lib/ansible/runner/action_plugins/raw.py
@@ -48,7 +48,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
# for some modules (script, raw), the sudo success key
# may leak into the stdout due to the way the sudo/su
# command is constructed, so we filter that out here
- if result.get('stdout','').strip().startswith('SUDO-SUCCESS-'):
- result['stdout'] = re.sub(r'^((\r)?\n)?SUDO-SUCCESS.*(\r)?\n', '', result['stdout'])
+ if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'):
+ result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout'])
return ReturnData(conn=conn, result=result)
From 1bf0e606466b158b539a4229906d3d4c9dcdfc5a Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 16 Mar 2015 11:34:55 -0700
Subject: [PATCH 0085/3617] Update core module pointer
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 31cc5f543f4166..ceda82603a5c1d 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 31cc5f543f4166eddb334340fd559765dc6c3940
+Subproject commit ceda82603a5c1d2c911a952440d0545fa011edf9
From 37ab61c542dd5758ef2668bbdfa163cadbcc6f24 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 16 Mar 2015 11:46:44 -0700
Subject: [PATCH 0086/3617] Update core pointer to make use of
DOCKER_TLS_VERIFY env var:
https://github.com/ansible/ansible-modules-core/issues/946
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index ceda82603a5c1d..34c4e0d4959eea 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit ceda82603a5c1d2c911a952440d0545fa011edf9
+Subproject commit 34c4e0d4959eeaf5dc4d2b69d2bd435267e8ff91
From ada2567dfb912428b4b23f2de9e91ac6b2cbb4b3 Mon Sep 17 00:00:00 2001
From: Steve Gargan
Date: Mon, 16 Mar 2015 20:00:18 +0000
Subject: [PATCH 0087/3617] log errors and explicitly exit rather than raising
exceptions
---
plugins/inventory/ec2.py | 43 ++++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py
index 617463355f064f..5f7bd061d7210d 100755
--- a/plugins/inventory/ec2.py
+++ b/plugins/inventory/ec2.py
@@ -343,7 +343,7 @@ def connect(self, region):
conn = ec2.connect_to_region(region)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
- raise Exception("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
+ self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
def get_instances_by_region(self, region):
@@ -365,12 +365,11 @@ def get_instances_by_region(self, region):
except boto.exception.BotoServerError, e:
if e.error_code == 'AuthFailure':
- self.display_auth_error()
-
- if not self.eucalyptus:
- print "Looks like AWS is down again:"
- print e
- sys.exit(1)
+ error = self.get_auth_error_message()
+ else:
+ backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
+ error = "Error connecting to %s backend.\n%s" % (backend, e.message)
+ self.fail_with_error(error)
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
@@ -384,15 +383,13 @@ def get_rds_instances_by_region(self, region):
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError, e:
if e.error_code == 'AuthFailure':
- self.display_auth_error()
-
+ error = self.get_auth_error_message()
if not e.reason == "Forbidden":
- print "Looks like AWS RDS is down: "
- print e
- sys.exit(1)
+ error = "Looks like AWS RDS is down:\n%s" % e.message
+ self.fail_with_error(error)
- def display_auth_error(self):
- ''' Raise an error with an informative message if there is an issue authenticating'''
+ def get_auth_error_message(self):
+ ''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
@@ -406,7 +403,12 @@ def display_auth_error(self):
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
- raise Exception('\n'.join(errors))
+ return '\n'.join(errors)
+
+ def fail_with_error(self, err_msg):
+ '''log an error to std err for ansible-playbook to consume and exit'''
+ sys.stderr.write(err_msg)
+ sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
@@ -506,9 +508,8 @@ def add_instance(self, instance, region):
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
- sys.exit(1)
+ self.fail_with_error('\n'.join(['Package boto seems a bit older.',
+ 'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
@@ -601,9 +602,9 @@ def add_rds_instance(self, instance, region):
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
- sys.exit(1)
+ self.fail_with_error('\n'.join(['Package boto seems a bit older.',
+ 'Please upgrade boto >= 2.3.0.']))
+
# Inventory: Group by engine
if self.group_by_rds_engine:
From a47c1326953c443f1dea723eee18d4ca83518237 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 16 Mar 2015 19:08:34 -0400
Subject: [PATCH 0088/3617] slight changes to allow for checksum and other
commands to work correctly with quoting
---
lib/ansible/runner/connection_plugins/ssh.py | 18 ++++++++----------
lib/ansible/utils/__init__.py | 8 ++++----
2 files changed, 12 insertions(+), 14 deletions(-)
diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py
index 25a330dcef51a4..a7a57a01cf25f1 100644
--- a/lib/ansible/runner/connection_plugins/ssh.py
+++ b/lib/ansible/runner/connection_plugins/ssh.py
@@ -306,7 +306,7 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab
no_prompt_out = ''
no_prompt_err = ''
- if self.runner.become and sudoable and self.runner.become_pass:
+ if sudoable and self.runner.become and self.runner.become_pass:
# several cases are handled for escalated privileges with password
# * NOPASSWD (tty & no-tty): detect success_key on stdout
# * without NOPASSWD:
@@ -319,11 +319,10 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab
become_output = ''
become_errput = ''
- while success_key not in become_output:
-
- if prompt and become_output.endswith(prompt):
- break
- if utils.su_prompts.check_su_prompt(become_output):
+ while True:
+ if success_key in become_output or \
+ (prompt and become_output.endswith(prompt)) or \
+ utils.su_prompts.check_su_prompt(become_output):
break
rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
@@ -351,12 +350,11 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab
stdout = p.communicate()
raise errors.AnsibleError('ssh connection error while waiting for %s password prompt' % self.runner.become_method)
- if success_key not in become_output:
- if sudoable:
- stdin.write(self.runner.become_pass + '\n')
- else:
+ if success_key in become_output:
no_prompt_out += become_output
no_prompt_err += become_errput
+ elif sudoable:
+ stdin.write(self.runner.become_pass + '\n')
(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt)
diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py
index 3745f0d43089f8..f164b25bd47cba 100644
--- a/lib/ansible/utils/__init__.py
+++ b/lib/ansible/utils/__init__.py
@@ -1241,8 +1241,8 @@ def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
# sudo prompt set with the -p option.
prompt = '[sudo via ansible, key=%s] password: ' % randbits
exe = exe or C.DEFAULT_SUDO_EXE
- becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \
- (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, 'echo %s; %s' % (success_key, cmd))
+ becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
+ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
elif method == 'su':
exe = exe or C.DEFAULT_SU_EXE
@@ -1252,13 +1252,13 @@ def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
elif method == 'pbrun':
exe = exe or 'pbrun'
flags = flags or ''
- becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, 'echo %s; %s' % (success_key,cmd))
+ becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
elif method == 'pfexec':
exe = exe or 'pfexec'
flags = flags or ''
# No user as it uses it's own exec_attr to figure it out
- becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd))
+ becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
if becomecmd is None:
raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
From bbdcba53da302d10effc57a8232188028060cd44 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 16 Mar 2015 19:37:03 -0400
Subject: [PATCH 0089/3617] fixed bug on using su on play level not setting
become method correctly
---
lib/ansible/modules/core | 2 +-
lib/ansible/playbook/play.py | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 34c4e0d4959eea..31cc5f543f4166 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 34c4e0d4959eeaf5dc4d2b69d2bd435267e8ff91
+Subproject commit 31cc5f543f4166eddb334340fd559765dc6c3940
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index ef097d04813093..edec30df758651 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -172,6 +172,7 @@ def __init__(self, playbook, ds, basedir, vault_password=None):
elif 'su' in ds:
self.become=True
self.become=ds['su']
+ self.become_method='su'
if 'su_user' in ds:
self.become_user=ds['su_user']
From b11be68249eec50a602c401ca31578598ea9dd1a Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 16 Mar 2015 19:40:37 -0400
Subject: [PATCH 0090/3617] updated module ref
---
lib/ansible/modules/core | 2 +-
lib/ansible/modules/extras | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 31cc5f543f4166..8658b82de7d279 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 31cc5f543f4166eddb334340fd559765dc6c3940
+Subproject commit 8658b82de7d279ea935c5d04db239fc300003090
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
index 8baba98ebe5053..696bc60caad2ea 160000
--- a/lib/ansible/modules/extras
+++ b/lib/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit 8baba98ebe5053e0c1e71881975ce8a1788f171c
+Subproject commit 696bc60caad2ea96c0a70c8091e24b2da060f35c
From 316284c56b2f5eab18563a13694e98fc86b68894 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 17 Mar 2015 10:35:24 -0500
Subject: [PATCH 0091/3617] Making blocks support become, and cleaning up
sudo/su references
---
v2/ansible/executor/connection_info.py | 7 +++++--
v2/ansible/executor/task_executor.py | 2 +-
v2/ansible/playbook/become.py | 9 +++++++++
v2/ansible/playbook/block.py | 24 +++++++++++++++++-------
v2/ansible/plugins/action/assemble.py | 2 +-
v2/ansible/plugins/action/copy.py | 2 +-
v2/ansible/plugins/action/fetch.py | 2 +-
v2/ansible/plugins/action/script.py | 3 +--
v2/ansible/plugins/action/template.py | 8 ++++----
v2/ansible/plugins/action/unarchive.py | 2 +-
v2/samples/test_become.yml | 4 +++-
11 files changed, 44 insertions(+), 21 deletions(-)
diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py
index b918dc6b2144cf..0ae51e6b612e9f 100644
--- a/v2/ansible/executor/connection_info.py
+++ b/v2/ansible/executor/connection_info.py
@@ -157,10 +157,13 @@ def set_task_override(self, task):
new_info.copy(self)
for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'):
+ attr_val = None
if hasattr(task, attr):
attr_val = getattr(task, attr)
- if attr_val:
- setattr(new_info, attr, attr_val)
+ if task._block and hasattr(task._block, attr) and not attr_val:
+ attr_val = getattr(task._block, attr)
+ if attr_val:
+ setattr(new_info, attr, attr_val)
return new_info
diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py
index bad47279a5e8f3..7eaba0061ef29e 100644
--- a/v2/ansible/executor/task_executor.py
+++ b/v2/ansible/executor/task_executor.py
@@ -382,7 +382,7 @@ def _compute_delegate(self, variables):
self._connection_info.password = this_info.get('ansible_ssh_pass', self._connection_info.password)
self._connection_info.private_key_file = this_info.get('ansible_ssh_private_key_file', self._connection_info.private_key_file)
self._connection_info.connection = this_info.get('ansible_connection', self._connection_info.connection)
- self._connection_info.sudo_pass = this_info.get('ansible_sudo_pass', self._connection_info.sudo_pass)
+ self._connection_info.become_pass = this_info.get('ansible_sudo_pass', self._connection_info.become_pass)
if self._connection_info.remote_addr in ('127.0.0.1', 'localhost'):
self._connection_info.connection = 'local'
diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py
index 6ac1d2bad986ca..0b0ad10176002e 100644
--- a/v2/ansible/playbook/become.py
+++ b/v2/ansible/playbook/become.py
@@ -19,6 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
#from ansible.utils.display import deprecated
@@ -85,4 +86,12 @@ def _munge_become(self, ds):
#deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
+ # if we are becoming someone else, but some fields are unset,
+ # make sure they're initialized to the default config values
+ if ds.get('become', False):
+ if ds.get('become_method', None) is None:
+ ds['become_method'] = C.DEFAULT_BECOME_METHOD
+ if ds.get('become_user', None) is None:
+ ds['become_user'] = C.DEFAULT_BECOME_USER
+
return ds
diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py
index 49f65a15349452..fa67b6ae1b99d8 100644
--- a/v2/ansible/playbook/block.py
+++ b/v2/ansible/playbook/block.py
@@ -21,13 +21,13 @@
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
-#from ansible.playbook.become import Become
+from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_tasks
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
-class Block(Base, Conditional, Taggable):
+class Block(Base, Become, Conditional, Taggable):
_block = FieldAttribute(isa='list', default=[])
_rescue = FieldAttribute(isa='list', default=[])
@@ -71,16 +71,18 @@ def munge(self, ds):
If a simple task is given, an implicit block for that single task
is created, which goes in the main portion of the block
'''
+
is_block = False
for attr in ('block', 'rescue', 'always'):
if attr in ds:
is_block = True
break
+
if not is_block:
if isinstance(ds, list):
- return dict(block=ds)
+ return super(Block, self).munge(dict(block=ds))
else:
- return dict(block=[ds])
+ return super(Block, self).munge(dict(block=[ds]))
return super(Block, self).munge(ds)
@@ -166,7 +168,11 @@ def serialize(self):
a task we don't want to include the attribute list of tasks.
'''
- data = dict(when=self.when)
+ data = dict()
+ for attr in self._get_base_attributes():
+ if attr not in ('block', 'rescue', 'always'):
+ data[attr] = getattr(self, attr)
+
data['dep_chain'] = self._dep_chain
if self._role is not None:
@@ -184,8 +190,12 @@ def deserialize(self, data):
from ansible.playbook.task import Task
- # unpack the when attribute, which is the only one we want
- self.when = data.get('when')
+ # we don't want the full set of attributes (the task lists), as that
+ # would lead to a serialize/deserialize loop
+ for attr in self._get_base_attributes():
+ if attr in data and attr not in ('block', 'rescue', 'always'):
+ setattr(self, attr, data.get(attr))
+
self._dep_chain = data.get('dep_chain', [])
# if there was a serialized role, unpack it too
diff --git a/v2/ansible/plugins/action/assemble.py b/v2/ansible/plugins/action/assemble.py
index 1ae8be02039141..b1bdc06c6d3873 100644
--- a/v2/ansible/plugins/action/assemble.py
+++ b/v2/ansible/plugins/action/assemble.py
@@ -117,7 +117,7 @@ def run(self, tmp=None, task_vars=dict()):
xfered = self._transfer_data('src', resultant)
# fix file permissions when the copy is done as a different user
- if self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root':
+ if self._connection_info.become and self._connection_info.become_user != 'root':
self._remote_chmod('a+r', xfered, tmp)
# run the copy module
diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py
index 46cb89550265f8..088a806b61b0ae 100644
--- a/v2/ansible/plugins/action/copy.py
+++ b/v2/ansible/plugins/action/copy.py
@@ -231,7 +231,7 @@ def run(self, tmp=None, task_vars=dict()):
self._remove_tempfile_if_content_defined(content, content_tempfile)
# fix file permissions when the copy is done as a different user
- if (self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root') and not raw:
+ if (self._connection_info.become and self._connection_info.become_user != 'root':
self._remote_chmod('a+r', tmp_src, tmp)
if raw:
diff --git a/v2/ansible/plugins/action/fetch.py b/v2/ansible/plugins/action/fetch.py
index 9bd73136b48d3a..e63fd88ea5c3c4 100644
--- a/v2/ansible/plugins/action/fetch.py
+++ b/v2/ansible/plugins/action/fetch.py
@@ -57,7 +57,7 @@ def run(self, tmp=None, task_vars=dict()):
# use slurp if sudo and permissions are lacking
remote_data = None
- if remote_checksum in ('1', '2') or self._connection_info.sudo:
+ if remote_checksum in ('1', '2') or self._connection_info.become:
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), tmp=tmp)
if slurpres.get('rc') == 0:
if slurpres['encoding'] == 'base64':
diff --git a/v2/ansible/plugins/action/script.py b/v2/ansible/plugins/action/script.py
index 6e8c1e1b9a4b45..21a9f41c59bfd1 100644
--- a/v2/ansible/plugins/action/script.py
+++ b/v2/ansible/plugins/action/script.py
@@ -74,8 +74,7 @@ def run(self, tmp=None, task_vars=None):
sudoable = True
# set file permissions, more permissive when the copy is done as a different user
- if ((self._connection_info.sudo and self._connection_info.sudo_user != 'root') or
- (self._connection_info.su and self._connection_info.su_user != 'root')):
+ if self._connection_info.become and self._connection_info.become_user != 'root':
chmod_mode = 'a+rx'
sudoable = False
else:
diff --git a/v2/ansible/plugins/action/template.py b/v2/ansible/plugins/action/template.py
index 372c07544d357b..1f7a6955a3220b 100644
--- a/v2/ansible/plugins/action/template.py
+++ b/v2/ansible/plugins/action/template.py
@@ -26,8 +26,6 @@ class ActionModule(ActionBase):
TRANSFERS_FILES = True
-
-
def get_checksum(self, tmp, dest, try_directory=False, source=None):
remote_checksum = self._remote_checksum(tmp, dest)
@@ -92,7 +90,9 @@ def run(self, tmp=None, task_vars=dict()):
# Expand any user home dir specification
dest = self._remote_expand_user(dest, tmp)
+ directory_prepended = False
if dest.endswith("/"): # CCTODO: Fix path for Windows hosts.
+ directory_prepended = True
base = os.path.basename(source)
dest = os.path.join(dest, base)
@@ -105,7 +105,7 @@ def run(self, tmp=None, task_vars=dict()):
except Exception, e:
return dict(failed=True, msg=type(e).__name__ + ": " + str(e))
- local_checksum = utils.checksum_s(resultant)
+ local_checksum = checksum_s(resultant)
remote_checksum = self.get_checksum(tmp, dest, not directory_prepended, source=source)
if isinstance(remote_checksum, dict):
# Error from remote_checksum is a dict. Valid return is a str
@@ -129,7 +129,7 @@ def run(self, tmp=None, task_vars=dict()):
xfered = self._transfer_data(self._shell.join_path(tmp, 'source'), resultant)
# fix file permissions when the copy is done as a different user
- if self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root':
+ if self._connection_info.become and self._connection_info.become_user != 'root':
self._remote_chmod('a+r', xfered, tmp)
# run the copy module
diff --git a/v2/ansible/plugins/action/unarchive.py b/v2/ansible/plugins/action/unarchive.py
index fab0843e9fe751..f99d7e28e64e08 100644
--- a/v2/ansible/plugins/action/unarchive.py
+++ b/v2/ansible/plugins/action/unarchive.py
@@ -81,7 +81,7 @@ def run(self, tmp=None, task_vars=dict()):
# handle check mode client side
# fix file permissions when the copy is done as a different user
if copy:
- if self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root':
+ if self._connection_info.become and self._connection_info.become_user != 'root':
# FIXME: noop stuff needs to be reworked
#if not self.runner.noop_on_check(task_vars):
# self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml
index 8e753beade313b..4b02563ca79257 100644
--- a/v2/samples/test_become.yml
+++ b/v2/samples/test_become.yml
@@ -3,4 +3,6 @@
tasks:
- command: whoami
become_user: testing
- become_method: su
+ - block:
+ - command: whoami
+ become_user: testing
From 3473a3bbece56e15a957fd5252d14e5775becb6b Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 17 Mar 2015 10:50:41 -0500
Subject: [PATCH 0092/3617] Changes to become cmd formatting, per a47c132
---
v2/ansible/executor/connection_info.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py
index 0ae51e6b612e9f..26a14a23f9d1d4 100644
--- a/v2/ansible/executor/connection_info.py
+++ b/v2/ansible/executor/connection_info.py
@@ -194,8 +194,8 @@ def make_become_cmd(self, cmd, executable, become_settings=None):
prompt = '[sudo via ansible, key=%s] password: ' % randbits
exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE)
flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS)
- becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \
- (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, 'echo %s; %s' % (success_key, cmd))
+ becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
+ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, pipes.quote('echo %s; %s' % (success_key, cmd)))
elif self.become_method == 'su':
exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE)
@@ -205,13 +205,13 @@ def make_become_cmd(self, cmd, executable, become_settings=None):
elif self.become_method == 'pbrun':
exe = become_settings.get('pbrun_exe', 'pbrun')
flags = become_settings.get('pbrun_flags', '')
- becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, 'echo %s; %s' % (success_key,cmd))
+ becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key, cmd)))
elif self.become_method == 'pfexec':
exe = become_settings.get('pfexec_exe', 'pbrun')
flags = become_settings.get('pfexec_flags', '')
# No user as it uses it's own exec_attr to figure it out
- becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd))
+ becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key, cmd)))
else:
raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
From e42848e0fee906967d36e6606153a1cd0f920b2d Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 16 Mar 2015 18:01:59 -0700
Subject: [PATCH 0093/3617] Better comment for why we have get_checksum call
itself sometimes
---
lib/ansible/runner/action_plugins/template.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py
index a824a6e4b8e152..5c9be9e079d280 100644
--- a/lib/ansible/runner/action_plugins/template.py
+++ b/lib/ansible/runner/action_plugins/template.py
@@ -36,7 +36,12 @@ def get_checksum(self, conn, tmp, dest, inject, try_directory=False, source=None
if remote_checksum in ('0', '2', '3', '4'):
# Note: 1 means the file is not present which is fine; template
# will create it. 3 means directory was specified instead of file
+ # which requires special handling
if try_directory and remote_checksum == '3' and source:
+ # If the user specified a directory name as their dest then we
+ # have to check the checksum of dest/basename(src). This is
+ # the same behaviour as cp foo.txt /var/tmp/ so users expect
+ # it to work.
base = os.path.basename(source)
dest = os.path.join(dest, base)
remote_checksum = self.get_checksum(conn, tmp, dest, inject, try_directory=False)
From f9a66a7ff7836274e025b9681f526918b028736d Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Tue, 17 Mar 2015 11:03:15 -0700
Subject: [PATCH 0094/3617] Update core module pointer
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 8658b82de7d279..ae253593e3a0e3 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 8658b82de7d279ea935c5d04db239fc300003090
+Subproject commit ae253593e3a0e3339a136bf57e0a54e62229e8e6
From a64de2e000d9732a5689545c20b527a8ee950c1f Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Tue, 17 Mar 2015 12:32:07 -0700
Subject: [PATCH 0095/3617] Initial test of the docker module
---
test/integration/destructive.yml | 1 +
.../roles/test_docker/meta/main.yml | 20 +++++++
.../roles/test_docker/tasks/main.yml | 54 +++++++++++++++++++
3 files changed, 75 insertions(+)
create mode 100644 test/integration/roles/test_docker/meta/main.yml
create mode 100644 test/integration/roles/test_docker/tasks/main.yml
diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml
index 54c905bdf6e413..b8f56d113bfb36 100644
--- a/test/integration/destructive.yml
+++ b/test/integration/destructive.yml
@@ -17,3 +17,4 @@
- { role: test_mysql_db, tags: test_mysql_db}
- { role: test_mysql_user, tags: test_mysql_user}
- { role: test_mysql_variables, tags: test_mysql_variables}
+ - { role: test_docker, tags: test_docker}
diff --git a/test/integration/roles/test_docker/meta/main.yml b/test/integration/roles/test_docker/meta/main.yml
new file mode 100644
index 00000000000000..399f3fb6e77f51
--- /dev/null
+++ b/test/integration/roles/test_docker/meta/main.yml
@@ -0,0 +1,20 @@
+# test code for the docker module
+# (c) 2014, James Cammarata
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+dependencies:
+ - prepare_tests
diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml
new file mode 100644
index 00000000000000..6141fe05348444
--- /dev/null
+++ b/test/integration/roles/test_docker/tasks/main.yml
@@ -0,0 +1,54 @@
+- name: Install docker packages (yum)
+ yum:
+ state: present
+ name: docker,docker-registry,python-docker-py
+ when: ansible_distribution in ['RedHat', 'CentOS', 'Fedora']
+
+- name: Install docker packages (apt)
+ apt:
+ state: present
+ # Note: add docker-registry when available
+ name: docker.io,python-docker
+ when: ansible_distribution in ['Ubuntu', 'Debian']
+
+- name: Start docker daemon
+ service:
+ name: docker
+ state: started
+
+- name: Download busybox image
+ docker:
+ image: busybox
+ state: present
+ pull: missing
+
+- name: Run a small script in busybox
+ docker:
+ image: busybox
+ state: reloaded
+ pull: always
+ command: "nc -l -p 2000 -e xargs -n1 echo hello"
+ detach: True
+
+- name: Get the docker container id
+ shell: "docker ps | grep busybox | awk '{ print $1 }'"
+ register: container_id
+
+- debug: var=container_id
+
+- name: Get the docker container ip
+ shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
+ register: container_ip
+
+- debug: var=container_ip
+
+- name: Try to access the server
+ shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
+ register: docker_output
+
+- debug: var=docker_output
+
+- name: check that the script ran
+ assert:
+ that:
+ - "'hello world' in docker_output.stdout_lines"
From 23291e8d8c0bf5f06303a62a4ba7a8c801bb53a8 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Tue, 17 Mar 2015 13:18:42 -0700
Subject: [PATCH 0096/3617] Ugh, looks like very few distros have the proper
packages to run the docker module.
break up the tests so that we can maybe run this on at least one
platform
---
.../test_docker/tasks/docker-setup-rht.yml | 4 ++
.../roles/test_docker/tasks/main.yml | 62 ++++---------------
2 files changed, 15 insertions(+), 51 deletions(-)
create mode 100644 test/integration/roles/test_docker/tasks/docker-setup-rht.yml
diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
new file mode 100644
index 00000000000000..26373e4d3c7be8
--- /dev/null
+++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
@@ -0,0 +1,4 @@
+- name: Install docker packages (yum)
+ yum:
+ state: present
+ name: docker,docker-registry,python-docker-py,nmap-ncat
diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml
index 6141fe05348444..d1cd7f4e593e8f 100644
--- a/test/integration/roles/test_docker/tasks/main.yml
+++ b/test/integration/roles/test_docker/tasks/main.yml
@@ -1,54 +1,14 @@
-- name: Install docker packages (yum)
- yum:
- state: present
- name: docker,docker-registry,python-docker-py
- when: ansible_distribution in ['RedHat', 'CentOS', 'Fedora']
+- include: docker-setup-rht.yml
+ when: ansible_distribution in ['Fedora']
+ # Packages on RHEL and CentOS are broken, broken, broken. Revisit when
+ # they've got that sorted out
+ #when: ansible_distribution in ['Fedora', 'RedHat', 'CentOS']
-- name: Install docker packages (apt)
- apt:
- state: present
- # Note: add docker-registry when available
- name: docker.io,python-docker
- when: ansible_distribution in ['Ubuntu', 'Debian']
+# python-docker isn't available until 14.10. Revist at the next Ubuntu LTS
+#- include: docker-setup-debian.yml
+# when: ansible_distribution in ['Ubuntu']
-- name: Start docker daemon
- service:
- name: docker
- state: started
+- include: docker-tests.yml
+ # Add other distributions as the proper packages become available
+ when: ansible_distribution in ['Fedora']
-- name: Download busybox image
- docker:
- image: busybox
- state: present
- pull: missing
-
-- name: Run a small script in busybox
- docker:
- image: busybox
- state: reloaded
- pull: always
- command: "nc -l -p 2000 -e xargs -n1 echo hello"
- detach: True
-
-- name: Get the docker container id
- shell: "docker ps | grep busybox | awk '{ print $1 }'"
- register: container_id
-
-- debug: var=container_id
-
-- name: Get the docker container ip
- shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
- register: container_ip
-
-- debug: var=container_ip
-
-- name: Try to access the server
- shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
- register: docker_output
-
-- debug: var=docker_output
-
-- name: check that the script ran
- assert:
- that:
- - "'hello world' in docker_output.stdout_lines"
From f8ec1451eae9a0bf3003a5d047144a43d3dee9e0 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Tue, 17 Mar 2015 13:31:54 -0700
Subject: [PATCH 0097/3617] Would help if I added these files in the right
directory
---
.../test_docker/tasks/docker-setup-debian.yml | 6 +++
.../roles/test_docker/tasks/docker-tests.yml | 41 +++++++++++++++++++
2 files changed, 47 insertions(+)
create mode 100644 test/integration/roles/test_docker/tasks/docker-setup-debian.yml
create mode 100644 test/integration/roles/test_docker/tasks/docker-tests.yml
diff --git a/test/integration/roles/test_docker/tasks/docker-setup-debian.yml b/test/integration/roles/test_docker/tasks/docker-setup-debian.yml
new file mode 100644
index 00000000000000..01a67eee6bb8f1
--- /dev/null
+++ b/test/integration/roles/test_docker/tasks/docker-setup-debian.yml
@@ -0,0 +1,6 @@
+- name: Install docker packages (apt)
+ apt:
+ state: present
+ # Note: add docker-registry when available
+ name: docker.io,python-docker,netcat-openbsd
+
diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml
new file mode 100644
index 00000000000000..e3ce04a56c44cb
--- /dev/null
+++ b/test/integration/roles/test_docker/tasks/docker-tests.yml
@@ -0,0 +1,41 @@
+- name: Start docker daemon
+ service:
+ name: docker
+ state: started
+
+- name: Download busybox image
+ docker:
+ image: busybox
+ state: present
+ pull: missing
+
+- name: Run a small script in busybox
+ docker:
+ image: busybox
+ state: reloaded
+ pull: always
+ command: "nc -l -p 2000 -e xargs -n1 echo hello"
+ detach: True
+
+- name: Get the docker container id
+ shell: "docker ps | grep busybox | awk '{ print $1 }'"
+ register: container_id
+
+- debug: var=container_id
+
+- name: Get the docker container ip
+ shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
+ register: container_ip
+
+- debug: var=container_ip
+
+- name: Try to access the server
+ shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
+ register: docker_output
+
+- debug: var=docker_output
+
+- name: check that the script ran
+ assert:
+ that:
+ - "'hello world' in docker_output.stdout_lines"
From 85e137bbadaf7d72569e52c047f2f5fd28919deb Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Tue, 17 Mar 2015 13:52:44 -0700
Subject: [PATCH 0098/3617] Attempt to enable docker tests for rhel/centos6 as
well
---
.../roles/test_docker/tasks/docker-setup-rht.yml | 16 +++++++++++++++-
.../integration/roles/test_docker/tasks/main.yml | 7 +++++--
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
index 26373e4d3c7be8..d141bddc55efb0 100644
--- a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
+++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
@@ -1,4 +1,18 @@
- name: Install docker packages (yum)
yum:
state: present
- name: docker,docker-registry,python-docker-py,nmap-ncat
+ name: docker-io,docker-registry,python-docker-py
+
+- name: Install netcat
+ yum:
+ state: present
+ name: nmap-ncat
+ # RHEL7 as well...
+ when: ansible_distribution == 'Fedora'
+
+- name: Install netcat
+ yum:
+ state: present
+ name: nc
+ when: ansible_distribution != 'Fedora'
+
diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml
index d1cd7f4e593e8f..d0abc5a9c6eac0 100644
--- a/test/integration/roles/test_docker/tasks/main.yml
+++ b/test/integration/roles/test_docker/tasks/main.yml
@@ -1,8 +1,9 @@
- include: docker-setup-rht.yml
when: ansible_distribution in ['Fedora']
- # Packages on RHEL and CentOS are broken, broken, broken. Revisit when
+- include: docker-setup-rht.yml
+ # Packages on RHEL and CentOS 7 are broken, broken, broken. Revisit when
# they've got that sorted out
- #when: ansible_distribution in ['Fedora', 'RedHat', 'CentOS']
+ when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6
# python-docker isn't available until 14.10. Revist at the next Ubuntu LTS
#- include: docker-setup-debian.yml
@@ -12,3 +13,5 @@
# Add other distributions as the proper packages become available
when: ansible_distribution in ['Fedora']
+- include: docker-tests.yml
+ when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6
From 9e14471471ba20c11e5c81dd9dd8dc24fa83f169 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Tue, 17 Mar 2015 14:14:26 -0700
Subject: [PATCH 0099/3617] And ran into a different problem with centos6.
Sigh.
---
test/integration/roles/test_docker/tasks/main.yml | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml
index d0abc5a9c6eac0..bdf252c42f6b3d 100644
--- a/test/integration/roles/test_docker/tasks/main.yml
+++ b/test/integration/roles/test_docker/tasks/main.yml
@@ -1,9 +1,10 @@
- include: docker-setup-rht.yml
when: ansible_distribution in ['Fedora']
-- include: docker-setup-rht.yml
+#- include: docker-setup-rht.yml
# Packages on RHEL and CentOS 7 are broken, broken, broken. Revisit when
# they've got that sorted out
- when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6
+  # CentOS 6 currently broken by conflicting files in python-backports and python-backports-ssl_match_hostname
+ #when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6
# python-docker isn't available until 14.10. Revist at the next Ubuntu LTS
#- include: docker-setup-debian.yml
From 4ce791fe84e1ba800ad57fb790455181f08a0687 Mon Sep 17 00:00:00 2001
From: Steve Gargan
Date: Tue, 17 Mar 2015 21:25:45 +0000
Subject: [PATCH 0100/3617] avoid path issues by determining the path of
ansible-pull and using its path to run ansible and ansible-playbook
---
bin/ansible-pull | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/bin/ansible-pull b/bin/ansible-pull
index a9a0897fbff821..d4887631e0fdfb 100755
--- a/bin/ansible-pull
+++ b/bin/ansible-pull
@@ -186,9 +186,12 @@ def main(args):
if path is None:
sys.stderr.write("module '%s' not found.\n" % options.module_name)
return 1
- cmd = 'ansible localhost -i "%s" %s -m %s -a "%s"' % (
- inv_opts, base_opts, options.module_name, repo_opts
+
+ bin_path = os.path.dirname(os.path.abspath(__file__))
+ cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
+ bin_path, inv_opts, base_opts, options.module_name, repo_opts
)
+
for ev in options.extra_vars:
cmd += ' -e "%s"' % ev
@@ -221,7 +224,7 @@ def main(args):
print >>sys.stderr, "Could not find a playbook to run."
return 1
- cmd = 'ansible-playbook %s %s' % (base_opts, playbook)
+ cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
if options.vault_password_file:
cmd += " --vault-password-file=%s" % options.vault_password_file
if options.inventory:
From 2cfeec3683a3e6387c126b9975bf63eb5d5ce69a Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Tue, 17 Mar 2015 14:40:10 -0700
Subject: [PATCH 0101/3617] Add tests using a docker private registry
---
.../roles/test_docker/tasks/main.yml | 9 ++-
.../test_docker/tasks/registry-tests.yml | 62 +++++++++++++++++++
2 files changed, 69 insertions(+), 2 deletions(-)
create mode 100644 test/integration/roles/test_docker/tasks/registry-tests.yml
diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml
index bdf252c42f6b3d..2ea15644d5f847 100644
--- a/test/integration/roles/test_docker/tasks/main.yml
+++ b/test/integration/roles/test_docker/tasks/main.yml
@@ -14,5 +14,10 @@
# Add other distributions as the proper packages become available
when: ansible_distribution in ['Fedora']
-- include: docker-tests.yml
- when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6
+#- include: docker-tests.yml
+# when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6
+
+- include: registry-tests.yml
+ # Add other distributions as the proper packages become available
+ when: ansible_distribution in ['Fedora']
+
diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml
new file mode 100644
index 00000000000000..52d840601975db
--- /dev/null
+++ b/test/integration/roles/test_docker/tasks/registry-tests.yml
@@ -0,0 +1,62 @@
+- name: Configure a private docker registry
+ service:
+ name: docker-registry
+ state: started
+
+- name: Get busybox image id
+ shell: "docker images | grep busybox | awk '{ print $3 }'"
+ register: image_id
+
+- name: Tag docker image into the local repository
+ shell: "docker tag {{ image_id.stdout_lines[0] }} localhost:5000/mine"
+
+- name: Push docker image into the local repository
+ shell: "docker push localhost:5000/mine"
+
+- name: Remove the busybox image from the local docker
+ shell: "docker rmi -f {{ image_id.stdout_lines[0] }}"
+
+- name: Remove the new image from the local docker
+ shell: "docker rmi -f localhost:5000/mine"
+
+- name: Get number of images in docker
+ shell: "docker images |wc -l"
+ register: docker_output
+
+- name: Check that there are no images in docker
+ assert:
+ that:
+ - "'1' in docker_output.stdout_lines"
+
+- name: Retrieve the image from private docker server
+ docker:
+ image: "localhost:5000/mine"
+ state: present
+ pull: missing
+ insecure_registry: True
+
+- name: Run a small script in the new image
+ docker:
+ image: "localhost:5000/mine"
+ state: reloaded
+ pull: always
+ command: "nc -l -p 2000 -e xargs -n1 echo hello"
+ detach: True
+ insecure_registry: True
+
+- name: Get the docker container id
+ shell: "docker ps | grep mine | awk '{ print $1 }'"
+ register: container_id
+
+- name: Get the docker container ip
+ shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
+ register: container_ip
+
+- name: Try to access the server
+ shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
+ register: docker_output
+
+- name: check that the script ran
+ assert:
+ that:
+ - "'hello world' in docker_output.stdout_lines"
From 259744d5f43f7bb36b9f707f02d074c03364740d Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Tue, 17 Mar 2015 14:40:43 -0700
Subject: [PATCH 0102/3617] Remove debug statements
---
test/integration/roles/test_docker/tasks/docker-tests.yml | 6 ------
1 file changed, 6 deletions(-)
diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml
index e3ce04a56c44cb..11f2f9ac2c1927 100644
--- a/test/integration/roles/test_docker/tasks/docker-tests.yml
+++ b/test/integration/roles/test_docker/tasks/docker-tests.yml
@@ -21,20 +21,14 @@
shell: "docker ps | grep busybox | awk '{ print $1 }'"
register: container_id
-- debug: var=container_id
-
- name: Get the docker container ip
shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
register: container_ip
-- debug: var=container_ip
-
- name: Try to access the server
shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
register: docker_output
-- debug: var=docker_output
-
- name: check that the script ran
assert:
that:
From ba4e9a4c82e5543f2333f2eab1917c4c7ff3d8d4 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 17 Mar 2015 18:23:40 -0400
Subject: [PATCH 0103/3617] added missing become method inventory override
---
lib/ansible/runner/__init__.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 59e4d96924ffc4..5c5554816179f8 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -883,10 +883,12 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port,
actual_transport = inject.get('ansible_connection', self.transport)
actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True)
+
self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become))))
self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user)))
self.become_pass = inject.get('ansible_become_pass', inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass)))
self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe))
+ self.become_method = inject.get('ansible_become_method', self.become_method)
# select default root user in case self.become requested
# but no user specified; happens e.g. in host vars when
From f4c1260d0359e5b5ad43477f36afabfd1c8c87e4 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 18 Mar 2015 12:15:42 -0700
Subject: [PATCH 0104/3617] Add more tests for private docker registries
---
.../roles/test_docker/files/devdockerCA.crt | 23 ++++
.../roles/test_docker/files/devdockerCA.key | 27 +++++
.../roles/test_docker/files/devdockerCA.srl | 1 +
.../files/docker-registry.htpasswd | 1 +
.../files/dockertest.ansible.com.crt | 21 ++++
.../files/dockertest.ansible.com.csr | 17 +++
.../files/dockertest.ansible.com.key | 27 +++++
.../files/nginx-docker-registry.conf | 40 +++++++
.../test_docker/tasks/docker-setup-debian.yml | 2 +-
.../test_docker/tasks/docker-setup-rht.yml | 2 +-
.../roles/test_docker/tasks/docker-tests.yml | 31 +++++
.../test_docker/tasks/registry-tests.yml | 108 +++++++++++++++++-
12 files changed, 294 insertions(+), 6 deletions(-)
create mode 100644 test/integration/roles/test_docker/files/devdockerCA.crt
create mode 100644 test/integration/roles/test_docker/files/devdockerCA.key
create mode 100644 test/integration/roles/test_docker/files/devdockerCA.srl
create mode 100644 test/integration/roles/test_docker/files/docker-registry.htpasswd
create mode 100644 test/integration/roles/test_docker/files/dockertest.ansible.com.crt
create mode 100644 test/integration/roles/test_docker/files/dockertest.ansible.com.csr
create mode 100644 test/integration/roles/test_docker/files/dockertest.ansible.com.key
create mode 100644 test/integration/roles/test_docker/files/nginx-docker-registry.conf
diff --git a/test/integration/roles/test_docker/files/devdockerCA.crt b/test/integration/roles/test_docker/files/devdockerCA.crt
new file mode 100644
index 00000000000000..14f1b2f7ee6180
--- /dev/null
+++ b/test/integration/roles/test_docker/files/devdockerCA.crt
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIJAPczDjnFOjH/MA0GCSqGSIb3DQEBCwUAMIGEMQswCQYD
+VQQGEwJVUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEQMA4GA1UECgwH
+QW5zaWJsZTEfMB0GA1UEAwwWZG9ja2VydGVzdC5hbnNpYmxlLmNvbTEkMCIGCSqG
+SIb3DQEJARYVdGt1cmF0b21pQGFuc2libGUuY29tMB4XDTE1MDMxNzIyMjc1OVoX
+DTQyMDgwMjIyMjc1OVowgYQxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0G
+A1UEBwwGRHVyaGFtMRAwDgYDVQQKDAdBbnNpYmxlMR8wHQYDVQQDDBZkb2NrZXJ0
+ZXN0LmFuc2libGUuY29tMSQwIgYJKoZIhvcNAQkBFhV0a3VyYXRvbWlAYW5zaWJs
+ZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIk4D0+QY3obQM
+I/BPmI4pFFu734HHz98ce6Qat7WYiGUHsnt3LHw2a6zMsgP3siD1zqGHtk1IipWR
+IwZbXm1spww/8YNUEE8wbXlLGI8IPUpg2J7NS2SdYIuN/TrQMqCUt7fFb+7OQjaH
+RtR0LtXhP96al3E8BR9G6AiS67XuwdTL4vrXLUWISjNyF2Vj7xQsp8KRrq0qnXhq
+pefeBi1fD9DG5f76j3s8lqGiOg9FHegvfodonNGcqE16T/vBhQcf+NjenlFvR2Lh
+3wb/RCo/b1IhZHKNx32fJ/WpiKXkrLYFvwtIWtLw6XIwwarc+n7AfGqKnt4h4bAG
+a+5aNnlFAgMBAAGjUDBOMB0GA1UdDgQWBBRZpu6oomSlpCvy2VgOHbWwDwVl1jAf
+BgNVHSMEGDAWgBRZpu6oomSlpCvy2VgOHbWwDwVl1jAMBgNVHRMEBTADAQH/MA0G
+CSqGSIb3DQEBCwUAA4IBAQCqOSFzTgQDww5bkNRCQrg7lTKzXW9bJpJ5NZdTLwh6
+b+e+XouRH+lBe7Cnn2RTtuFYVfm8hQ1Ra7GDM3v2mJns/s3zDkRINZMMVXddzl5S
+M8QxsFJK41PaL9wepizslkcg19yQkdWJQYPDeFurlFvwtakhZE7ttawYi5bFkbCd
+4fchMNBBmcigpSfoWb/L2lK2vVKBcfOdUl+V6k49lpf8u7WZD0Xi2cbBhw17tPj4
+ulKZaVNdzj0GFfhpQe/MtDoqxStRpHamdk0Y6fN+CvoW7RPDeVsqkIgCu30MOFuG
+A53ZtOc3caYRyGYJtIIl0Rd5uIApscec/6RGiFX6Gab8
+-----END CERTIFICATE-----
diff --git a/test/integration/roles/test_docker/files/devdockerCA.key b/test/integration/roles/test_docker/files/devdockerCA.key
new file mode 100644
index 00000000000000..0c8c0ee7b0c293
--- /dev/null
+++ b/test/integration/roles/test_docker/files/devdockerCA.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAyJOA9PkGN6G0DCPwT5iOKRRbu9+Bx8/fHHukGre1mIhlB7J7
+dyx8NmuszLID97Ig9c6hh7ZNSIqVkSMGW15tbKcMP/GDVBBPMG15SxiPCD1KYNie
+zUtknWCLjf060DKglLe3xW/uzkI2h0bUdC7V4T/empdxPAUfRugIkuu17sHUy+L6
+1y1FiEozchdlY+8ULKfCka6tKp14aqXn3gYtXw/QxuX++o97PJahojoPRR3oL36H
+aJzRnKhNek/7wYUHH/jY3p5Rb0di4d8G/0QqP29SIWRyjcd9nyf1qYil5Ky2Bb8L
+SFrS8OlyMMGq3Pp+wHxqip7eIeGwBmvuWjZ5RQIDAQABAoIBAQCVOumfWgf+LBlB
+TxvknKRoe/Ukes6cU1S0ZGlcV4KM0i4Y4/poWHiyJLqUMX4yNB3BxNL5nfEyH6nY
+Ki74m/Dd/gtnJ9GGIfxJE6pC7Sq9/pvwIjtEkutxC/vI0LeJX6GKBIZ+JyGN5EWd
+sF0xdAc9Z7+/VR2ygj0bDFgUt7rMv6fLaXh6i5Ms0JV7I/HkIi0Lmy9FncJPOTjP
+/Wb3Rj5twDppBqSiqU2JNQHysWzNbp8nzBGeR0+WU6xkWjjGzVyQZJq4XJQhqqot
+t+v+/lF+jObujcRxPRStaA5IoQdmls3l+ubkoFeNp3j6Nigz40wjTJArMu/Q9xQ5
+A+kHYNgBAoGBAPVNku0eyz1SyMM8FNoB+AfSpkslTnqfmehn1GCOOS9JPimGWS3A
+UlAs/PAPW/H/FTM38eC89GsKKVV8zvwkERNwf+PIGzkQrJgYLxGwoflAKsvFoQi9
+PVbIn0TBDZ3TWyNfGul62fEgNen4B46d7kG6l/C3p9eKKCo3sCBgWl8FAoGBANFS
+n9YWyAYmHQAWy5R0YeTsdtiRpZWkB0Is9Jr8Zm/DQDNnsKgvXw//qxuWYMi68teK
+6o8t5mgDQNWBu3rXrU73f8mMVJNmzSHFbyQEyFOJ9yvI5qMRbJfvdURUje6d3ZUw
+G7olKjX0fec4cAG7hbT8sMDvIbnATdhh3VppiEVBAoGBAJKidJnaNpPJ0MkkOTK4
+ypOikFWLT4ZtsYsDxiiR3A0wM0CPVu/Kb2oN+oVmKQhX+0xKvQQi79iskljP6ss+
+pBaCwXBgRiWumf2xNzHT7H8apHp7APBAb1JZSxvGa2VU2r4iM+wty+of3xqlcZ8H
+OU2BRSJYJrTpmWjjMR2pe1whAoGAfMTbMSlzIPcm4h60SlD06Rdp370xDfkvumpB
+gwBfrs6bPgjYa+eQqmCjBValagDFL2VGWwHpDKajxqAFuDtGuoMcUG6tGw9zxmWA
+0d9n6SObiSW/FAQWzpmVNJ2R3GGM6pg6bsIoXvDU+zXQzbeRA0h7swTW/Xl67Teo
+UXQGHgECgYEAjckqv2e39AgBvjxvj9SylVbFNSERrbpmiIRH31MnAHpTXbxRf7K+
+/79vUsRfQun9F/+KVfjUyMqRj0PE2tS4ATIjqQsa18RCB4mAE3sNsKz8HbJfzIFq
+eEqAWmURm6gRmLmaTMlXS0ZtZaw/A2Usa/DJumu9CsfBu7ZJbDnrQIY=
+-----END RSA PRIVATE KEY-----
diff --git a/test/integration/roles/test_docker/files/devdockerCA.srl b/test/integration/roles/test_docker/files/devdockerCA.srl
new file mode 100644
index 00000000000000..78f0162afecbc3
--- /dev/null
+++ b/test/integration/roles/test_docker/files/devdockerCA.srl
@@ -0,0 +1 @@
+D96F3E552F279F46
diff --git a/test/integration/roles/test_docker/files/docker-registry.htpasswd b/test/integration/roles/test_docker/files/docker-registry.htpasswd
new file mode 100644
index 00000000000000..7cee295817c943
--- /dev/null
+++ b/test/integration/roles/test_docker/files/docker-registry.htpasswd
@@ -0,0 +1 @@
+testdocker:$apr1$6cYd3tA9$4Dc9/I5Z.bl8/br8O/6B41
diff --git a/test/integration/roles/test_docker/files/dockertest.ansible.com.crt b/test/integration/roles/test_docker/files/dockertest.ansible.com.crt
new file mode 100644
index 00000000000000..e89327c3faf508
--- /dev/null
+++ b/test/integration/roles/test_docker/files/dockertest.ansible.com.crt
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDYTCCAkkCCQDZbz5VLyefRjANBgkqhkiG9w0BAQUFADCBhDELMAkGA1UEBhMC
+VVMxCzAJBgNVBAgMAk5DMQ8wDQYDVQQHDAZEdXJoYW0xEDAOBgNVBAoMB0Fuc2li
+bGUxHzAdBgNVBAMMFmRvY2tlcnRlc3QuYW5zaWJsZS5jb20xJDAiBgkqhkiG9w0B
+CQEWFXRrdXJhdG9taUBhbnNpYmxlLmNvbTAgFw0xNTAzMTcyMjMxNTBaGA8yMjg4
+MTIzMDIyMzE1MFowXjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5DMQ8wDQYDVQQH
+DAZEdXJoYW0xEDAOBgNVBAoMB0Fuc2libGUxHzAdBgNVBAMMFmRvY2tlcnRlc3Qu
+YW5zaWJsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7WpI3
+QuuARgPufAA0JkGCGIUNWqFyTEngOWvBVEuk5TnDB4x78OCE9j7rr75OxZaSc6Y7
+oFTl+hhlgt6sqj+GXehgCHLA97CCc8eUqGv3bwdIIg/hahCPjEWfYzocX1xmUdzN
+6klbV9lSO7FGSuk7W4DNga/weRfZmVoPi6jqTvx0tFsGrHVb1evholUKpxaOEYQZ
+2NJ22+UXpUyVzN/mw5TAGNG0/yR7sIgCjKYCsYF8k79SfNDMJ1VcCPy3aag45jaz
+WoA+OIJJFRkAaPSM5VtnbGBv/slpDVaKfl2ei7Ey3mKx1b7jYMzRz07Gw+zqr1gJ
+kBWvfjR7ioxXcN7jAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAJyF24tCq5R8SJto
+EMln0m9dMoJTC5usaBYBUMMe6hV2ikUGaXVDIqY+Yypt1sIcjGnLRmehJbej8iS7
+4aypuLc8Fgb4CvW+gY3I3W1iF7ZxIN/4yr237Z9KH1d1uGi+066Sk94OCXlqgsb+
+RzU6XOg+PMIjYC/us5VRv8a2qfjIA8getR+19nP+hR6NgIQcEyRKG2FmhkUSAwd8
+60FhpW4UmPQmn0ErZmRwdp2hNPj5g3my5iOSi7DzdK4CwZJAASOoWsbQIxP0k4JE
+PMo7Ad1YxXlOvNWIA8FLMkRsq3li6KJ17WBdEYgFeuxWpf1/x1WA+WpwEIfC5cuR
+A5LkaNI=
+-----END CERTIFICATE-----
diff --git a/test/integration/roles/test_docker/files/dockertest.ansible.com.csr b/test/integration/roles/test_docker/files/dockertest.ansible.com.csr
new file mode 100644
index 00000000000000..62b1f8535acf50
--- /dev/null
+++ b/test/integration/roles/test_docker/files/dockertest.ansible.com.csr
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICozCCAYsCAQAwXjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5DMQ8wDQYDVQQH
+DAZEdXJoYW0xEDAOBgNVBAoMB0Fuc2libGUxHzAdBgNVBAMMFmRvY2tlcnRlc3Qu
+YW5zaWJsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7WpI3
+QuuARgPufAA0JkGCGIUNWqFyTEngOWvBVEuk5TnDB4x78OCE9j7rr75OxZaSc6Y7
+oFTl+hhlgt6sqj+GXehgCHLA97CCc8eUqGv3bwdIIg/hahCPjEWfYzocX1xmUdzN
+6klbV9lSO7FGSuk7W4DNga/weRfZmVoPi6jqTvx0tFsGrHVb1evholUKpxaOEYQZ
+2NJ22+UXpUyVzN/mw5TAGNG0/yR7sIgCjKYCsYF8k79SfNDMJ1VcCPy3aag45jaz
+WoA+OIJJFRkAaPSM5VtnbGBv/slpDVaKfl2ei7Ey3mKx1b7jYMzRz07Gw+zqr1gJ
+kBWvfjR7ioxXcN7jAgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAQEAoPgw9dlA3Ys2
+oahtr2KMNFnHnab6hUr/CuDIygkOft+MCX1cPXY1c0R72NQq42TjAFO5UnriJ0Jg
+rcWgBAw8TCOHH77ZWawQFjWWoxNTy+bfXNJ002tzc4S/A4s8ytcFQN7E2irbGtUB
+ratVaE+c6RvD/o48N4YLUyJbJK84FZ1xMnJI0z5R6XzDWEqYbobzkM/aUWvDTT9F
++F9H5W/3sIhNFVGLygSKbhgrb6eaC8R36fcmTRfYYdT4GrpXFePoZ4LJGCKiiaGV
+p8gZzYQ9xjRYDP2OUMacBDlX1Mu5IJ2SCfjavD1hMhB54tWiiw3CRMJcNMql7ob/
+ZHH8UDMqgA==
+-----END CERTIFICATE REQUEST-----
diff --git a/test/integration/roles/test_docker/files/dockertest.ansible.com.key b/test/integration/roles/test_docker/files/dockertest.ansible.com.key
new file mode 100644
index 00000000000000..bda2bb612629c6
--- /dev/null
+++ b/test/integration/roles/test_docker/files/dockertest.ansible.com.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAu1qSN0LrgEYD7nwANCZBghiFDVqhckxJ4DlrwVRLpOU5wweM
+e/DghPY+66++TsWWknOmO6BU5foYZYLerKo/hl3oYAhywPewgnPHlKhr928HSCIP
+4WoQj4xFn2M6HF9cZlHczepJW1fZUjuxRkrpO1uAzYGv8HkX2ZlaD4uo6k78dLRb
+Bqx1W9Xr4aJVCqcWjhGEGdjSdtvlF6VMlczf5sOUwBjRtP8ke7CIAoymArGBfJO/
+UnzQzCdVXAj8t2moOOY2s1qAPjiCSRUZAGj0jOVbZ2xgb/7JaQ1Win5dnouxMt5i
+sdW+42DM0c9OxsPs6q9YCZAVr340e4qMV3De4wIDAQABAoIBABjczxSIS+pM4E6w
+o/JHtV/HUzjPcydQ2mjoFdWlExjB1qV8BfeYoqLibr0mKFIZxH6Q3FmDUGDojH5E
+HLq7KQzyv1inJltXQ1Q8exrOMu22DThUVNksEyCJk9+v8lE7km59pJiq46s8gDl6
+dG8Il+TporEi6a820qRsxlfTx8m4EUbyPIhf2e2wYdqiscLwj49ZzMs3TFJxN3j4
+lLP3QDHz9n8q+XXpUT9+rsePe4D4DVVRLhg8w35zkys36xfvBZrI+9SytSs+r1/e
+X4gVhxeX9q3FkvXiw1IDGPr0l5X7SH+5zk7JWuLfFbNBK02zR/Bd2OIaYAOmyIFk
+ZzsVfokCgYEA8Cj04S32Tga7lOAAUEuPjgXbCtGYqBUJ/9mlMHJBtyl4vaBRm1Z3
+1YQqlL3yGM1F6ZStPWs86vsVaScypr7+RnmQ/uPjz1g2jNI9vomqRkzpzd8/bBwW
+J3FCaKFIfl9uQx4ac7piAYdhNXswjQ7Kzn5xgG24i8EkUm6+UxarA38CgYEAx7X+
+qOVT+kA5WU1EDIc2x3Au0PhNIXiHOGRLW0MC7Vy1xBrgxfVrz6J8flBXOxmWYjRq
+3dFiHA9S7WPQStkgTjzE91sthLefJ8DKXE4IrRkvYXIIX8DqkcFxTHS/OzckTcK/
+z79jNOPYA1s+z2jzgd24sslXbqxNz1LqZ/PlRp0CgYEAik8cEF72/aK0/x0uMRAD
+IcjPiGCDKTHMq3M9xjPXEtQofBTLSsm2g9n05+qodY4qmEYOq1OKJs3pW8C+U/ek
+2xOB5Ll75lqoN9uQwZ3o2UnMUMskbG+UdqyskTNpW5Y8Gx1IIKQTc0vzOOi0YlhF
+hjydw1ftM1dNQsgShimE3aMCgYEAwITwFk7kcoTBBBZY+B7Mrtu1Ndt3N0HiUHlW
+r4Zc5waNbptefVbF9GY1zuqR/LYA43CWaHj1NAmNrqye2diPrPwmADHUInGEqqTO
+LsdG099Ibo6oBe6J8bJiDwsoYeQZSiDoGVPtRcoyraGjXfxVaaac6zTu5RCS/b53
+m3hhWH0CgYAqi3x10NpJHInU/zNa1GhI9UVJzabE2APdbPHvoE/yyfpCGhExiXZw
+MDImUzc59Ro0pCZ9Bk7pd5LwdjjeJXih7jaRZQlPD1BeM6dKdmJps1KMaltOOJ4J
+W0FE34E+Kt5JeIix8zmhxgaAU9NVilaNx5tI/D65Y0inMBZpqedrtg==
+-----END RSA PRIVATE KEY-----
diff --git a/test/integration/roles/test_docker/files/nginx-docker-registry.conf b/test/integration/roles/test_docker/files/nginx-docker-registry.conf
new file mode 100644
index 00000000000000..99c7802e1bf574
--- /dev/null
+++ b/test/integration/roles/test_docker/files/nginx-docker-registry.conf
@@ -0,0 +1,40 @@
+# For versions of Nginx > 1.3.9 that include chunked transfer encoding support
+# Replace with appropriate values where necessary
+
+upstream docker-registry {
+ server localhost:5000;
+}
+
+server {
+ listen 8080;
+ server_name dockertest.ansible.com;
+
+ ssl on;
+ ssl_certificate /etc/pki/tls/certs/dockertest.ansible.com.crt;
+ ssl_certificate_key /etc/pki/tls/private/dockertest.ansible.com.key;
+
+ proxy_set_header Host $http_host; # required for Docker client sake
+ proxy_set_header X-Real-IP $remote_addr; # pass on real client IP
+
+ client_max_body_size 0; # disable any limits to avoid HTTP 413 for large image uploads
+
+ # required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
+ chunked_transfer_encoding on;
+
+ location / {
+ # let Nginx know about our auth file
+ auth_basic "Restricted";
+ auth_basic_user_file /etc/nginx/docker-registry.htpasswd;
+
+ proxy_pass http://docker-registry;
+ }
+ location /_ping {
+ auth_basic off;
+ proxy_pass http://docker-registry;
+ }
+ location /v1/_ping {
+ auth_basic off;
+ proxy_pass http://docker-registry;
+ }
+
+}
diff --git a/test/integration/roles/test_docker/tasks/docker-setup-debian.yml b/test/integration/roles/test_docker/tasks/docker-setup-debian.yml
index 01a67eee6bb8f1..068011a0937223 100644
--- a/test/integration/roles/test_docker/tasks/docker-setup-debian.yml
+++ b/test/integration/roles/test_docker/tasks/docker-setup-debian.yml
@@ -2,5 +2,5 @@
apt:
state: present
# Note: add docker-registry when available
- name: docker.io,python-docker,netcat-openbsd
+ name: docker.io,python-docker,netcat-openbsd,nginx
diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
index d141bddc55efb0..3ba234ecffca5f 100644
--- a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
+++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
@@ -1,7 +1,7 @@
- name: Install docker packages (yum)
yum:
state: present
- name: docker-io,docker-registry,python-docker-py
+ name: docker-io,docker-registry,python-docker-py,nginx
- name: Install netcat
yum:
diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml
index 11f2f9ac2c1927..10067d7ad7a5f6 100644
--- a/test/integration/roles/test_docker/tasks/docker-tests.yml
+++ b/test/integration/roles/test_docker/tasks/docker-tests.yml
@@ -33,3 +33,34 @@
assert:
that:
- "'hello world' in docker_output.stdout_lines"
+
+- name: Run a script that sets environment in busybox
+ docker:
+ image: busybox
+ state: reloaded
+ pull: always
+ env:
+ TEST: hello
+ command: '/bin/sh -c "nc -l -p 2000 -e xargs -n1 echo $TEST"'
+ detach: True
+
+- name: Get the docker container id
+ shell: "docker ps | grep busybox | awk '{ print $1 }'"
+ register: container_id
+
+- name: Get the docker container ip
+ shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
+ register: container_ip
+
+- name: Try to access the server
+ shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
+ register: docker_output
+
+- name: check that the script ran
+ assert:
+ that:
+ - "'hello world' in docker_output.stdout_lines"
+
+- name: Remove the busybox image from the local docker
+ shell: "docker rmi -f busybox"
+
diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml
index 52d840601975db..348062234ad07a 100644
--- a/test/integration/roles/test_docker/tasks/registry-tests.yml
+++ b/test/integration/roles/test_docker/tasks/registry-tests.yml
@@ -3,18 +3,24 @@
name: docker-registry
state: started
+- name: Retrieve busybox image from docker hub
+ docker:
+ image: busybox
+ state: present
+ pull: missing
+
- name: Get busybox image id
shell: "docker images | grep busybox | awk '{ print $3 }'"
register: image_id
-- name: Tag docker image into the local repository
+- name: Tag docker image into the local registry
shell: "docker tag {{ image_id.stdout_lines[0] }} localhost:5000/mine"
-- name: Push docker image into the local repository
+- name: Push docker image into the private registry
shell: "docker push localhost:5000/mine"
- name: Remove the busybox image from the local docker
- shell: "docker rmi -f {{ image_id.stdout_lines[0] }}"
+ shell: "docker rmi -f busybox"
- name: Remove the new image from the local docker
shell: "docker rmi -f localhost:5000/mine"
@@ -23,12 +29,13 @@
shell: "docker images |wc -l"
register: docker_output
+# docker prints a header so the header should be all that's present
- name: Check that there are no images in docker
assert:
that:
- "'1' in docker_output.stdout_lines"
-- name: Retrieve the image from private docker server
+- name: Retrieve the image from private docker registry
docker:
image: "localhost:5000/mine"
state: present
@@ -60,3 +67,96 @@
assert:
that:
- "'hello world' in docker_output.stdout_lines"
+
+- name: Remove the new image from the local docker
+ shell: "docker rmi -f localhost:5000/mine"
+
+- name: Get number of images in docker
+ shell: "docker images |wc -l"
+ register: docker_output
+
+- name: Check that there are no images in docker
+ assert:
+ that:
+ - "'1' in docker_output.stdout_lines"
+
+- name: Setup nginx with a user/password
+ copy:
+ src: docker-registry.htpasswd
+ dest: /etc/nginx/docker-registry.htpasswd
+
+- name: Setup nginx with a config file
+ copy:
+ src: nginx-docker-registry.conf
+ dest: /etc/nginx/conf.d/nginx-docker-registry.conf
+
+- name: Setup nginx docker cert
+ copy:
+ src: dockertest.ansible.com.crt
+ dest: /etc/pki/tls/certs/dockertest.ansible.com.crt
+
+- name: Setup nginx docker key
+ copy:
+ src: dockertest.ansible.com.key
+ dest: /etc/pki/tls/private/dockertest.ansible.com.key
+
+- name: Setup the ca keys
+ copy:
+ src: devdockerCA.crt
+ dest: /etc/pki/ca-trust/source/anchors/devdockerCA.crt
+
+- name: Update the ca bundle
+ command: update-ca-trust extract
+
+- name: Restart docker daemon
+ service:
+ name: docker
+ state: restarted
+
+- name: Start nginx
+ service:
+ name: nginx
+ state: restarted
+
+- name: Add domain name to hosts
+ lineinfile:
+ line: "127.0.0.1 dockertest.ansible.com"
+ dest: /etc/hosts
+ state: present
+
+- name: Start a container after getting it from a secured private registry
+ docker:
+ image: dockertest.ansible.com:8080/mine
+ registry: dockertest.ansible.com:8080
+ username: "testdocker"
+ password: "testdocker"
+ state: running
+ command: "nc -l -p 2000 -e xargs -n1 echo hello"
+ detach: True
+
+- name: Get the docker container id
+ shell: "docker ps | grep mine | awk '{ print $1 }'"
+ register: container_id
+
+- name: Get the docker container ip
+ shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
+ register: container_ip
+
+- name: Try to access the server
+ shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
+ register: docker_output
+
+- name: check that the script ran
+ assert:
+ that:
+ - "'hello world' in docker_output.stdout_lines"
+
+- name: Remove the private repo image from the local docker
+ shell: "docker rmi -f dockertest.ansible.com:8080/mine"
+
+- name: Remove domain name to hosts
+ lineinfile:
+ line: "127.0.0.1 dockertest.ansible.com"
+ dest: /etc/hosts
+ state: absent
+
From c2fb0b8f9d0bccd27a08dfc7febe1d5533a1d8a4 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 18 Mar 2015 13:40:19 -0700
Subject: [PATCH 0105/3617] Some debugging for why docker tests are failing in
jenkins
---
.../test_docker/tasks/registry-tests.yml | 22 ++++++++++---------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml
index 348062234ad07a..fea9bdabf70d5f 100644
--- a/test/integration/roles/test_docker/tasks/registry-tests.yml
+++ b/test/integration/roles/test_docker/tasks/registry-tests.yml
@@ -14,26 +14,27 @@
register: image_id
- name: Tag docker image into the local registry
- shell: "docker tag {{ image_id.stdout_lines[0] }} localhost:5000/mine"
+ command: "docker tag {{ image_id.stdout_lines[0] }} localhost:5000/mine"
- name: Push docker image into the private registry
- shell: "docker push localhost:5000/mine"
+ command: "docker push localhost:5000/mine"
- name: Remove the busybox image from the local docker
- shell: "docker rmi -f busybox"
+ command: "docker rmi -f busybox"
- name: Remove the new image from the local docker
- shell: "docker rmi -f localhost:5000/mine"
+ command: "docker rmi -f localhost:5000/mine"
- name: Get number of images in docker
- shell: "docker images |wc -l"
+ command: "docker images"
register: docker_output
+- debug: var=docker_output
# docker prints a header so the header should be all that's present
- name: Check that there are no images in docker
assert:
that:
- - "'1' in docker_output.stdout_lines"
+ - "{{ docker_output.stdout_lines| length }} <= 1 "
- name: Retrieve the image from private docker registry
docker:
@@ -69,16 +70,17 @@
- "'hello world' in docker_output.stdout_lines"
- name: Remove the new image from the local docker
- shell: "docker rmi -f localhost:5000/mine"
+ command: "docker rmi -f localhost:5000/mine"
- name: Get number of images in docker
- shell: "docker images |wc -l"
+ command: "docker images"
register: docker_output
+- debug: var=docker_output
- name: Check that there are no images in docker
assert:
that:
- - "'1' in docker_output.stdout_lines"
+ - "{{ docker_output.stdout_lines| length }} <= 1"
- name: Setup nginx with a user/password
copy:
@@ -152,7 +154,7 @@
- "'hello world' in docker_output.stdout_lines"
- name: Remove the private repo image from the local docker
- shell: "docker rmi -f dockertest.ansible.com:8080/mine"
+ command: "docker rmi -f dockertest.ansible.com:8080/mine"
- name: Remove domain name to hosts
lineinfile:
From 2a967879fb40a137405e00345b53fac78e4f7c80 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 18 Mar 2015 14:05:27 -0700
Subject: [PATCH 0106/3617] Fix the removal of busybox image
---
test/integration/roles/test_docker/tasks/registry-tests.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml
index fea9bdabf70d5f..e8f0596171d4bf 100644
--- a/test/integration/roles/test_docker/tasks/registry-tests.yml
+++ b/test/integration/roles/test_docker/tasks/registry-tests.yml
@@ -20,7 +20,7 @@
command: "docker push localhost:5000/mine"
- name: Remove the busybox image from the local docker
- command: "docker rmi -f busybox"
+ command: "docker rmi -f {{ image_id.stdout_lines[0] }}"
- name: Remove the new image from the local docker
command: "docker rmi -f localhost:5000/mine"
From 3c52c36629bf74fb0e5225f6b98bf7d2d19dbe2e Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 18 Mar 2015 17:57:29 -0700
Subject: [PATCH 0107/3617] Okay, let's see if these pauses are enough to get
this passing
---
.../roles/test_docker/tasks/docker-tests.yml | 14 ++++++--
.../test_docker/tasks/registry-tests.yml | 36 +++++++++++++------
2 files changed, 38 insertions(+), 12 deletions(-)
diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml
index 10067d7ad7a5f6..383b8eb3f657a6 100644
--- a/test/integration/roles/test_docker/tasks/docker-tests.yml
+++ b/test/integration/roles/test_docker/tasks/docker-tests.yml
@@ -8,6 +8,7 @@
image: busybox
state: present
pull: missing
+ docker_api_version: "1.14"
- name: Run a small script in busybox
docker:
@@ -16,6 +17,7 @@
pull: always
command: "nc -l -p 2000 -e xargs -n1 echo hello"
detach: True
+ docker_api_version: "1.14"
- name: Get the docker container id
shell: "docker ps | grep busybox | awk '{ print $1 }'"
@@ -25,6 +27,10 @@
shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
register: container_ip
+- name: Pause a few moments because docker is not reliable
+ pause:
+ seconds: 40
+
- name: Try to access the server
shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
register: docker_output
@@ -43,6 +49,7 @@
TEST: hello
command: '/bin/sh -c "nc -l -p 2000 -e xargs -n1 echo $TEST"'
detach: True
+ docker_api_version: "1.14"
- name: Get the docker container id
shell: "docker ps | grep busybox | awk '{ print $1 }'"
@@ -52,6 +59,10 @@
shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
register: container_ip
+- name: Pause a few moments because docker is not reliable
+ pause:
+ seconds: 40
+
- name: Try to access the server
shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
register: docker_output
@@ -62,5 +73,4 @@
- "'hello world' in docker_output.stdout_lines"
- name: Remove the busybox image from the local docker
- shell: "docker rmi -f busybox"
-
+ shell: "docker rmi -f $(docker images -q)"
diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml
index e8f0596171d4bf..03d2fa0db73039 100644
--- a/test/integration/roles/test_docker/tasks/registry-tests.yml
+++ b/test/integration/roles/test_docker/tasks/registry-tests.yml
@@ -19,17 +19,16 @@
- name: Push docker image into the private registry
command: "docker push localhost:5000/mine"
-- name: Remove the busybox image from the local docker
- command: "docker rmi -f {{ image_id.stdout_lines[0] }}"
+- name: Remove containers
+ shell: "docker rm $(docker ps -aq)"
-- name: Remove the new image from the local docker
- command: "docker rmi -f localhost:5000/mine"
+- name: Remove all images from the local docker
+ shell: "docker rmi -f $(docker images -q)"
- name: Get number of images in docker
command: "docker images"
register: docker_output
-- debug: var=docker_output
# docker prints a header so the header should be all that's present
- name: Check that there are no images in docker
assert:
@@ -42,6 +41,7 @@
state: present
pull: missing
insecure_registry: True
+ docker_api_version: "1.14"
- name: Run a small script in the new image
docker:
@@ -51,6 +51,7 @@
command: "nc -l -p 2000 -e xargs -n1 echo hello"
detach: True
insecure_registry: True
+ docker_api_version: "1.14"
- name: Get the docker container id
shell: "docker ps | grep mine | awk '{ print $1 }'"
@@ -60,6 +61,10 @@
shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
register: container_ip
+- name: Pause a few moments because docker is not reliable
+ pause:
+ seconds: 40
+
- name: Try to access the server
shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
register: docker_output
@@ -69,14 +74,17 @@
that:
- "'hello world' in docker_output.stdout_lines"
-- name: Remove the new image from the local docker
- command: "docker rmi -f localhost:5000/mine"
+
+- name: Remove containers
+ shell: "docker rm $(docker ps -aq)"
+
+- name: Remove all images from the local docker
+ shell: "docker rmi -f $(docker images -q)"
- name: Get number of images in docker
command: "docker images"
register: docker_output
-- debug: var=docker_output
- name: Check that there are no images in docker
assert:
that:
@@ -135,6 +143,7 @@
state: running
command: "nc -l -p 2000 -e xargs -n1 echo hello"
detach: True
+ docker_api_version: "1.14"
- name: Get the docker container id
shell: "docker ps | grep mine | awk '{ print $1 }'"
@@ -144,6 +153,10 @@
shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
register: container_ip
+- name: Pause a few moments because docker is not reliable
+ pause:
+ seconds: 40
+
- name: Try to access the server
shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
register: docker_output
@@ -153,8 +166,11 @@
that:
- "'hello world' in docker_output.stdout_lines"
-- name: Remove the private repo image from the local docker
- command: "docker rmi -f dockertest.ansible.com:8080/mine"
+- name: Remove containers
+ shell: "docker rm $(docker ps -aq)"
+
+- name: Remove all images from the local docker
+ shell: "docker rmi -f $(docker images -q)"
- name: Remove domain name to hosts
lineinfile:
From 9dd5f8c758dd60af5fc8bb00e4961e1fa080b588 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 18 Mar 2015 18:30:10 -0700
Subject: [PATCH 0108/3617] Update core module pointer
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index ae253593e3a0e3..e338fef730abf9 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit ae253593e3a0e3339a136bf57e0a54e62229e8e6
+Subproject commit e338fef730abf94b4b128a73433c166952c3add9
From 98db6a232d13c51763322737cb2d60831201da34 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 18 Mar 2015 18:56:46 -0700
Subject: [PATCH 0109/3617] Have selinux allow docker<=>nginx communication
---
.../roles/test_docker/tasks/docker-tests.yml | 5 ++++-
.../roles/test_docker/tasks/registry-tests.yml | 14 ++++++++++++++
2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml
index 383b8eb3f657a6..33ffe6c70ca4c3 100644
--- a/test/integration/roles/test_docker/tasks/docker-tests.yml
+++ b/test/integration/roles/test_docker/tasks/docker-tests.yml
@@ -72,5 +72,8 @@
that:
- "'hello world' in docker_output.stdout_lines"
-- name: Remove the busybox image from the local docker
+- name: Remove containers
+ shell: "docker rm $(docker ps -aq)"
+
+- name: Remove all images from the local docker
shell: "docker rmi -f $(docker images -q)"
diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml
index 03d2fa0db73039..57b4d252774176 100644
--- a/test/integration/roles/test_docker/tasks/registry-tests.yml
+++ b/test/integration/roles/test_docker/tasks/registry-tests.yml
@@ -90,6 +90,20 @@
that:
- "{{ docker_output.stdout_lines| length }} <= 1"
+#
+# Private registry secured with an SSL proxy
+#
+
+- name: Set selinux to allow docker to connect to nginx
+ seboolean:
+ name: docker_connect_any
+ state: yes
+
+- name: Set selinux to allow nginx to connect to docker
+ seboolean:
+ name: httpd_can_network_connect
+ state: yes
+
- name: Setup nginx with a user/password
copy:
src: docker-registry.htpasswd
From b8efd3f777f379e69180b377017dcc31bb708e1c Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 18 Mar 2015 19:55:13 -0700
Subject: [PATCH 0110/3617] Update core module pointer
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index e338fef730abf9..76198a8223e279 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit e338fef730abf94b4b128a73433c166952c3add9
+Subproject commit 76198a8223e279bebb2aeccc452c26e66ad9b747
From 73f5a1fcddfca003a6e32741eb06f11ae29efa53 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 18 Mar 2015 20:25:53 -0700
Subject: [PATCH 0111/3617] Update the extras module pointer
---
lib/ansible/modules/extras | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
index 696bc60caad2ea..cb848fcd9ec836 160000
--- a/lib/ansible/modules/extras
+++ b/lib/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit 696bc60caad2ea96c0a70c8091e24b2da060f35c
+Subproject commit cb848fcd9ec8364210fc05a5a7addd955b8a2529
From 85cfe1bd52b20cb0b35255ef37a7b2095bd3aec6 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 19 Mar 2015 11:17:16 -0400
Subject: [PATCH 0112/3617] added google addwords tag
---
docsite/_themes/srtd/layout.html | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html
index d073c4c22f8996..ce44c4284da0da 100644
--- a/docsite/_themes/srtd/layout.html
+++ b/docsite/_themes/srtd/layout.html
@@ -113,6 +113,24 @@
}
+
+
+
+
+
+
From 456f83962d2233cb0b367c5b5749b2b2c7e4455e Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 19 Mar 2015 14:31:00 -0400
Subject: [PATCH 0113/3617] ignore PE methods that are not sudo for checksums
until we get them working universally
---
lib/ansible/runner/__init__.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 5c5554816179f8..8e326935b09dff 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -1243,7 +1243,13 @@ def _remote_checksum(self, conn, tmp, path, inject):
python_interp = 'python'
cmd = conn.shell.checksum(path, python_interp)
- data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True)
+
+ #TODO: remove this horrible hack and find way to get checksum to work with other privilege escalation methods
+ if self.become_method == 'sudo':
+ sudoable = True
+ else:
+ sudoable = False
+ data = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable)
data2 = utils.last_non_blank_line(data['stdout'])
try:
if data2 == '':
From ac1c49302dffb8b7d261df1c9199815a9590c480 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 19 Mar 2015 12:50:46 -0700
Subject: [PATCH 0114/3617] Update core modules pointer
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 76198a8223e279..fb1c92ffa4ff7f 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 76198a8223e279bebb2aeccc452c26e66ad9b747
+Subproject commit fb1c92ffa4ff7f6c82944806ca6da3d71b7af0d5
From d1641f292502d77a31594b0209fc88f25ca13772 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 19 Mar 2015 16:10:01 -0700
Subject: [PATCH 0115/3617] Remove the multiline string for arguments not.
---
docsite/rst/playbooks_intro.rst | 4 ----
1 file changed, 4 deletions(-)
diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst
index d0c702c071252d..4751467b016857 100644
--- a/docsite/rst/playbooks_intro.rst
+++ b/docsite/rst/playbooks_intro.rst
@@ -106,10 +106,6 @@ YAML dictionaries to supply the modules with their key=value arguments.::
name: httpd
state: restarted
-.. note::
-
- The above example using YAML dictionaries for module arguments can also be accomplished using the YAML multiline string syntax with the `>` character but this can lead to string quoting errors.
-
Below, we'll break down what the various features of the playbook language are.
.. _playbook_basics:
From 6264eb4e02eff67a1701b7e578073a5bf9adba68 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 19 Mar 2015 22:45:47 -0700
Subject: [PATCH 0116/3617] Pull in ec2_asg fixes from core modules
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index fb1c92ffa4ff7f..a78de5080109ee 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit fb1c92ffa4ff7f6c82944806ca6da3d71b7af0d5
+Subproject commit a78de5080109eeaf46d5e42f9bbeb4f02d510627
From 0c57bed728a90d20d8c5686a1cb83170dbf088e2 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 19 Mar 2015 17:18:23 -0400
Subject: [PATCH 0117/3617] now add_host loads hostvars
---
lib/ansible/runner/action_plugins/add_host.py | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/lib/ansible/runner/action_plugins/add_host.py b/lib/ansible/runner/action_plugins/add_host.py
index 0e49e928dbf95e..2fcea6cd5c7ab8 100644
--- a/lib/ansible/runner/action_plugins/add_host.py
+++ b/lib/ansible/runner/action_plugins/add_host.py
@@ -55,7 +55,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
if ":" in new_name:
new_name, new_port = new_name.split(":")
args['ansible_ssh_port'] = new_port
-
+
# redefine inventory and get group "all"
inventory = self.runner.inventory
allgroup = inventory.get_group('all')
@@ -72,10 +72,10 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
# Add any variables to the new_host
for k in args.keys():
if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
- new_host.set_variable(k, args[k])
-
-
- groupnames = args.get('groupname', args.get('groups', args.get('group', '')))
+ new_host.set_variable(k, args[k])
+
+
+ groupnames = args.get('groupname', args.get('groups', args.get('group', '')))
# add it to the group if that was specified
if groupnames:
for group_name in groupnames.split(","):
@@ -95,13 +95,17 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
vv("added host to group via add_host module: %s" % group_name)
result['new_groups'] = groupnames.split(",")
-
+
+
+ # actually load host vars
+ new_host.vars = inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password)
+
result['new_host'] = new_name
# clear pattern caching completely since it's unpredictable what
# patterns may have referenced the group
inventory.clear_pattern_cache()
-
+
return ReturnData(conn=conn, comm_ok=True, result=result)
From a53cf9d6fae511fb3a9444cca5c9afde5a1ea6ad Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 20 Mar 2015 11:22:07 -0400
Subject: [PATCH 0118/3617] now correctly aplies add_host passed variables last
to override existing vars.
---
lib/ansible/runner/action_plugins/add_host.py | 11 +++++------
test/integration/unicode.yml | 6 ++++++
2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/lib/ansible/runner/action_plugins/add_host.py b/lib/ansible/runner/action_plugins/add_host.py
index 2fcea6cd5c7ab8..72172fcaec9991 100644
--- a/lib/ansible/runner/action_plugins/add_host.py
+++ b/lib/ansible/runner/action_plugins/add_host.py
@@ -69,12 +69,6 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
inventory._hosts_cache[new_name] = new_host
allgroup.add_host(new_host)
- # Add any variables to the new_host
- for k in args.keys():
- if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
- new_host.set_variable(k, args[k])
-
-
groupnames = args.get('groupname', args.get('groups', args.get('group', '')))
# add it to the group if that was specified
if groupnames:
@@ -100,6 +94,11 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
# actually load host vars
new_host.vars = inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password)
+ # Add any passed variables to the new_host
+ for k in args.keys():
+ if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
+ new_host.set_variable(k, args[k])
+
result['new_host'] = new_name
# clear pattern caching completely since it's unpredictable what
diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml
index 2889155055d647..6dca7fe490b16e 100644
--- a/test/integration/unicode.yml
+++ b/test/integration/unicode.yml
@@ -42,6 +42,12 @@
debug: var=unicode_host_var
+- name: 'A play for hosts in group: ĪīĬĭ'
+ hosts: 'ĪīĬĭ'
+ gather_facts: false
+ tasks:
+ - debug: var=hostvars[inventory_hostname]
+
- name: 'A play for hosts in group: ĪīĬĭ'
hosts: 'ĪīĬĭ'
gather_facts: true
From c49685b753b63332e3f648795839d2067fa36205 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 20 Mar 2015 11:24:35 -0400
Subject: [PATCH 0119/3617] removed debug play from tests
---
test/integration/unicode.yml | 7 -------
1 file changed, 7 deletions(-)
diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml
index 6dca7fe490b16e..b04d760182c9ef 100644
--- a/test/integration/unicode.yml
+++ b/test/integration/unicode.yml
@@ -41,13 +41,6 @@
- name: 'A task with unicode host vars'
debug: var=unicode_host_var
-
-- name: 'A play for hosts in group: ĪīĬĭ'
- hosts: 'ĪīĬĭ'
- gather_facts: false
- tasks:
- - debug: var=hostvars[inventory_hostname]
-
- name: 'A play for hosts in group: ĪīĬĭ'
hosts: 'ĪīĬĭ'
gather_facts: true
From d4ebe7750204cb3d61449ad22fab6aef685e961e Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 20 Mar 2015 11:34:18 -0400
Subject: [PATCH 0120/3617] now use combine vars to preserve existing cached
host vars
---
lib/ansible/runner/action_plugins/add_host.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/runner/action_plugins/add_host.py b/lib/ansible/runner/action_plugins/add_host.py
index 72172fcaec9991..995b205b628553 100644
--- a/lib/ansible/runner/action_plugins/add_host.py
+++ b/lib/ansible/runner/action_plugins/add_host.py
@@ -20,7 +20,7 @@
from ansible.callbacks import vv
from ansible.errors import AnsibleError as ae
from ansible.runner.return_data import ReturnData
-from ansible.utils import parse_kv
+from ansible.utils import parse_kv, combine_vars
from ansible.inventory.host import Host
from ansible.inventory.group import Group
@@ -92,7 +92,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
# actually load host vars
- new_host.vars = inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password)
+ new_host.vars = combine_vars(new_host.vars, inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password))
# Add any passed variables to the new_host
for k in args.keys():
From 8a5f162e29f45ce427606706f7e3908ec4ca2bda Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Gross?=
Date: Fri, 20 Mar 2015 16:45:54 +0100
Subject: [PATCH 0121/3617] [patch] fix "remote_src" behavior according patch
module documentation.
Patch documentation says "remote_src" is False by default. That was not
the case in the action plugin.
---
lib/ansible/runner/action_plugins/patch.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/runner/action_plugins/patch.py b/lib/ansible/runner/action_plugins/patch.py
index dbba4c53dd7889..ebd0c6cf59454e 100644
--- a/lib/ansible/runner/action_plugins/patch.py
+++ b/lib/ansible/runner/action_plugins/patch.py
@@ -32,7 +32,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
src = options.get('src', None)
dest = options.get('dest', None)
- remote_src = utils.boolean(options.get('remote_src', 'yes'))
+ remote_src = utils.boolean(options.get('remote_src', 'no'))
if src is None:
result = dict(failed=True, msg="src is required")
From 6888f1ccd9a60d656b868317c9fa46e9524bd3f2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Gross?=
Date: Fri, 20 Mar 2015 17:13:50 +0100
Subject: [PATCH 0122/3617] [patch] Use _make_tmp_path to prevent from copying
full patch file path.
---
lib/ansible/runner/action_plugins/patch.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/runner/action_plugins/patch.py b/lib/ansible/runner/action_plugins/patch.py
index ebd0c6cf59454e..29d4f7eca5a190 100644
--- a/lib/ansible/runner/action_plugins/patch.py
+++ b/lib/ansible/runner/action_plugins/patch.py
@@ -47,7 +47,10 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **
else:
src = utils.path_dwim(self.runner.basedir, src)
- tmp_src = tmp + src
+ if tmp is None or "-tmp-" not in tmp:
+ tmp = self.runner._make_tmp_path(conn)
+
+ tmp_src = conn.shell.join_path(tmp, os.path.basename(src))
conn.put_file(src, tmp_src)
if self.runner.become and self.runner.become_user != 'root':
From d4eddabb2a04b61cf4f880b46b3642c4c9a4987d Mon Sep 17 00:00:00 2001
From: Eri Bastos
Date: Fri, 20 Mar 2015 14:40:44 -0300
Subject: [PATCH 0123/3617] Patch for bug #10485 - ansible_distribution fact
populates as 'RedHat' on Oracle Linux systems
---
lib/ansible/module_utils/facts.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 93fe68786d80cf..40be989241f6d2 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -87,7 +87,8 @@ class Facts(object):
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
- OSDIST_LIST = ( ('/etc/redhat-release', 'RedHat'),
+ OSDIST_LIST = ( ('/etc/oracle-release', 'Oracle Linux'),
+ ('/etc/redhat-release', 'RedHat'),
('/etc/vmware-release', 'VMwareESX'),
('/etc/openwrt_release', 'OpenWrt'),
('/etc/system-release', 'OtherLinux'),
@@ -287,6 +288,13 @@ def get_distribution_facts(self):
# Once we determine the value is one of these distros
# we trust the values are always correct
break
+ elif name == 'Oracle Linux':
+ data = get_file_content(path)
+ if 'Oracle Linux' in data:
+ self.facts['distribution'] = name
+ else:
+ self.facts['distribution'] = data.split()[0]
+ break
elif name == 'RedHat':
data = get_file_content(path)
if 'Red Hat' in data:
From b186676e381dedc7c38b0488cd586db4711880c7 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Fri, 20 Mar 2015 11:30:57 -0700
Subject: [PATCH 0124/3617] Clean up jsonify and make json_dict_*to* more
flexible at the same time.
---
v2/ansible/module_utils/basic.py | 35 ++++++++++++++++++--------------
1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/v2/ansible/module_utils/basic.py b/v2/ansible/module_utils/basic.py
index 8c424663ff9df4..6c7217bd8838f6 100644
--- a/v2/ansible/module_utils/basic.py
+++ b/v2/ansible/module_utils/basic.py
@@ -65,6 +65,7 @@
import platform
import errno
import tempfile
+from itertools import imap, repeat
try:
import json
@@ -234,7 +235,7 @@ def load_platform_subclass(cls, *args, **kwargs):
return super(cls, subclass).__new__(subclass)
-def json_dict_unicode_to_bytes(d):
+def json_dict_unicode_to_bytes(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -242,17 +243,17 @@ def json_dict_unicode_to_bytes(d):
'''
if isinstance(d, unicode):
- return d.encode('utf-8')
+ return d.encode(encoding)
elif isinstance(d, dict):
- return dict(map(json_dict_unicode_to_bytes, d.iteritems()))
+ return dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding)))
elif isinstance(d, list):
- return list(map(json_dict_unicode_to_bytes, d))
+ return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
elif isinstance(d, tuple):
- return tuple(map(json_dict_unicode_to_bytes, d))
+ return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
else:
return d
-def json_dict_bytes_to_unicode(d):
+def json_dict_bytes_to_unicode(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -260,13 +261,13 @@ def json_dict_bytes_to_unicode(d):
'''
if isinstance(d, str):
- return unicode(d, 'utf-8')
+ return unicode(d, encoding)
elif isinstance(d, dict):
- return dict(map(json_dict_bytes_to_unicode, d.iteritems()))
+ return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding)))
elif isinstance(d, list):
- return list(map(json_dict_bytes_to_unicode, d))
+ return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
elif isinstance(d, tuple):
- return tuple(map(json_dict_bytes_to_unicode, d))
+ return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
else:
return d
@@ -1189,13 +1190,17 @@ def boolean(self, arg):
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
def jsonify(self, data):
- for encoding in ("utf-8", "latin-1", "unicode_escape"):
+ for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding)
- # Old systems using simplejson module does not support encoding keyword.
- except TypeError, e:
- return json.dumps(data)
- except UnicodeDecodeError, e:
+ # Old systems using old simplejson module does not support encoding keyword.
+ except TypeError:
+ try:
+ new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
+ except UnicodeDecodeError:
+ continue
+ return json.dumps(new_data)
+ except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
From 8d8c4c061572478cd09e0e071fa2711ee3bbb5db Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Fri, 20 Mar 2015 11:39:58 -0700
Subject: [PATCH 0125/3617] Update modules for asg tag fix
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index a78de5080109ee..4ce57ee1217344 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit a78de5080109eeaf46d5e42f9bbeb4f02d510627
+Subproject commit 4ce57ee12173449179fc52a82849888488c9b72f
From 393246fdd3ebd75eaa23de0f84efe71bfec5c305 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Fri, 20 Mar 2015 14:13:51 -0500
Subject: [PATCH 0126/3617] Make v2 playbook class attributes inheritable
Also fixing some other become-related things
---
v2/ansible/executor/connection_info.py | 16 +++----
v2/ansible/playbook/base.py | 27 ++++++++---
v2/ansible/playbook/become.py | 38 +++++++++++++++
v2/ansible/playbook/block.py | 46 ++++++++++++++-----
v2/ansible/playbook/helpers.py | 2 +
v2/ansible/playbook/play.py | 1 +
v2/ansible/playbook/role/__init__.py | 19 ++++----
v2/ansible/playbook/role/definition.py | 8 +++-
v2/ansible/playbook/task.py | 32 ++++++++-----
v2/samples/roles/test_become_r1/meta/main.yml | 1 +
.../roles/test_become_r1/tasks/main.yml | 2 +
v2/samples/roles/test_become_r2/meta/main.yml | 3 ++
.../roles/test_become_r2/tasks/main.yml | 2 +
v2/samples/test_become.yml | 6 +++
14 files changed, 152 insertions(+), 51 deletions(-)
create mode 100644 v2/samples/roles/test_become_r1/meta/main.yml
create mode 100644 v2/samples/roles/test_become_r1/tasks/main.yml
create mode 100644 v2/samples/roles/test_become_r2/meta/main.yml
create mode 100644 v2/samples/roles/test_become_r2/tasks/main.yml
diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py
index 26a14a23f9d1d4..165cd1245fb44c 100644
--- a/v2/ansible/executor/connection_info.py
+++ b/v2/ansible/executor/connection_info.py
@@ -157,13 +157,10 @@ def set_task_override(self, task):
new_info.copy(self)
for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'):
- attr_val = None
if hasattr(task, attr):
attr_val = getattr(task, attr)
- if task._block and hasattr(task._block, attr) and not attr_val:
- attr_val = getattr(task._block, attr)
- if attr_val:
- setattr(new_info, attr, attr_val)
+ if attr_val:
+ setattr(new_info, attr, attr_val)
return new_info
@@ -184,6 +181,7 @@ def make_become_cmd(self, cmd, executable, become_settings=None):
executable = executable or '$SHELL'
+ success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
if self.become:
if self.become_method == 'sudo':
# Rather than detect if sudo wants a password this time, -k makes sudo always ask for
@@ -195,23 +193,23 @@ def make_become_cmd(self, cmd, executable, become_settings=None):
exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE)
flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS)
becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
- (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, pipes.quote('echo %s; %s' % (success_key, cmd)))
+ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd)
elif self.become_method == 'su':
exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE)
flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS)
- becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, pipes.quote('echo %s; %s' % (success_key, cmd)))
+ becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd)
elif self.become_method == 'pbrun':
exe = become_settings.get('pbrun_exe', 'pbrun')
flags = become_settings.get('pbrun_flags', '')
- becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key, cmd)))
+ becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, success_cmd)
elif self.become_method == 'pfexec':
exe = become_settings.get('pfexec_exe', 'pbrun')
flags = become_settings.get('pfexec_flags', '')
# No user as it uses it's own exec_attr to figure it out
- becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key, cmd)))
+ becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
else:
raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py
index 949e6a09fdc652..e32da5d8c5a90c 100644
--- a/v2/ansible/playbook/base.py
+++ b/v2/ansible/playbook/base.py
@@ -72,11 +72,20 @@ def _get_base_attributes(self):
def munge(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
+ def _get_base_classes_munge(target_class):
+ base_classes = list(target_class.__bases__[:])
+ for base_class in target_class.__bases__:
+ base_classes.extend( _get_base_classes_munge(base_class))
+ return base_classes
+
+ base_classes = list(self.__class__.__bases__[:])
for base_class in self.__class__.__bases__:
- method = getattr(self, ("_munge_%s" % base_class.__name__).lower(), None)
- if method:
- ds = method(ds)
+ base_classes.extend(_get_base_classes_munge(base_class))
+ for base_class in base_classes:
+ method = getattr(self, "_munge_%s" % base_class.__name__.lower(), None)
+ if method:
+ return method(ds)
return ds
def load_data(self, ds, variable_manager=None, loader=None):
@@ -271,15 +280,21 @@ def __getattr__(self, needle):
# optionally allowing masking by accessors
if not needle.startswith("_"):
- method = "get_%s" % needle
- if method in self.__dict__:
- return method(self)
+ method = "_get_attr_%s" % needle
+ if method in dir(self):
+ return getattr(self, method)()
if needle in self._attributes:
return self._attributes[needle]
raise AttributeError("attribute not found in %s: %s" % (self.__class__.__name__, needle))
+ def __setattr__(self, needle, value):
+ if hasattr(self, '_attributes') and needle in self._attributes:
+ self._attributes[needle] = value
+ else:
+ super(Base, self).__setattr__(needle, value)
+
def __getstate__(self):
return self.serialize()
diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py
index 0b0ad10176002e..67eb52b15eeedd 100644
--- a/v2/ansible/playbook/become.py
+++ b/v2/ansible/playbook/become.py
@@ -95,3 +95,41 @@ def _munge_become(self, ds):
ds['become_user'] = C.DEFAULT_BECOME_USER
return ds
+
+ def _get_attr_become(self):
+ '''
+ Override for the 'become' getattr fetcher, used from Base.
+ '''
+ if hasattr(self, '_get_parent_attribute'):
+ return self._get_parent_attribute('become')
+ else:
+ return self._attributes['become']
+
+ def _get_attr_become_method(self):
+ '''
+ Override for the 'become_method' getattr fetcher, used from Base.
+ '''
+ if hasattr(self, '_get_parent_attribute'):
+ return self._get_parent_attribute('become_method')
+ else:
+ return self._attributes['become_method']
+
+ def _get_attr_become_user(self):
+ '''
+ Override for the 'become_user' getattr fetcher, used from Base.
+ '''
+ if hasattr(self, '_get_parent_attribute'):
+ return self._get_parent_attribute('become_user')
+ else:
+ return self._attributes['become_user']
+
+ def _get_attr_become_password(self):
+ '''
+ Override for the 'become_password' getattr fetcher, used from Base.
+ '''
+ if hasattr(self, '_get_parent_attribute'):
+ return self._get_parent_attribute('become_password')
+ else:
+ return self._attributes['become_password']
+
+
diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py
index fa67b6ae1b99d8..2946e83f5ef394 100644
--- a/v2/ansible/playbook/block.py
+++ b/v2/ansible/playbook/block.py
@@ -131,23 +131,24 @@ def _load_always(self, attr, ds):
# use_handlers=self._use_handlers,
# )
- def compile(self):
- '''
- Returns the task list for this object
- '''
-
- task_list = []
- for task in self.block:
- # FIXME: evaulate task tags/conditionals here
- task_list.extend(task.compile())
-
- return task_list
-
def copy(self):
+ def _dupe_task_list(task_list, new_block):
+ new_task_list = []
+ for task in task_list:
+ new_task = task.copy(exclude_block=True)
+ new_task._block = new_block
+ new_task_list.append(new_task)
+ return new_task_list
+
new_me = super(Block, self).copy()
new_me._use_handlers = self._use_handlers
new_me._dep_chain = self._dep_chain[:]
+ new_me.block = _dupe_task_list(self.block or [], new_me)
+ new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
+ new_me.always = _dupe_task_list(self.always or [], new_me)
+ print("new block tasks are: %s" % new_me.block)
+
new_me._parent_block = None
if self._parent_block:
new_me._parent_block = self._parent_block.copy()
@@ -252,3 +253,24 @@ def set_loader(self, loader):
for dep in self._dep_chain:
dep.set_loader(loader)
+ def _get_parent_attribute(self, attr):
+ '''
+ Generic logic to get the attribute or parent attribute for a block value.
+ '''
+
+ value = self._attributes[attr]
+ if not value:
+ if self._parent_block:
+ value = getattr(self._block, attr)
+ elif self._role:
+ value = getattr(self._role, attr)
+ if not value and len(self._dep_chain):
+ reverse_dep_chain = self._dep_chain[:]
+ reverse_dep_chain.reverse()
+ for dep in reverse_dep_chain:
+ value = getattr(dep, attr)
+ if value:
+ break
+
+ return value
+
diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py
index 0e147205578406..3ea559d7997b5d 100644
--- a/v2/ansible/playbook/helpers.py
+++ b/v2/ansible/playbook/helpers.py
@@ -37,6 +37,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use
assert type(ds) in (list, NoneType)
block_list = []
+ print("in load list of blocks, ds is: %s" % ds)
if ds:
for block in ds:
b = Block.load(
@@ -50,6 +51,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use
)
block_list.append(b)
+ print("-> returning block list: %s" % block_list)
return block_list
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
index cbe4e038617a82..190189aa178afe 100644
--- a/v2/ansible/playbook/play.py
+++ b/v2/ansible/playbook/play.py
@@ -219,6 +219,7 @@ def compile(self):
block_list.extend(self.tasks)
block_list.extend(self.post_tasks)
+ print("block list is: %s" % block_list)
return block_list
def get_vars(self):
diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py
index dfb1f70addf036..21bcd21803e423 100644
--- a/v2/ansible/playbook/role/__init__.py
+++ b/v2/ansible/playbook/role/__init__.py
@@ -30,6 +30,7 @@
from ansible.parsing import DataLoader
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
+from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
from ansible.playbook.role.include import RoleInclude
@@ -69,7 +70,7 @@ def hash_params(params):
ROLE_CACHE = dict()
-class Role(Base, Conditional, Taggable):
+class Role(Base, Become, Conditional, Taggable):
def __init__(self):
self._role_name = None
@@ -136,6 +137,12 @@ def _load_role_data(self, role_include, parent_role=None):
if parent_role:
self.add_parent(parent_role)
+ # copy over all field attributes, except for when and tags, which
+ # are special cases and need to preserve pre-existing values
+ for (attr_name, _) in iteritems(self._get_base_attributes()):
+ if attr_name not in ('when', 'tags'):
+ setattr(self, attr_name, getattr(role_include, attr_name))
+
current_when = getattr(self, 'when')[:]
current_when.extend(role_include.when)
setattr(self, 'when', current_when)
@@ -144,10 +151,6 @@ def _load_role_data(self, role_include, parent_role=None):
current_tags.extend(role_include.tags)
setattr(self, 'tags', current_tags)
- # save the current base directory for the loader and set it to the current role path
- #cur_basedir = self._loader.get_basedir()
- #self._loader.set_basedir(self._role_path)
-
# load the role's files, if they exist
library = os.path.join(self._role_path, 'library')
if os.path.isdir(library):
@@ -179,9 +182,6 @@ def _load_role_data(self, role_include, parent_role=None):
elif self._default_vars is None:
self._default_vars = dict()
- # and finally restore the previous base directory
- #self._loader.set_basedir(cur_basedir)
-
def _load_role_yaml(self, subdir):
file_path = os.path.join(self._role_path, subdir)
if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
@@ -313,9 +313,6 @@ def compile(self, dep_chain=[]):
for dep in deps:
dep_blocks = dep.compile(dep_chain=new_dep_chain)
for dep_block in dep_blocks:
- # since we're modifying the task, and need it to be unique,
- # we make a copy of it here and assign the dependency chain
- # to the copy, then append the copy to the task list.
new_dep_block = dep_block.copy()
new_dep_block._dep_chain = new_dep_chain
block_list.append(new_dep_block)
diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py
index d52c6795fb92d4..bc1a0daacf2ae7 100644
--- a/v2/ansible/playbook/role/definition.py
+++ b/v2/ansible/playbook/role/definition.py
@@ -28,6 +28,7 @@
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
+from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.utils.path import unfrackpath
@@ -36,7 +37,7 @@
__all__ = ['RoleDefinition']
-class RoleDefinition(Base, Conditional, Taggable):
+class RoleDefinition(Base, Become, Conditional, Taggable):
_role = FieldAttribute(isa='string')
@@ -57,6 +58,9 @@ def munge(self, ds):
assert isinstance(ds, dict) or isinstance(ds, string_types)
+ if isinstance(ds, dict):
+ ds = super(RoleDefinition, self).munge(ds)
+
# we create a new data structure here, using the same
# object used internally by the YAML parsing code so we
# can preserve file:line:column information if it exists
@@ -88,7 +92,7 @@ def munge(self, ds):
self._ds = ds
# and return the cleaned-up data structure
- return super(RoleDefinition, self).munge(new_ds)
+ return new_ds
def _load_role_name(self, ds):
'''
diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py
index 79ec2df3401ad5..ab66898242cb5a 100644
--- a/v2/ansible/playbook/task.py
+++ b/v2/ansible/playbook/task.py
@@ -210,20 +210,21 @@ def get_vars(self):
del all_vars['when']
return all_vars
- def compile(self):
- '''
- For tasks, this is just a dummy method returning an array
- with 'self' in it, so we don't have to care about task types
- further up the chain.
- '''
-
- return [self]
-
- def copy(self):
+ # no longer used, as blocks are the lowest level of compilation now
+ #def compile(self):
+ # '''
+ # For tasks, this is just a dummy method returning an array
+ # with 'self' in it, so we don't have to care about task types
+ # further up the chain.
+ # '''
+ #
+ # return [self]
+
+ def copy(self, exclude_block=False):
new_me = super(Task, self).copy()
new_me._block = None
- if self._block:
+ if self._block and not exclude_block:
new_me._block = self._block.copy()
new_me._role = None
@@ -309,3 +310,12 @@ def set_loader(self, loader):
if self._task_include:
self._task_include.set_loader(loader)
+ def _get_parent_attribute(self, attr):
+ '''
+ Generic logic to get the attribute or parent attribute for a task value.
+ '''
+ value = self._attributes[attr]
+ if not value and self._block:
+ value = getattr(self._block, attr)
+ return value
+
diff --git a/v2/samples/roles/test_become_r1/meta/main.yml b/v2/samples/roles/test_become_r1/meta/main.yml
new file mode 100644
index 00000000000000..603a2d53a2507f
--- /dev/null
+++ b/v2/samples/roles/test_become_r1/meta/main.yml
@@ -0,0 +1 @@
+allow_duplicates: yes
diff --git a/v2/samples/roles/test_become_r1/tasks/main.yml b/v2/samples/roles/test_become_r1/tasks/main.yml
new file mode 100644
index 00000000000000..9231d0af98a26d
--- /dev/null
+++ b/v2/samples/roles/test_become_r1/tasks/main.yml
@@ -0,0 +1,2 @@
+- debug: msg="this is test_become_r1"
+- command: whoami
diff --git a/v2/samples/roles/test_become_r2/meta/main.yml b/v2/samples/roles/test_become_r2/meta/main.yml
new file mode 100644
index 00000000000000..9304df73a0db9b
--- /dev/null
+++ b/v2/samples/roles/test_become_r2/meta/main.yml
@@ -0,0 +1,3 @@
+allow_duplicates: yes
+dependencies:
+ - test_become_r1
diff --git a/v2/samples/roles/test_become_r2/tasks/main.yml b/v2/samples/roles/test_become_r2/tasks/main.yml
new file mode 100644
index 00000000000000..01d6d313852a54
--- /dev/null
+++ b/v2/samples/roles/test_become_r2/tasks/main.yml
@@ -0,0 +1,2 @@
+- debug: msg="this is test_become_r2"
+- command: whoami
diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml
index 4b02563ca79257..eb527e595958d2 100644
--- a/v2/samples/test_become.yml
+++ b/v2/samples/test_become.yml
@@ -1,8 +1,14 @@
- hosts: all
gather_facts: no
+ roles:
+ - { role: test_become_r2 }
+ - { role: test_become_r2, sudo_user: testing }
tasks:
+ - command: whoami
- command: whoami
become_user: testing
+ - block:
+ - command: whoami
- block:
- command: whoami
become_user: testing
From 94909bd4a2ce31d13378980b126953dcf38f555a Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 13 Mar 2015 11:43:02 -0400
Subject: [PATCH 0127/3617] Added return values documentation to modules
---
docsite/rst/common_return_values.rst | 47 ++++++++++++++++++++++++++++
hacking/module_formatter.py | 1 +
hacking/templates/rst.j2 | 19 +++++++++--
3 files changed, 65 insertions(+), 2 deletions(-)
create mode 100644 docsite/rst/common_return_values.rst
diff --git a/docsite/rst/common_return_values.rst b/docsite/rst/common_return_values.rst
new file mode 100644
index 00000000000000..ebee58c1c25900
--- /dev/null
+++ b/docsite/rst/common_return_values.rst
@@ -0,0 +1,47 @@
+Common Return Values
+====================
+
+.. contents:: Topics
+
+Ansible modules normally return a data structure that can be registered into a variable,
+or seen directly when using the `ansible` program as output.
+
+.. _facts:
+
+Facts
+`````
+
+Some modules return 'facts' to ansible (i.e setup), this is done through a 'ansible_facts' key and anything inside
+will automatically be available for the current host directly as a variable and there is no need to
+register this data.
+
+
+.. _status:
+
+Status
+``````
+
+Every module must return a status, saying if the module was successful, if anything changed or not. Ansible itself
+will return a status if it skips the module due to a user condition (when: ) or running in check mode when the module
+does not support it.
+
+
+.. _other:
+
+Other common returns
+````````````````````
+
+It is common on failure or success to return a 'msg' that either explains the failure or makes a note about the execution.
+Some modules, specifically those that execute shell or commands directly, will return stdout and stderr, if ansible sees
+a stdout in the results it will append a stdout_lines which is just a list or the lines in stdout.
+
+.. seealso::
+
+ :doc:`modules`
+ Learn about available modules
+ `GitHub modules directory `_
+ Browse source of core modules
+ `Mailing List `_
+ Development mailing list
+ `irc.freenode.net `_
+ #ansible IRC chat channel
diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py
index 1bc83ad93049a2..6d595c634d6ba4 100755
--- a/hacking/module_formatter.py
+++ b/hacking/module_formatter.py
@@ -289,6 +289,7 @@ def process_module(module, options, env, template, outputname, module_map, alias
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['ansible_version'] = options.ansible_version
doc['plainexamples'] = examples #plain text
+ doc['returndocs'] = returndocs
# here is where we build the table of contents...
diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2
index e5562d3e56b136..122cebb590e71f 100644
--- a/hacking/templates/rst.j2
+++ b/hacking/templates/rst.j2
@@ -106,6 +106,21 @@ Examples
{% endif %}
{% endif %}
+
+{% if returndocs %}
+Return Values
+-------------
+
+Common return values are documented here ::doc::`common_return_values`, the following are the fields unique to this module:
+
+.. raw:: html
+
+@{ returndocs }@
+
+
+::
+{% endif %}
+
{% if notes %}
{% for note in notes %}
.. note:: @{ note | convert_symbols_to_format }@
@@ -120,7 +135,7 @@ This is a Core Module
---------------------
This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo.
-
+
If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
@@ -135,7 +150,7 @@ This is an Extras Module
------------------------
This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo.
-
+
If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
From 690d227034354a8f6cc286de029344a70cfb9830 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 13 Mar 2015 11:45:22 -0400
Subject: [PATCH 0128/3617] extended return value explanation
---
docsite/rst/common_return_values.rst | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/docsite/rst/common_return_values.rst b/docsite/rst/common_return_values.rst
index ebee58c1c25900..38a6917233989b 100644
--- a/docsite/rst/common_return_values.rst
+++ b/docsite/rst/common_return_values.rst
@@ -3,8 +3,9 @@ Common Return Values
.. contents:: Topics
-Ansible modules normally return a data structure that can be registered into a variable,
-or seen directly when using the `ansible` program as output.
+Ansible modules normally return a data structure that can be registered into a variable, or seen directly when using
+the `ansible` program as output. Here we document the values common to all modules, each module can optionally document
+it's own unique returns. If these docs exist they will be visible through ansible-doc and https://docs.ansible.com.
.. _facts:
From 2cacac4b23c6979daf8e037738d152afac78899d Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 13 Mar 2015 12:17:15 -0400
Subject: [PATCH 0129/3617] minor adjustments to formatting
---
hacking/templates/rst.j2 | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2
index 122cebb590e71f..6d3c21f4240804 100644
--- a/hacking/templates/rst.j2
+++ b/hacking/templates/rst.j2
@@ -114,11 +114,15 @@ Return Values
Common return values are documented here ::doc::`common_return_values`, the following are the fields unique to this module:
.. raw:: html
-
-@{ returndocs }@
-
+
+
+
+ @{ returndocs }@
+
+
::
+
{% endif %}
{% if notes %}
From 64b447f01bf5338195627eff2fec4e62257f6f02 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 13 Mar 2015 12:22:55 -0400
Subject: [PATCH 0130/3617] grammar correction
---
docsite/rst/common_return_values.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docsite/rst/common_return_values.rst b/docsite/rst/common_return_values.rst
index 38a6917233989b..ff2b92b4af0a92 100644
--- a/docsite/rst/common_return_values.rst
+++ b/docsite/rst/common_return_values.rst
@@ -5,7 +5,7 @@ Common Return Values
Ansible modules normally return a data structure that can be registered into a variable, or seen directly when using
the `ansible` program as output. Here we document the values common to all modules, each module can optionally document
-it's own unique returns. If these docs exist they will be visible through ansible-doc and https://docs.ansible.com.
+its own unique returns. If these docs exist they will be visible through ansible-doc and https://docs.ansible.com.
.. _facts:
From c3076b84788f78a075764e4d9e8fb28fef5db60c Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 20 Mar 2015 16:54:22 -0400
Subject: [PATCH 0131/3617] added module returnval documentation to web docs
---
hacking/module_formatter.py | 5 +++-
hacking/templates/rst.j2 | 53 ++++++++++++++++++++++++++++++++-----
2 files changed, 50 insertions(+), 8 deletions(-)
diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py
index 6d595c634d6ba4..c3aca94949c2bf 100755
--- a/hacking/module_formatter.py
+++ b/hacking/module_formatter.py
@@ -289,7 +289,10 @@ def process_module(module, options, env, template, outputname, module_map, alias
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['ansible_version'] = options.ansible_version
doc['plainexamples'] = examples #plain text
- doc['returndocs'] = returndocs
+ if returndocs:
+ doc['returndocs'] = yaml.safe_load(returndocs)
+ else:
+ doc['returndocs'] = None
# here is where we build the table of contents...
diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2
index 6d3c21f4240804..6873c3fea5855d 100644
--- a/hacking/templates/rst.j2
+++ b/hacking/templates/rst.j2
@@ -111,18 +111,57 @@ Examples
Return Values
-------------
-Common return values are documented here ::doc::`common_return_values`, the following are the fields unique to this module:
+Common return values are documented here :doc:`common_return_values`, the following are the fields unique to this module:
.. raw:: html
-
-
- @{ returndocs }@
-
-
+
+
+
name
+
despcription
+
returned
+
type
+
sample
+
-::
+ {% for entry in returndocs %}
+
+
@{ entry }@
+
@{ returndocs[entry].description }@
+
@{ returndocs[entry].returned }@
+
@{ returndocs[entry].type }@
+
@{ returndocs[entry].sample}@
+
+ {% if returndocs[entry].type == 'dictionary' %}
+
contains:
+
+
+
+
name
+
despcription
+
returned
+
type
+
sample
+
+
+ {% for sub in returndocs[entry].contains %}
+
+
@{ sub }@
+
@{ returndocs[entry].contains[sub].description }@
+
@{ returndocs[entry].contains[sub].returned }@
+
@{ returndocs[entry].contains[sub].type }@
+
@{ returndocs[entry].contains[sub].sample}@
+
+ {% endfor %}
+
+
+
+ {% endif %}
+ {% endfor %}
+
+
+
{% endif %}
{% if notes %}
From 72586d0df5fd0c7b51a0be193622f0653d7c7e1e Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 20 Mar 2015 17:27:00 -0400
Subject: [PATCH 0132/3617] updated to latest core/devel
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 4ce57ee1217344..7683f36613ec09 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 4ce57ee12173449179fc52a82849888488c9b72f
+Subproject commit 7683f36613ec0904618b9b2d07f215b3f028a4e0
From c7c8425856f55d7b2e54b179ef9b27a5a3efb98c Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 20 Mar 2015 23:12:16 -0400
Subject: [PATCH 0133/3617] fixed command line PE options to be the same as in
1.9
---
v2/ansible/utils/cli.py | 48 ++++++++++++++++++++---------------------
1 file changed, 23 insertions(+), 25 deletions(-)
diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py
index f846d6f73ca336..6ef416b9745fa1 100644
--- a/v2/ansible/utils/cli.py
+++ b/v2/ansible/utils/cli.py
@@ -55,12 +55,6 @@ def base_parser(usage="", output_opts=False, runas_opts=False,
help='ask for SSH password')
parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
- parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password')
- parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
- help='ask for su password')
- parser.add_option('--ask-become-pass', default=False, dest='ask_become_pass', action='store_true',
- help='ask for privlege escalation password')
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
@@ -86,29 +80,33 @@ def base_parser(usage="", output_opts=False, runas_opts=False,
help='log output to this directory')
if runas_opts:
- parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true",
- dest='become', help="run operations with become (nopasswd implied)")
- parser.add_option('-B', '--become-user', help='run operations with as this '
- 'user (default=%s)' % C.DEFAULT_BECOME_USER)
- parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true",
- dest='sudo', help="run operations with sudo (nopasswd)")
+ # priv user defaults to root later on to enable detecting when this option was given here
+ parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
+ help='ask for sudo password (deprecated, use become)')
+ parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
+ help='ask for su password (deprecated, use become)')
+ parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
+ help="run operations with sudo (nopasswd) (deprecated, use become)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
- help='desired sudo user (default=root)') # Can't default to root because we need to detect when this option was given
- parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER,
- dest='remote_user', help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
+ help='desired sudo user (default=root) (deprecated, use become)')
+ parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
+ help='run operations with su (deprecated, use become)')
+ parser.add_option('-R', '--su-user', default=None,
+ help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
+
+ # consolidated privilege escalation (become)
+ parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
+ help="run operations with become (nopasswd implied)")
+ parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
+ help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
+ parser.add_option('--become-user', default=None, dest='become_user', type='string',
+ help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
+ parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
+ help='ask for privilege escalation password')
- parser.add_option('-S', '--su', default=C.DEFAULT_SU,
- action='store_true', help='run operations with su')
- parser.add_option('-R', '--su-user', help='run operations with su as this '
- 'user (default=%s)' % C.DEFAULT_SU_USER)
if connect_opts:
- parser.add_option('-c', '--connection', dest='connection',
- default=C.DEFAULT_TRANSPORT,
- help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
- parser.add_option('--become-method', dest='become_method',
- default=C.DEFAULT_BECOME_METHOD,
- help="privlege escalation method to use (default=%s)" % C.DEFAULT_BECOME_METHOD)
+ parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int',
From cf96c7719e4974f69cd4691ecfe21ba5cda29c55 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 20 Mar 2015 23:48:52 -0400
Subject: [PATCH 0134/3617] added become_method list and pipeline support to
connection class methods
added generic method to check supported become methods for the connection plugin
---
v2/ansible/plugins/connections/__init__.py | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py
index aad19b77643de5..11015d7431338d 100644
--- a/v2/ansible/plugins/connections/__init__.py
+++ b/v2/ansible/plugins/connections/__init__.py
@@ -34,8 +34,18 @@ class ConnectionBase:
A base class for connections to contain common code.
'''
+ has_pipelining = False
+ become_methods = C.BECOME_METHODS
+
def __init__(self, connection_info, *args, **kwargs):
self._connection_info = connection_info
- self._has_pipelining = False
self._display = Display(connection_info)
+
+ def _become_method_supported(self, become_method):
+ ''' Checks if the current class supports this privilege escalation method '''
+
+ if become_method in self.__class__.become_methods:
+ return True
+
+ raise errors.AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method)
From 93c9803818d6fe46ece22c6019f0af932f405a42 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Fri, 20 Mar 2015 21:43:41 -0500
Subject: [PATCH 0135/3617] Removing some leftover debug prints and cleaning up
test sample
---
v2/ansible/playbook/block.py | 1 -
v2/ansible/playbook/helpers.py | 2 --
v2/ansible/playbook/play.py | 1 -
v2/samples/roles/test_become_r1/meta/main.yml | 2 +-
v2/samples/roles/test_become_r1/tasks/main.yml | 1 -
v2/samples/roles/test_become_r2/meta/main.yml | 2 +-
v2/samples/roles/test_become_r2/tasks/main.yml | 1 -
v2/samples/test_become.yml | 4 ----
8 files changed, 2 insertions(+), 12 deletions(-)
diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py
index 2946e83f5ef394..03957bfe2f6691 100644
--- a/v2/ansible/playbook/block.py
+++ b/v2/ansible/playbook/block.py
@@ -147,7 +147,6 @@ def _dupe_task_list(task_list, new_block):
new_me.block = _dupe_task_list(self.block or [], new_me)
new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
new_me.always = _dupe_task_list(self.always or [], new_me)
- print("new block tasks are: %s" % new_me.block)
new_me._parent_block = None
if self._parent_block:
diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py
index 3ea559d7997b5d..0e147205578406 100644
--- a/v2/ansible/playbook/helpers.py
+++ b/v2/ansible/playbook/helpers.py
@@ -37,7 +37,6 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use
assert type(ds) in (list, NoneType)
block_list = []
- print("in load list of blocks, ds is: %s" % ds)
if ds:
for block in ds:
b = Block.load(
@@ -51,7 +50,6 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use
)
block_list.append(b)
- print("-> returning block list: %s" % block_list)
return block_list
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
index 190189aa178afe..cbe4e038617a82 100644
--- a/v2/ansible/playbook/play.py
+++ b/v2/ansible/playbook/play.py
@@ -219,7 +219,6 @@ def compile(self):
block_list.extend(self.tasks)
block_list.extend(self.post_tasks)
- print("block list is: %s" % block_list)
return block_list
def get_vars(self):
diff --git a/v2/samples/roles/test_become_r1/meta/main.yml b/v2/samples/roles/test_become_r1/meta/main.yml
index 603a2d53a2507f..cb58e2857bc3a3 100644
--- a/v2/samples/roles/test_become_r1/meta/main.yml
+++ b/v2/samples/roles/test_become_r1/meta/main.yml
@@ -1 +1 @@
-allow_duplicates: yes
+#allow_duplicates: yes
diff --git a/v2/samples/roles/test_become_r1/tasks/main.yml b/v2/samples/roles/test_become_r1/tasks/main.yml
index 9231d0af98a26d..ef8d396978e611 100644
--- a/v2/samples/roles/test_become_r1/tasks/main.yml
+++ b/v2/samples/roles/test_become_r1/tasks/main.yml
@@ -1,2 +1 @@
-- debug: msg="this is test_become_r1"
- command: whoami
diff --git a/v2/samples/roles/test_become_r2/meta/main.yml b/v2/samples/roles/test_become_r2/meta/main.yml
index 9304df73a0db9b..55b258adb4d336 100644
--- a/v2/samples/roles/test_become_r2/meta/main.yml
+++ b/v2/samples/roles/test_become_r2/meta/main.yml
@@ -1,3 +1,3 @@
-allow_duplicates: yes
+#allow_duplicates: yes
dependencies:
- test_become_r1
diff --git a/v2/samples/roles/test_become_r2/tasks/main.yml b/v2/samples/roles/test_become_r2/tasks/main.yml
index 01d6d313852a54..ef8d396978e611 100644
--- a/v2/samples/roles/test_become_r2/tasks/main.yml
+++ b/v2/samples/roles/test_become_r2/tasks/main.yml
@@ -1,2 +1 @@
-- debug: msg="this is test_become_r2"
- command: whoami
diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml
index eb527e595958d2..b7550f33c778fe 100644
--- a/v2/samples/test_become.yml
+++ b/v2/samples/test_become.yml
@@ -1,14 +1,10 @@
- hosts: all
gather_facts: no
roles:
- - { role: test_become_r2 }
- { role: test_become_r2, sudo_user: testing }
tasks:
- - command: whoami
- command: whoami
become_user: testing
- - block:
- - command: whoami
- block:
- command: whoami
become_user: testing
From b370728439b17de1265f6c9227f151dec803bc75 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 21 Mar 2015 00:35:56 -0400
Subject: [PATCH 0136/3617] several fixes to cli tools
- fixed issue with previous commit with bad constants vs C ref on become
- added list-tags
- rearranged common options to utils/cli.py
- added generic validate for both vault and become conflicts
- removed dupes and conflicting options
---
v2/ansible/utils/cli.py | 64 ++++++++++++++++++++++++++++++-----------
v2/bin/ansible | 23 ++++-----------
v2/bin/ansible-playbook | 25 ++++++----------
3 files changed, 63 insertions(+), 49 deletions(-)
diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py
index 6ef416b9745fa1..3b899e49c56f7a 100644
--- a/v2/ansible/utils/cli.py
+++ b/v2/ansible/utils/cli.py
@@ -38,7 +38,7 @@ def format_help(self, formatter=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
-def base_parser(usage="", output_opts=False, runas_opts=False,
+def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
''' create an options parser for any ansible script '''
@@ -52,7 +52,7 @@ def base_parser(usage="", output_opts=False, runas_opts=False,
help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST,
default=C.DEFAULT_HOST_LIST)
parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
- help='ask for SSH password')
+ help='ask for connection password')
parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection')
parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
@@ -64,14 +64,16 @@ def base_parser(usage="", output_opts=False, runas_opts=False,
parser.add_option('-M', '--module-path', dest='module_path',
help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
default=None)
+ parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
+ help="set additional variables as key=value or YAML/JSON", default=[])
if subset_opts:
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
-
- parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int',
- dest='timeout',
- help="override the SSH timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
+ parser.add_option('-t', '--tags', dest='tags', default='all',
+ help="only run plays and tasks tagged with these values")
+ parser.add_option('--skip-tags', dest='skip_tags',
+ help="only run plays and tasks whose tags do not match these values")
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
@@ -85,28 +87,32 @@ def base_parser(usage="", output_opts=False, runas_opts=False,
help='ask for sudo password (deprecated, use become)')
parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
- parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
+ parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
- parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
+ parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
parser.add_option('-R', '--su-user', default=None,
- help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
+ help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
- parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
+ parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (nopasswd implied)")
- parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
- help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
+ parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string',
+ help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
parser.add_option('--become-user', default=None, dest='become_user', type='string',
- help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
+ help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if connect_opts:
- parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
+ parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
+ help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
+ parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
+ help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
+
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int',
@@ -117,14 +123,20 @@ def base_parser(usage="", output_opts=False, runas_opts=False,
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
- help="don't make any changes; instead, try to predict some of the changes that may occur"
- )
+ help="don't make any changes; instead, try to predict some of the changes that may occur")
+ parser.add_option('--syntax-check', dest='syntax', action='store_true',
+ help="perform a syntax check on the playbook, but do not execute it")
if diff_opts:
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check"
)
+ if meta_opts:
+ parser.add_option('--force-handlers', dest='force_handlers', action='store_true',
+ help="run handlers even if a task fails")
+ parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
+ help="clear the fact cache")
return parser
@@ -219,3 +231,23 @@ def _gitinfo():
f.close()
return result
+def validate_conflicts(parser, options):
+
+ # Check for vault related conflicts
+ if (options.ask_vault_pass and options.vault_password_file):
+ parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
+
+
+ # Check for privilege escalation conflicts
+ if (options.su or options.su_user or options.ask_su_pass) and \
+ (options.sudo or options.sudo_user or options.ask_sudo_pass) or \
+ (options.su or options.su_user or options.ask_su_pass) and \
+ (options.become or options.become_user or options.become_ask_pass) or \
+ (options.sudo or options.sudo_user or options.ask_sudo_pass) and \
+ (options.become or options.become_user or options.become_ask_pass):
+
+ parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
+ "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
+ "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
+ " are exclusive of each other")
+
diff --git a/v2/bin/ansible b/v2/bin/ansible
index c51040c6a844d5..1e298623f52848 100755
--- a/v2/bin/ansible
+++ b/v2/bin/ansible
@@ -29,7 +29,7 @@ from ansible.inventory import Inventory
from ansible.parsing import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
-from ansible.utils.cli import base_parser
+from ansible.utils.cli import base_parser, validate_conflicts
from ansible.vars import VariableManager
########################################################
@@ -45,15 +45,14 @@ class Cli(object):
parser = base_parser(
usage='%prog [options]',
- runas_opts=True,
- subset_opts=True,
+ runas_opts=True,
async_opts=True,
- output_opts=True,
- connect_opts=True,
+ output_opts=True,
+ connect_opts=True,
check_opts=True,
- diff_opts=False,
)
+ # options unique to ansible ad-hoc
parser.add_option('-a', '--args', dest='module_args',
help="module arguments", default=C.DEFAULT_MODULE_ARGS)
parser.add_option('-m', '--module-name', dest='module_name',
@@ -66,15 +65,7 @@ class Cli(object):
parser.print_help()
sys.exit(1)
- # su and sudo command line arguments need to be mutually exclusive
- if (options.su or options.su_user or options.ask_su_pass) and \
- (options.sudo or options.sudo_user or options.ask_sudo_pass):
- parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
- "and su arguments ('-su', '--su-user', and '--ask-su-pass') are "
- "mutually exclusive")
-
- if (options.ask_vault_pass and options.vault_password_file):
- parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
+ validate_conflicts(parser,options)
return (options, args)
@@ -113,8 +104,6 @@ class Cli(object):
variable_manager = VariableManager()
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory)
- if options.subset:
- inventory.subset(options.subset)
hosts = inventory.list_hosts(pattern)
if len(hosts) == 0:
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index bdd9598ec82174..26bbe14c7acb56 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -12,7 +12,7 @@ from ansible.parsing import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook import Playbook
from ansible.playbook.task import Task
-from ansible.utils.cli import base_parser
+from ansible.utils.cli import base_parser, validate_conflicts
from ansible.utils.unicode import to_unicode
from ansible.utils.vars import combine_vars
from ansible.utils.vault import read_vault_file
@@ -30,31 +30,22 @@ def main(args):
parser = base_parser(
usage = "%prog playbook.yml",
connect_opts=True,
+ meta_opts=True,
runas_opts=True,
subset_opts=True,
check_opts=True,
- diff_opts=True
+ diff_opts=True,
)
- parser.add_option('--vault-password', dest="vault_password",
- help="password for vault encrypted files")
- parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
- help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-t', '--tags', dest='tags', default='all',
- help="only run plays and tasks tagged with these values")
- parser.add_option('--skip-tags', dest='skip_tags',
- help="only run plays and tasks whose tags do not match these values")
- parser.add_option('--syntax-check', dest='syntax', action='store_true',
- help="perform a syntax check on the playbook, but do not execute it")
+
+ # ansible playbook specific opts
parser.add_option('--list-tasks', dest='listtasks', action='store_true',
help="list all tasks that would be executed")
parser.add_option('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
parser.add_option('--start-at-task', dest='start_at',
help="start the playbook at the task matching this name")
- parser.add_option('--force-handlers', dest='force_handlers', action='store_true',
- help="run handlers even if a task fails")
- parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
- help="clear the fact cache")
+ parser.add_option('--list-tags', dest='listtags', action='store_true',
+ help="list all available tags")
options, args = parser.parse_args(args)
@@ -62,6 +53,8 @@ def main(args):
parser.print_help(file=sys.stderr)
return 1
+ validate_conflicts(parser,options)
+
vault_pass = None
if options.ask_vault_pass:
# FIXME: prompt here
From 9d3a63945d7ca11a024409b20f010d48b157605d Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 21 Mar 2015 00:48:38 -0400
Subject: [PATCH 0137/3617] moved pipeline check to class var that was
previously added
---
v2/ansible/plugins/action/__init__.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py
index d430bd748beb1f..e56003021588bf 100644
--- a/v2/ansible/plugins/action/__init__.py
+++ b/v2/ansible/plugins/action/__init__.py
@@ -130,10 +130,10 @@ def _late_needs_tmp_path(self, tmp, module_style):
if tmp and "tmp" in tmp:
# tmp has already been created
return False
- if not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.become:
+ if not self._connection.__class__.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.become:
# tmp is necessary to store module source code
return True
- if not self._connection._has_pipelining:
+ if not self._connection.__class__.has_pipelining:
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
@@ -380,7 +380,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_
# FIXME: all of the old-module style and async stuff has been removed from here, and
# might need to be re-added (unless we decide to drop support for old-style modules
# at this point and rework things to support non-python modules specifically)
- if self._connection._has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES:
+ if self._connection.__class__.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES:
in_data = module_data
else:
if remote_module_path:
From edb1bd25ddb9b63eb9a8c8d3224277489d13de4f Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 21 Mar 2015 01:19:07 -0400
Subject: [PATCH 0138/3617] added password prompting and become/sudo/su
collapsing
---
v2/ansible/utils/cli.py | 47 +++++++++++++++++++++++++++++++++++++++++
v2/bin/ansible | 15 +++++++------
v2/bin/ansible-playbook | 14 +++++++-----
3 files changed, 65 insertions(+), 11 deletions(-)
diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py
index 3b899e49c56f7a..09f5ef4a30f9de 100644
--- a/v2/ansible/utils/cli.py
+++ b/v2/ansible/utils/cli.py
@@ -24,9 +24,11 @@
import os
import time
import yaml
+import getpass
from ansible import __version__
from ansible import constants as C
+from ansible.utils.unicode import to_bytes
# FIXME: documentation for methods here, which have mostly been
# copied directly over from the old utils/__init__.py
@@ -231,6 +233,51 @@ def _gitinfo():
f.close()
return result
+
+def ask_passwords(options):
+ sshpass = None
+ becomepass = None
+ vaultpass = None
+ become_prompt = ''
+
+ if options.ask_pass:
+ sshpass = getpass.getpass(prompt="SSH password: ")
+ become_prompt = "%s password[defaults to SSH password]: " % options.become_method.upper()
+ if sshpass:
+ sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
+ else:
+ become_prompt = "%s password: " % options.become_method.upper()
+
+ if options.become_ask_pass:
+ becomepass = getpass.getpass(prompt=become_prompt)
+ if options.ask_pass and becomepass == '':
+ becomepass = sshpass
+ if becomepass:
+ becomepass = to_bytes(becomepass)
+
+ if options.ask_vault_pass:
+ vaultpass = getpass.getpass(prompt="Vault password: ")
+ if vaultpass:
+ vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
+
+ return (sshpass, becomepass, vaultpass)
+
+
+def normalize_become_options(options):
+ ''' this keeps backwards compatibility with sudo/su options '''
+ options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
+ options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
+
+ if options.become:
+ pass
+ elif options.sudo:
+ options.become = True
+ options.become_method = 'sudo'
+ elif options.su:
+ options.become = True
+ options.become_method = 'su'
+
+
def validate_conflicts(parser, options):
# Check for vault related conflicts
diff --git a/v2/bin/ansible b/v2/bin/ansible
index 1e298623f52848..74ee46121aa90c 100755
--- a/v2/bin/ansible
+++ b/v2/bin/ansible
@@ -29,7 +29,7 @@ from ansible.inventory import Inventory
from ansible.parsing import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
-from ansible.utils.cli import base_parser, validate_conflicts
+from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords
from ansible.vars import VariableManager
########################################################
@@ -79,11 +79,14 @@ class Cli(object):
#-------------------------------------------------------------------------------
# FIXME: the password asking stuff needs to be ported over still
#-------------------------------------------------------------------------------
- #sshpass = None
- #sudopass = None
- #su_pass = None
- #vault_pass = None
- #
+ sshpass = None
+ becomepass = None
+ vault_pass = None
+
+ normalize_become_options(options)
+ (sshpass, becomepass, vault_pass) = ask_passwords(options)
+
+
#options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
## Never ask for an SSH password when we run with local connection
#if options.connection == "local":
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index 26bbe14c7acb56..f1b590958b343b 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -12,7 +12,7 @@ from ansible.parsing import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook import Playbook
from ansible.playbook.task import Task
-from ansible.utils.cli import base_parser, validate_conflicts
+from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords
from ansible.utils.unicode import to_unicode
from ansible.utils.vars import combine_vars
from ansible.utils.vault import read_vault_file
@@ -55,11 +55,15 @@ def main(args):
validate_conflicts(parser,options)
+ # Manage passwords
+ sshpass = None
+ becomepass = None
vault_pass = None
- if options.ask_vault_pass:
- # FIXME: prompt here
- pass
- elif options.vault_password_file:
+
+ normalize_become_options(options)
+ (sshpass, becomepass, vault_pass) = ask_passwords(options)
+
+ if options.vault_password_file:
# read vault_pass from a file
vault_pass = read_vault_file(options.vault_password_file)
From 10e14d0e0ab54746f6c4599dacbfb806629f6cc8 Mon Sep 17 00:00:00 2001
From: Henry Todd
Date: Sat, 21 Mar 2015 13:21:55 +0800
Subject: [PATCH 0139/3617] Update add_host example in AWS Guide
The add_host module now uses "groups" instead of "groupname" to allow for specifying more than one group.
---
docsite/rst/guide_aws.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst
index 7cfffc218db9f3..97eb0904fe2f98 100644
--- a/docsite/rst/guide_aws.rst
+++ b/docsite/rst/guide_aws.rst
@@ -107,7 +107,7 @@ From this, we'll use the add_host module to dynamically create a host group cons
register: ec2
- name: Add all instance public IPs to host group
- add_host: hostname={{ item.public_ip }} groupname=ec2hosts
+ add_host: hostname={{ item.public_ip }} groups=ec2hosts
with_items: ec2.instances
With the host group now created, a second play at the bottom of the the same provisioning playbook file might now have some configuration steps::
From 08896e2cfdd6bcf338724f8214309a9422bbcfe4 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 21 Mar 2015 01:23:28 -0400
Subject: [PATCH 0140/3617] enabled vault password file and fixed prompting for
connection password on local
---
v2/bin/ansible | 26 ++++++++------------------
1 file changed, 8 insertions(+), 18 deletions(-)
diff --git a/v2/bin/ansible b/v2/bin/ansible
index 74ee46121aa90c..f8478b32c227f0 100755
--- a/v2/bin/ansible
+++ b/v2/bin/ansible
@@ -30,6 +30,7 @@ from ansible.parsing import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords
+from ansible.utils.vault import read_vault_file
from ansible.vars import VariableManager
########################################################
@@ -76,9 +77,9 @@ class Cli(object):
pattern = args[0]
- #-------------------------------------------------------------------------------
- # FIXME: the password asking stuff needs to be ported over still
- #-------------------------------------------------------------------------------
+ if options.connection == "local":
+ options.ask_pass = False
+
sshpass = None
becomepass = None
vault_pass = None
@@ -86,23 +87,12 @@ class Cli(object):
normalize_become_options(options)
(sshpass, becomepass, vault_pass) = ask_passwords(options)
-
- #options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
- ## Never ask for an SSH password when we run with local connection
- #if options.connection == "local":
- # options.ask_pass = False
- #options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS
- #options.ask_su_pass = options.ask_su_pass or C.DEFAULT_ASK_SU_PASS
- #options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
- #
- #(sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass)
- #
+ if options.vault_password_file:
# read vault_pass from a file
- #if not options.ask_vault_pass and options.vault_password_file:
- # vault_pass = utils.read_vault_file(options.vault_password_file)
- #-------------------------------------------------------------------------------
+ vault_pass = read_vault_file(options.vault_password_file)
+
- # FIXME: needs vault password, after the above is fixed
+ # FIXME: needs vault password
loader = DataLoader()
variable_manager = VariableManager()
From ca540ef9f831e20bb1f9054fad889dd063954c23 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 21 Mar 2015 01:33:10 -0400
Subject: [PATCH 0141/3617] added vault password to dataloder creation
---
v2/bin/ansible | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/v2/bin/ansible b/v2/bin/ansible
index f8478b32c227f0..8eb5c97a6f5568 100755
--- a/v2/bin/ansible
+++ b/v2/bin/ansible
@@ -91,9 +91,7 @@ class Cli(object):
# read vault_pass from a file
vault_pass = read_vault_file(options.vault_password_file)
-
- # FIXME: needs vault password
- loader = DataLoader()
+ loader = DataLoader(vault_password=vault_pass)
variable_manager = VariableManager()
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory)
From ec8118ec413ed4fc27d6f95874ece5022df335e7 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 21 Mar 2015 02:02:59 -0400
Subject: [PATCH 0142/3617] now ansible ignores template errors on passwords
they could be caused by random character combinations, fixes #10468
---
lib/ansible/runner/__init__.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
index 8e326935b09dff..4565b90a04d798 100644
--- a/lib/ansible/runner/__init__.py
+++ b/lib/ansible/runner/__init__.py
@@ -934,8 +934,12 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port,
# user/pass may still contain variables at this stage
actual_user = template.template(self.basedir, actual_user, inject)
- actual_pass = template.template(self.basedir, actual_pass, inject)
- self.become_pass = template.template(self.basedir, self.become_pass, inject)
+ try:
+ actual_pass = template.template(self.basedir, actual_pass, inject)
+ self.become_pass = template.template(self.basedir, self.become_pass, inject)
+ except:
+ # ignore password template errors, could be triggered by password characters #10468
+ pass
# make actual_user available as __magic__ ansible_ssh_user variable
inject['ansible_ssh_user'] = actual_user
From 9a680472f8d90ba87cbae917b6ab1f0d0cf67ffb Mon Sep 17 00:00:00 2001
From: Tim Rupp
Date: Sat, 21 Mar 2015 19:22:12 -0700
Subject: [PATCH 0143/3617] Fixes a brief spelling error
Fixes a simple spelling mistake that was bugging me when I read the online
docs. Trying to make the docs as great as possible.
---
docsite/rst/faq.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst
index e7b21456afd5c5..1b499c547406bb 100644
--- a/docsite/rst/faq.rst
+++ b/docsite/rst/faq.rst
@@ -5,7 +5,7 @@ Here are some commonly-asked questions and their answers.
.. _users_and_ports:
-If you are looking to set environment varialbes remotely for your project (in a task, not locally for Ansible)
+If you are looking to set environment variables remotely for your project (in a task, not locally for Ansible)
The keyword is simply `environment`
```
From c5d5ed17ea2c5c1e6f81f2a4a87f196051b7a44d Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sun, 22 Mar 2015 02:05:27 -0400
Subject: [PATCH 0144/3617] added tag resolution mirroring updated v1
---
v2/ansible/playbook/taggable.py | 59 +++++++++++++++++++++++----------
1 file changed, 42 insertions(+), 17 deletions(-)
diff --git a/v2/ansible/playbook/taggable.py b/v2/ansible/playbook/taggable.py
index e83f1d7ae50c2a..ce1bdfcf8a7ff3 100644
--- a/v2/ansible/playbook/taggable.py
+++ b/v2/ansible/playbook/taggable.py
@@ -24,6 +24,8 @@
from ansible.template import Templar
class Taggable:
+
+ untagged = set(['untagged'])
_tags = FieldAttribute(isa='list', default=[])
def __init__(self):
@@ -38,22 +40,45 @@ def _load_tags(self, attr, ds):
raise AnsibleError('tags must be specified as a list', obj=ds)
def evaluate_tags(self, only_tags, skip_tags, all_vars):
- templar = Templar(loader=self._loader, variables=all_vars)
- tags = templar.template(self.tags)
- if not isinstance(tags, list):
- tags = set([tags])
- else:
- tags = set(tags)
-
- #print("%s tags are: %s, only_tags=%s, skip_tags=%s" % (self, my_tags, only_tags, skip_tags))
- if skip_tags:
- skipped_tags = tags.intersection(skip_tags)
- if len(skipped_tags) > 0:
- return False
- matched_tags = tags.intersection(only_tags)
- #print("matched tags are: %s" % matched_tags)
- if len(matched_tags) > 0 or 'all' in only_tags:
- return True
+ ''' this checks if the current item should be executed depending on tag options '''
+
+ should_run = True
+
+ if self.tags:
+ templar = Templar(loader=self._loader, variables=all_vars)
+ tags = templar.template(self.tags)
+
+ if not isinstance(tags, list):
+ if tags.find(',') != -1:
+ tags = set(tags.split(','))
+ else:
+ tags = set([tags])
+ else:
+ tags = set(tags)
else:
- return False
+ # this makes intersection work for untagged
+ tags = self.__class__.untagged
+
+ if only_tags:
+
+ should_run = False
+
+ if 'always' in tags or 'all' in only_tags:
+ should_run = True
+ elif tags.intersection(only_tags):
+ should_run = True
+ elif 'tagged' in only_tags and tags != self.__class__.untagged:
+ should_run = True
+
+ if should_run and skip_tags:
+
+ # Check for tags that we need to skip
+ if 'all' in skip_tags:
+ if 'always' not in tags or 'always' in skip_tags:
+ should_run = False
+ elif tags.intersection(skip_tags):
+ should_run = False
+ elif 'tagged' in skip_tags and tags != self.__class__.untagged:
+ should_run = False
+ return should_run
From bda83fdf84068bcd3720f5c6d82c21a7d5e66594 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Sun, 22 Mar 2015 19:17:04 -0500
Subject: [PATCH 0145/3617] Fixing bug in v2 dynamic include code, pointed out
by apollo13
---
v2/ansible/plugins/strategies/linear.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/v2/ansible/plugins/strategies/linear.py b/v2/ansible/plugins/strategies/linear.py
index c6b9445b2e673b..b503d6ebd51022 100644
--- a/v2/ansible/plugins/strategies/linear.py
+++ b/v2/ansible/plugins/strategies/linear.py
@@ -236,7 +236,7 @@ def __repr__(self):
for include_result in include_results:
original_task = iterator.get_original_task(res._host, res._task)
if original_task and original_task._role:
- include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_file)
+ include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include'])
else:
include_file = self._loader.path_dwim(res._task.args.get('_raw_params'))
From 5942144868f503dbc3b4652fdf4281db1cb7197a Mon Sep 17 00:00:00 2001
From: Pierre-Louis Bonicoli
Date: Mon, 23 Mar 2015 01:25:18 +0100
Subject: [PATCH 0146/3617] Port #10258 to v2
---
v2/ansible/module_utils/basic.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/v2/ansible/module_utils/basic.py b/v2/ansible/module_utils/basic.py
index 6c7217bd8838f6..79a0fab67b6e4c 100644
--- a/v2/ansible/module_utils/basic.py
+++ b/v2/ansible/module_utils/basic.py
@@ -1376,7 +1376,7 @@ def atomic_move(self, src, dest):
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
- os.chmod(dest, 0666 ^ umask)
+ os.chmod(dest, 0666 & ~umask)
if switched_user:
os.chown(dest, os.getuid(), os.getgid())
From 317728f64955f0d38da014fd7e48cba97883b646 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Mon, 23 Mar 2015 09:20:27 -0500
Subject: [PATCH 0147/3617] Allow ansible-galaxy to install symlinks
---
bin/ansible-galaxy | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy
index f281bf97ae896b..a6d625671ec548 100755
--- a/bin/ansible-galaxy
+++ b/bin/ansible-galaxy
@@ -556,7 +556,7 @@ def install_role(role_name, role_version, role_filename, options):
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop the leading directory, as mentioned above
- if member.isreg():
+ if member.isreg() or member.issym():
parts = member.name.split("/")[1:]
final_parts = []
for part in parts:
From 095990b4d8dcd93e65b188fb9ffeb37b1d3b09e5 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 23 Mar 2015 15:19:13 -0500
Subject: [PATCH 0148/3617] Moving from getattr to properties for the v2 base
class
---
v2/ansible/playbook/base.py | 54 ++++++++++++++++++++-----------------
1 file changed, 29 insertions(+), 25 deletions(-)
diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py
index e32da5d8c5a90c..c33dde858fe7ed 100644
--- a/v2/ansible/playbook/base.py
+++ b/v2/ansible/playbook/base.py
@@ -21,6 +21,7 @@
import uuid
+from functools import partial
from inspect import getmembers
from io import FileIO
@@ -50,11 +51,24 @@ def __init__(self):
# every object gets a random uuid:
self._uuid = uuid.uuid4()
- # each class knows attributes set upon it, see Task.py for example
- self._attributes = dict()
+ # and initialize the base attributes
+ self._initialize_base_attributes()
+
+ @staticmethod
+ def _generic_g(key, self):
+ method = "_get_attr_%s" % key
+ if method in dir(self):
+ return getattr(self, method)()
+
+ return self._attributes[key]
- for (name, value) in iteritems(self._get_base_attributes()):
- self._attributes[name] = value.default
+ @staticmethod
+ def _generic_s(key, self, value):
+ self._attributes[key] = value
+
+ @staticmethod
+ def _generic_d(key, self):
+ del self._attributes[key]
def _get_base_attributes(self):
'''
@@ -69,6 +83,17 @@ def _get_base_attributes(self):
base_attributes[name] = value
return base_attributes
+ def _initialize_base_attributes(self):
+ # each class knows attributes set upon it, see Task.py for example
+ self._attributes = dict()
+
+ for (name, value) in self._get_base_attributes().items():
+ getter = partial(self._generic_g, name)
+ setter = partial(self._generic_s, name)
+ deleter = partial(self._generic_d, name)
+ setattr(Base, name, property(getter, setter, deleter))
+ setattr(self, name, value.default)
+
def munge(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
@@ -274,27 +299,6 @@ def deserialize(self, data):
# restore the UUID field
setattr(self, '_uuid', data.get('uuid'))
- def __getattr__(self, needle):
-
- # return any attribute names as if they were real
- # optionally allowing masking by accessors
-
- if not needle.startswith("_"):
- method = "_get_attr_%s" % needle
- if method in dir(self):
- return getattr(self, method)()
-
- if needle in self._attributes:
- return self._attributes[needle]
-
- raise AttributeError("attribute not found in %s: %s" % (self.__class__.__name__, needle))
-
- def __setattr__(self, needle, value):
- if hasattr(self, '_attributes') and needle in self._attributes:
- self._attributes[needle] = value
- else:
- super(Base, self).__setattr__(needle, value)
-
def __getstate__(self):
return self.serialize()
From 79cf7e72927bfd61d5bdc6e4630317d18d539c9e Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Mon, 23 Mar 2015 15:20:24 -0500
Subject: [PATCH 0149/3617] Modifying sample for test_become to show more test
cases
---
v2/samples/roles/test_become_r1/meta/main.yml | 2 +-
v2/samples/roles/test_become_r2/meta/main.yml | 2 +-
v2/samples/test_become.yml | 5 +++++
3 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/v2/samples/roles/test_become_r1/meta/main.yml b/v2/samples/roles/test_become_r1/meta/main.yml
index cb58e2857bc3a3..603a2d53a2507f 100644
--- a/v2/samples/roles/test_become_r1/meta/main.yml
+++ b/v2/samples/roles/test_become_r1/meta/main.yml
@@ -1 +1 @@
-#allow_duplicates: yes
+allow_duplicates: yes
diff --git a/v2/samples/roles/test_become_r2/meta/main.yml b/v2/samples/roles/test_become_r2/meta/main.yml
index 55b258adb4d336..9304df73a0db9b 100644
--- a/v2/samples/roles/test_become_r2/meta/main.yml
+++ b/v2/samples/roles/test_become_r2/meta/main.yml
@@ -1,3 +1,3 @@
-#allow_duplicates: yes
+allow_duplicates: yes
dependencies:
- test_become_r1
diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml
index b7550f33c778fe..3dd318c89961a3 100644
--- a/v2/samples/test_become.yml
+++ b/v2/samples/test_become.yml
@@ -1,10 +1,15 @@
- hosts: all
gather_facts: no
+ remote_user: root
roles:
+ - { role: test_become_r2 }
- { role: test_become_r2, sudo_user: testing }
tasks:
+ - command: whoami
- command: whoami
become_user: testing
+ - block:
+ - command: whoami
- block:
- command: whoami
become_user: testing
From 577cdcadb35cc4eee73626262984275fd81e8dda Mon Sep 17 00:00:00 2001
From: Cristian Ciupitu
Date: Mon, 23 Mar 2015 22:45:23 +0200
Subject: [PATCH 0150/3617] Doc: use literal code blocks for YAML examples
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Without this, the straight double quotes (") are displayed as curved
quotes (“ and ”).
---
docsite/rst/YAMLSyntax.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst
index 424db0ad46600b..d3eb843523173b 100644
--- a/docsite/rst/YAMLSyntax.rst
+++ b/docsite/rst/YAMLSyntax.rst
@@ -85,11 +85,11 @@ That's all you really need to know about YAML to start writing
Gotchas
-------
-While YAML is generally friendly, the following is going to result in a YAML syntax error:
+While YAML is generally friendly, the following is going to result in a YAML syntax error::
foo: somebody said I should put a colon here: so I did
-You will want to quote any hash values using colons, like so:
+You will want to quote any hash values using colons, like so::
foo: "somebody said I should put a colon here: so I did"
From fdf51e9a967a0d488e89d60c6409c86fb8b41513 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 23 Mar 2015 16:14:34 -0700
Subject: [PATCH 0151/3617] Use class.mro() instead of custom base_class finder
code
---
v2/ansible/playbook/base.py | 12 +-----------
1 file changed, 1 insertion(+), 11 deletions(-)
diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py
index c33dde858fe7ed..2a42441309a55d 100644
--- a/v2/ansible/playbook/base.py
+++ b/v2/ansible/playbook/base.py
@@ -97,17 +97,7 @@ def _initialize_base_attributes(self):
def munge(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
- def _get_base_classes_munge(target_class):
- base_classes = list(target_class.__bases__[:])
- for base_class in target_class.__bases__:
- base_classes.extend( _get_base_classes_munge(base_class))
- return base_classes
-
- base_classes = list(self.__class__.__bases__[:])
- for base_class in self.__class__.__bases__:
- base_classes.extend(_get_base_classes_munge(base_class))
-
- for base_class in base_classes:
+ for base_class in self.__class__.mro():
method = getattr(self, "_munge_%s" % base_class.__name__.lower(), None)
if method:
return method(ds)
From 63c54035de58d68dde422351be137fc5361677e7 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 23 Mar 2015 16:38:51 -0700
Subject: [PATCH 0152/3617] Get rid of iteritems usage when we only care about
the keys
---
v2/ansible/playbook/base.py | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py
index 2a42441309a55d..4ab2347dc97a8f 100644
--- a/v2/ansible/playbook/base.py
+++ b/v2/ansible/playbook/base.py
@@ -97,6 +97,9 @@ def _initialize_base_attributes(self):
def munge(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
+ ### FIXME: Can't find any classes with methods named
+ # _munge_base_class.__name__ so maybe Base.munge should be reduced down
+ # to return ds
for base_class in self.__class__.mro():
method = getattr(self, "_munge_%s" % base_class.__name__.lower(), None)
if method:
@@ -132,7 +135,7 @@ def load_data(self, ds, variable_manager=None, loader=None):
# FIXME: we currently don't do anything with private attributes but
# may later decide to filter them out of 'ds' here.
- for (name, attribute) in iteritems(self._get_base_attributes()):
+ for name in self._get_base_attributes():
# copy the value over unless a _load_field method is defined
if name in ds:
method = getattr(self, '_load_%s' % name, None)
@@ -151,7 +154,7 @@ def load_data(self, ds, variable_manager=None, loader=None):
return self
def get_ds(self):
- try:
+ try:
return getattr(self, '_ds')
except AttributeError:
return None
@@ -168,7 +171,7 @@ def _validate_attributes(self, ds):
not map to attributes for this object.
'''
- valid_attrs = [name for (name, attribute) in iteritems(self._get_base_attributes())]
+ valid_attrs = frozenset(name for name in self._get_base_attributes())
for key in ds:
if key not in valid_attrs:
raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
@@ -191,7 +194,7 @@ def copy(self):
new_me = self.__class__()
- for (name, attribute) in iteritems(self._get_base_attributes()):
+ for name in self._get_base_attributes():
setattr(new_me, name, getattr(self, name))
new_me._loader = self._loader
@@ -223,7 +226,7 @@ def post_validate(self, all_vars=dict(), fail_on_undefined=True):
try:
# if the attribute contains a variable, template it now
value = templar.template(getattr(self, name))
-
+
# run the post-validator if present
method = getattr(self, '_post_validate_%s' % name, None)
if method:
@@ -262,7 +265,7 @@ def serialize(self):
repr = dict()
- for (name, attribute) in iteritems(self._get_base_attributes()):
+ for name in self._get_base_attributes():
repr[name] = getattr(self, name)
# serialize the uuid field
From 6ba24e9fa1c73120440f52878cc148b17552a206 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 23 Mar 2015 17:41:02 -0700
Subject: [PATCH 0153/3617] Remove comment on changing Base.munge => it's used
by become.py
---
v2/ansible/playbook/base.py | 3 ---
1 file changed, 3 deletions(-)
diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py
index 4ab2347dc97a8f..4ac815552a51a3 100644
--- a/v2/ansible/playbook/base.py
+++ b/v2/ansible/playbook/base.py
@@ -97,9 +97,6 @@ def _initialize_base_attributes(self):
def munge(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
- ### FIXME: Can't find any classes with methods named
- # _munge_base_class.__name__ so maybe Base.munge should be reduced down
- # to return ds
for base_class in self.__class__.mro():
method = getattr(self, "_munge_%s" % base_class.__name__.lower(), None)
if method:
From bc69ad81479fe687163421a0e1d905b5780110b5 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 23 Mar 2015 18:42:28 -0700
Subject: [PATCH 0154/3617] Rename munge methods to preprocess_data.
Remove the call to preprocess_loop data from playbook_include as
includes can't be used with loops.
---
v2/ansible/playbook/base.py | 12 ++++++------
v2/ansible/playbook/become.py | 8 +++++++-
v2/ansible/playbook/block.py | 8 ++++----
v2/ansible/playbook/play.py | 4 ++--
v2/ansible/playbook/playbook_include.py | 13 ++++++-------
v2/ansible/playbook/role/definition.py | 4 ++--
v2/ansible/playbook/role/requirement.py | 4 ++--
v2/ansible/playbook/task.py | 8 ++++----
8 files changed, 33 insertions(+), 28 deletions(-)
diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py
index 4ac815552a51a3..5aff5348ee7ab7 100644
--- a/v2/ansible/playbook/base.py
+++ b/v2/ansible/playbook/base.py
@@ -94,11 +94,11 @@ def _initialize_base_attributes(self):
setattr(Base, name, property(getter, setter, deleter))
setattr(self, name, value.default)
- def munge(self, ds):
+ def preprocess_data(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
for base_class in self.__class__.mro():
- method = getattr(self, "_munge_%s" % base_class.__name__.lower(), None)
+ method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None)
if method:
return method(ds)
return ds
@@ -121,10 +121,10 @@ def load_data(self, ds, variable_manager=None, loader=None):
if isinstance(ds, string_types) or isinstance(ds, FileIO):
ds = self._loader.load(ds)
- # call the munge() function to massage the data into something
- # we can more easily parse, and then call the validation function
- # on it to ensure there are no incorrect key values
- ds = self.munge(ds)
+ # call the preprocess_data() function to massage the data into
+ # something we can more easily parse, and then call the validation
+ # function on it to ensure there are no incorrect key values
+ ds = self.preprocess_data(ds)
self._validate_attributes(ds)
# Walk all attributes in the class.
diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py
index 67eb52b15eeedd..291cff2b716570 100644
--- a/v2/ansible/playbook/become.py
+++ b/v2/ansible/playbook/become.py
@@ -51,7 +51,13 @@ def _detect_privilege_escalation_conflict(self, ds):
elif has_sudo and has_su:
raise errors.AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
- def _munge_become(self, ds):
+ def _preprocess_data_become(self, ds):
+ """Preprocess the playbook data for become attributes
+
+ This is called from the Base object's preprocess_data() method which
+ in turn is called pretty much anytime any sort of playbook object
+ (plays, tasks, blocks, etc) are created.
+ """
self._detect_privilege_escalation_conflict(ds)
diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py
index 03957bfe2f6691..6506345172aae3 100644
--- a/v2/ansible/playbook/block.py
+++ b/v2/ansible/playbook/block.py
@@ -66,7 +66,7 @@ def load(data, parent_block=None, role=None, task_include=None, use_handlers=Fal
b = Block(parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers)
return b.load_data(data, variable_manager=variable_manager, loader=loader)
- def munge(self, ds):
+ def preprocess_data(self, ds):
'''
If a simple task is given, an implicit block for that single task
is created, which goes in the main portion of the block
@@ -80,11 +80,11 @@ def munge(self, ds):
if not is_block:
if isinstance(ds, list):
- return super(Block, self).munge(dict(block=ds))
+ return super(Block, self).preprocess_data(dict(block=ds))
else:
- return super(Block, self).munge(dict(block=[ds]))
+ return super(Block, self).preprocess_data(dict(block=[ds]))
- return super(Block, self).munge(ds)
+ return super(Block, self).preprocess_data(ds)
def _load_block(self, attr, ds):
return load_list_of_tasks(
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
index cbe4e038617a82..a96e6e1ecaa0b7 100644
--- a/v2/ansible/playbook/play.py
+++ b/v2/ansible/playbook/play.py
@@ -102,7 +102,7 @@ def load(data, variable_manager=None, loader=None):
p = Play()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
- def munge(self, ds):
+ def preprocess_data(self, ds):
'''
Adjusts play datastructure to cleanup old/legacy items
'''
@@ -121,7 +121,7 @@ def munge(self, ds):
ds['remote_user'] = ds['user']
del ds['user']
- return super(Play, self).munge(ds)
+ return super(Play, self).preprocess_data(ds)
def _load_vars(self, attr, ds):
'''
diff --git a/v2/ansible/playbook/playbook_include.py b/v2/ansible/playbook/playbook_include.py
index e1d7f6be34f24b..f7eae230f7c0e8 100644
--- a/v2/ansible/playbook/playbook_include.py
+++ b/v2/ansible/playbook/playbook_include.py
@@ -48,7 +48,8 @@ def load_data(self, ds, basedir, variable_manager=None, loader=None):
from ansible.playbook import Playbook
# first, we use the original parent method to correctly load the object
- # via the munge/load_data system we normally use for other playbook objects
+ # via the load_data/preprocess_data system we normally use for other
+ # playbook objects
new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
# then we use the object to load a Playbook
@@ -67,7 +68,7 @@ def load_data(self, ds, basedir, variable_manager=None, loader=None):
return pb
- def munge(self, ds):
+ def preprocess_data(self, ds):
'''
Regorganizes the data for a PlaybookInclude datastructure to line
up with what we expect the proper attributes to be
@@ -83,9 +84,7 @@ def munge(self, ds):
for (k,v) in ds.iteritems():
if k == 'include':
- self._munge_include(ds, new_ds, k, v)
- elif k.replace("with_", "") in lookup_loader:
- self._munge_loop(ds, new_ds, k, v)
+ self._preprocess_include(ds, new_ds, k, v)
else:
# some basic error checking, to make sure vars are properly
# formatted and do not conflict with k=v parameters
@@ -98,9 +97,9 @@ def munge(self, ds):
raise AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds)
new_ds[k] = v
- return super(PlaybookInclude, self).munge(new_ds)
+ return super(PlaybookInclude, self).preprocess_data(new_ds)
- def _munge_include(self, ds, new_ds, k, v):
+ def _preprocess_include(self, ds, new_ds, k, v):
'''
Splits the include line up into filename and parameters
'''
diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py
index bc1a0daacf2ae7..fb96a0e55f9c83 100644
--- a/v2/ansible/playbook/role/definition.py
+++ b/v2/ansible/playbook/role/definition.py
@@ -54,12 +54,12 @@ def __init__(self, role_basedir=None):
def load(data, variable_manager=None, loader=None):
raise AnsibleError("not implemented")
- def munge(self, ds):
+ def preprocess_data(self, ds):
assert isinstance(ds, dict) or isinstance(ds, string_types)
if isinstance(ds, dict):
- ds = super(RoleDefinition, self).munge(ds)
+ ds = super(RoleDefinition, self).preprocess_data(ds)
# we create a new data structure here, using the same
# object used internally by the YAML parsing code so we
diff --git a/v2/ansible/playbook/role/requirement.py b/v2/ansible/playbook/role/requirement.py
index d321f6e17dfb32..61db0cb1fd4979 100644
--- a/v2/ansible/playbook/role/requirement.py
+++ b/v2/ansible/playbook/role/requirement.py
@@ -61,7 +61,7 @@ def parse(self, ds):
if isinstance(ds, string_types):
role_name = ds
else:
- ds = self._munge_role_spec(ds)
+ ds = self._preprocess_role_spec(ds)
(new_ds, role_params) = self._split_role_params(ds)
# pull the role name out of the ds
@@ -70,7 +70,7 @@ def parse(self, ds):
return (new_ds, role_name, role_params)
- def _munge_role_spec(self, ds):
+ def _preprocess_role_spec(self, ds):
if 'role' in ds:
# Old style: {role: "galaxy.role,version,name", other_vars: "here" }
role_info = self._role_spec_parse(ds['role'])
diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py
index ab66898242cb5a..0f5e7674866bbd 100644
--- a/v2/ansible/playbook/task.py
+++ b/v2/ansible/playbook/task.py
@@ -137,7 +137,7 @@ def __repr__(self):
''' returns a human readable representation of the task '''
return "TASK: %s" % self.get_name()
- def _munge_loop(self, ds, new_ds, k, v):
+ def _preprocess_loop(self, ds, new_ds, k, v):
''' take a lookup plugin name and store it correctly '''
loop_name = k.replace("with_", "")
@@ -146,7 +146,7 @@ def _munge_loop(self, ds, new_ds, k, v):
new_ds['loop'] = loop_name
new_ds['loop_args'] = v
- def munge(self, ds):
+ def preprocess_data(self, ds):
'''
tasks are especially complex arguments so need pre-processing.
keep it short.
@@ -178,11 +178,11 @@ def munge(self, ds):
# determined by the ModuleArgsParser() above
continue
elif k.replace("with_", "") in lookup_loader:
- self._munge_loop(ds, new_ds, k, v)
+ self._preprocess_loop(ds, new_ds, k, v)
else:
new_ds[k] = v
- return super(Task, self).munge(new_ds)
+ return super(Task, self).preprocess_data(new_ds)
def post_validate(self, all_vars=dict(), fail_on_undefined=True):
'''
From 8a0b8629e86efeddec7da5f8976231deee000f7f Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 24 Mar 2015 00:17:10 -0400
Subject: [PATCH 0155/3617] readded -u option
---
v2/ansible/utils/cli.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py
index 09f5ef4a30f9de..6500234c74125e 100644
--- a/v2/ansible/utils/cli.py
+++ b/v2/ansible/utils/cli.py
@@ -46,6 +46,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False,
parser = SortedOptParser(usage, version=version("%prog"))
+ parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
+ help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
From 131683523b97f9a2ce4ab062f566a26243d53b9f Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 23 Mar 2015 23:15:30 -0700
Subject: [PATCH 0156/3617] Add some comments to explain how the property code
for Attributes works
---
v2/ansible/playbook/base.py | 39 ++++++++++++++++++++++++++++++-------
1 file changed, 32 insertions(+), 7 deletions(-)
diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py
index 5aff5348ee7ab7..e834d3b729684f 100644
--- a/v2/ansible/playbook/base.py
+++ b/v2/ansible/playbook/base.py
@@ -54,21 +54,40 @@ def __init__(self):
# and initialize the base attributes
self._initialize_base_attributes()
+ # The following three functions are used to programatically define data
+ # descriptors (aka properties) for the Attributes of all of the playbook
+ # objects (tasks, blocks, plays, etc).
+ #
+ # The function signature is a little strange because of how we define
+ # them. We use partial to give each method the name of the Attribute that
+ # it is for. Since partial prefills the positional arguments at the
+ # beginning of the function we end up with the first positional argument
+ # being allocated to the name instead of to the class instance (self) as
+ # normal. To deal with that we make the property name field the first
+ # positional argument and self the second arg.
+ #
+ # Because these methods are defined inside of the class, they get bound to
+ # the instance when the object is created. After we run partial on them
+ # and put the result back into the class as a property, they get bound
+ # a second time. This leads to self being placed in the arguments twice.
+ # To work around that, we mark the functions as @staticmethod so that the
+ # first binding to the instance doesn't happen.
+
@staticmethod
- def _generic_g(key, self):
- method = "_get_attr_%s" % key
+ def _generic_g(prop_name, self):
+ method = "_get_attr_%s" % prop_name
if method in dir(self):
return getattr(self, method)()
- return self._attributes[key]
+ return self._attributes[prop_name]
@staticmethod
- def _generic_s(key, self, value):
- self._attributes[key] = value
+ def _generic_s(prop_name, self, value):
+ self._attributes[prop_name] = value
@staticmethod
- def _generic_d(key, self):
- del self._attributes[key]
+ def _generic_d(prop_name, self):
+ del self._attributes[prop_name]
def _get_base_attributes(self):
'''
@@ -91,7 +110,13 @@ def _initialize_base_attributes(self):
getter = partial(self._generic_g, name)
setter = partial(self._generic_s, name)
deleter = partial(self._generic_d, name)
+
+ # Place the property into the class so that cls.name is the
+ # property functions.
setattr(Base, name, property(getter, setter, deleter))
+
+ # Place the value into the instance so that the property can
+ # process and hold that value/
setattr(self, name, value.default)
def preprocess_data(self, ds):
From c6942578bfb8ecf79850f418ca94d2655b3cef12 Mon Sep 17 00:00:00 2001
From: Henrik Danielsson
Date: Tue, 24 Mar 2015 11:27:12 +0100
Subject: [PATCH 0157/3617] Added installation instructions for Arch Linux.
---
docsite/rst/intro_installation.rst | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst
index 303880cac11f84..450d125e5f5460 100644
--- a/docsite/rst/intro_installation.rst
+++ b/docsite/rst/intro_installation.rst
@@ -261,6 +261,17 @@ Ansible is available for Solaris as `SysV package from OpenCSW `_.
+
.. _from_pip:
Latest Releases Via Pip
From 19ba26e9a5ddb4aa1d326ae058e8a79b349345dc Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 24 Mar 2015 14:48:50 -0400
Subject: [PATCH 0158/3617] makes raw module have quiet ssh so as to avoid
extra output when not required
---
lib/ansible/runner/connection_plugins/ssh.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py
index a7a57a01cf25f1..036175f6a9c3e2 100644
--- a/lib/ansible/runner/connection_plugins/ssh.py
+++ b/lib/ansible/runner/connection_plugins/ssh.py
@@ -272,7 +272,10 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab
if utils.VERBOSITY > 3:
ssh_cmd += ["-vvv"]
else:
- ssh_cmd += ["-v"]
+ if self.runner.module_name == 'raw':
+ ssh_cmd += ["-q"]
+ else:
+ ssh_cmd += ["-v"]
ssh_cmd += self.common_args
if self.ipv6:
From cf6155f1c2f8696e9e0cc681c13e8a26ac05885a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A1n=20Dzurek?=
Date: Tue, 24 Mar 2015 20:00:51 +0100
Subject: [PATCH 0159/3617] rst.j2 template better core module source wording
---
hacking/templates/rst.j2 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2
index 6873c3fea5855d..d6d252c5c6b005 100644
--- a/hacking/templates/rst.j2
+++ b/hacking/templates/rst.j2
@@ -177,7 +177,7 @@ Common return values are documented here :doc:`common_return_values`, the follow
This is a Core Module
---------------------
-This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo.
+The source of this module is hosted on GitHub in the `ansible-modules-core `_ repo.
If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
From b6ec502983a598e1a4043f541df3c2279e80a99e Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 24 Mar 2015 21:09:04 -0400
Subject: [PATCH 0160/3617] added missing element to make google groups link an
actual link
---
hacking/templates/rst.j2 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2
index d6d252c5c6b005..444b4243af5241 100644
--- a/hacking/templates/rst.j2
+++ b/hacking/templates/rst.j2
@@ -196,7 +196,7 @@ This source of this module is hosted on GitHub in the `ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
+Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_.
Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
From aca4e292fa3f762f85b027c089cab181cd0761da Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 25 Mar 2015 09:55:39 -0400
Subject: [PATCH 0161/3617] some updates of what 1.9 includes
---
CHANGELOG.md | 45 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 45 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b5adaa6e5320c7..688fc78ff9eca2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,21 @@ Ansible Changes By Release
in progress, details pending
+* Tags rehaul: added 'always', 'untagged' and 'tagged' special tags and normalized
+ tag resolution. Added tag information to --list-tasks and new --list-tags option.
+
+* Privilege Escalation generalization, new 'Become' system and varialbes now will
+ handle existing and new methods. Sudo and su have been kept for backwards compatibility.
+ New methods pbrun and pfexec in 'alpha' state, planned adding 'runas' for winrm connection plugin.
+
+* Improved ssh connection error reporting, now you get back the specific message from ssh.
+
+* Added facility to document task module return values for registered vars, both for
+ ansible-doc and the docsite. Documented copy, stats and acl modules, the rest must be
+ updated individually (we will start doing so incrementally).
+
* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally.
+
* Safety changes: several modules have force parameters that defaulted to true.
These have been changed to default to false so as not to accidentally lose
work. Playbooks that depended on the former behaviour simply to add
@@ -29,8 +43,39 @@ in progress, details pending
* Optimize the plugin loader to cache available plugins much more efficiently.
For some use cases this can lead to dramatic improvements in startup time.
+* Overhaul of the checksum system, now supports more systems and more cases more reliably and uniformly.
+
* Fix skipped tasks to not display their parameters if no_log is specified.
+* Many fixes to unicode support, standarized functions to make it easier to add to input/output boundries.
+
+* Added travis integration to github for basic tests, this should speed up ticket triage and merging.
+
+* environment: directive now can also be applied to play and is inhertited by tasks, which can still overridde it.
+
+* expanded facts and OS support for existing facts.
+
+* new 'wantlist' option to lookups allows for selecting a list typed variable vs a commad delimited string as the return.
+
+* the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes).
+
+* new filters:
+ * ternary: allows for trueval/falseval assignement dependint on conditional
+ * cartesian: returns the cartesian product of 2 lists
+ * to_uuid: given a string it will return an ansible domain specific UUID
+ * A whole set of ip/network manipulation filters: ipaddr,ipwrap,ipv4,ipv6ipsubnet,nthhost,hwaddr,macaddr
+
+* new lookup plugins (allow fetching data for use in plays):
+ * dig: does dns resolution and returns IPs.
+ * url: allows pulling data from a url.
+
+* new callback plugins:
+ * syslog_json: allows logging play output to a syslog network server using json format
+
+* new task modules:
+
+* Many documentation additions and fixes.
+
## 1.8.4 "You Really Got Me" - Feb 19, 2015
* Fixed regressions in ec2 and mount modules, introduced in 1.8.3
From 699f6b16dbe953cb5d3b3538a40a9f5726573f97 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 25 Mar 2015 10:36:20 -0400
Subject: [PATCH 0162/3617] a few more updates
---
CHANGELOG.md | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 688fc78ff9eca2..313ae81e624830 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,9 @@ Ansible Changes By Release
in progress, details pending
-* Tags rehaul: added 'always', 'untagged' and 'tagged' special tags and normalized
+* Added kerberos suport to winrm connection plugin.
+
+* Tags rehaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized
tag resolution. Added tag information to --list-tasks and new --list-tags option.
* Privilege Escalation generalization, new 'Become' system and varialbes now will
@@ -53,16 +55,23 @@ in progress, details pending
* environment: directive now can also be applied to play and is inhertited by tasks, which can still overridde it.
-* expanded facts and OS support for existing facts.
+* expanded facts and OS/distribution support for existing facts and improved performance with pypy.
* new 'wantlist' option to lookups allows for selecting a list typed variable vs a commad delimited string as the return.
* the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes).
+* allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules).
+
+* sped up YAML parsing in ansible by up to 25% by switching to CParser loader.
+
* new filters:
* ternary: allows for trueval/falseval assignement dependint on conditional
* cartesian: returns the cartesian product of 2 lists
* to_uuid: given a string it will return an ansible domain specific UUID
+ * checksum: uses the ansible internal checksum to return a hash from a string
+ * hash: get a hash from a string (md5, sha1, etc)
+ * password_hash: get a hash form as string that can be used as a password in the user module (and others)
* A whole set of ip/network manipulation filters: ipaddr,ipwrap,ipv4,ipv6ipsubnet,nthhost,hwaddr,macaddr
* new lookup plugins (allow fetching data for use in plays):
@@ -73,9 +82,15 @@ in progress, details pending
* syslog_json: allows logging play output to a syslog network server using json format
* new task modules:
+ * patch: allows for patching files on target systems
+
+* new inventory scripts:
+ * vbox: virtualbox
+ * consul: use consul as an inventory source
* Many documentation additions and fixes.
+
## 1.8.4 "You Really Got Me" - Feb 19, 2015
* Fixed regressions in ec2 and mount modules, introduced in 1.8.3
From 34cd6deb9f93050efe1c6600f2acb62f986c7a12 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 25 Mar 2015 07:41:13 -0700
Subject: [PATCH 0163/3617] Spelling
---
CHANGELOG.md | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 313ae81e624830..f5cb2f0e5d24dc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,7 +10,7 @@ in progress, details pending
* Tags rehaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized
tag resolution. Added tag information to --list-tasks and new --list-tags option.
-* Privilege Escalation generalization, new 'Become' system and varialbes now will
+* Privilege Escalation generalization, new 'Become' system and variables now will
handle existing and new methods. Sudo and su have been kept for backwards compatibility.
New methods pbrun and pfexec in 'alpha' state, planned adding 'runas' for winrm connection plugin.
@@ -24,23 +24,23 @@ in progress, details pending
* Safety changes: several modules have force parameters that defaulted to true.
These have been changed to default to false so as not to accidentally lose
- work. Playbooks that depended on the former behaviour simply to add
+ work. Playbooks that depended on the former behaviour simply need to add
force=True to the task that needs it. Affected modules:
* bzr: When local modifications exist in a checkout, the bzr module used to
- default to temoving the modifications on any operation. Now the module
+ default to removing the modifications on any operation. Now the module
will not remove the modifications unless force=yes is specified.
Operations that depend on a clean working tree may fail unless force=yes is
added.
* git: When local modifications exist in a checkout, the git module will now
- fail unless force is explictly specified. Specifying force will allow the
- module to revert and overwrite local modifications to make git actions
+ fail unless force is explictly specified. Specifying force=yes will allow
+ the module to revert and overwrite local modifications to make git actions
succeed.
* hg: When local modifications exist in a checkout, the hg module used to
default to removing the modifications on any operation. Now the module
will not remove the modifications unless force=yes is specified.
* subversion: When updating a checkout with local modifications, you now need
- to add force so the module will revert the modifications before updating.
+ to add force=yes so the module will revert the modifications before updating.
* Optimize the plugin loader to cache available plugins much more efficiently.
For some use cases this can lead to dramatic improvements in startup time.
From 00b9364699cfd1ea7faf13ea9327ac4f51a9a3bc Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 25 Mar 2015 10:56:30 -0400
Subject: [PATCH 0164/3617] added modules from extras
---
CHANGELOG.md | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f5cb2f0e5d24dc..1dd459892c6803 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -82,7 +82,20 @@ in progress, details pending
* syslog_json: allows logging play output to a syslog network server using json format
* new task modules:
+ * cryptab: manages linux encrypted block devices
+ * gce_img: for utilizing GCE image resources
+ * gluster_volume: manage glusterfs volumes
+ * haproxy: for the load balancer of same name
+ * known_hosts: manages the ssh known_hosts file
+ * lxc_container: manage lxc containers
* patch: allows for patching files on target systems
+ * pkg5: installing and uninstalling packages on Solaris
+ * pkg5_publisher: manages Solaris pkg5 repository configuration
+ * postgresql_ext: manage postgresql extensions
+ * snmp_facts: gather facts via snmp
+ * svc: manages daemontools based services
+ * uptimerobot: manage monitoring with this service
+
* new inventory scripts:
* vbox: virtualbox
From 1b11e45f3cb4e1e5671104d85e58430b43a70725 Mon Sep 17 00:00:00 2001
From: Matthieu Caneill
Date: Wed, 25 Mar 2015 16:34:07 +0100
Subject: [PATCH 0165/3617] doc: building debian package: 'asciidoc' is a
required dependency
---
packaging/debian/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/packaging/debian/README.md b/packaging/debian/README.md
index c7538dbf793603..715084380d76aa 100644
--- a/packaging/debian/README.md
+++ b/packaging/debian/README.md
@@ -4,7 +4,7 @@ Ansible Debian Package
To create an Ansible DEB package:
sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools sshpass
- sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot
+ sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot asciidoc
git clone git://github.com/ansible/ansible.git
cd ansible
make deb
From 1aaf444943f1f338878c494dec2b59a2639e6669 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 25 Mar 2015 08:51:35 -0700
Subject: [PATCH 0166/3617] Put all module changes in the same location
---
CHANGELOG.md | 48 +++++++++++++++++++++++++-----------------------
1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1dd459892c6803..e9024224115247 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,28 +20,6 @@ in progress, details pending
ansible-doc and the docsite. Documented copy, stats and acl modules, the rest must be
updated individually (we will start doing so incrementally).
-* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally.
-
-* Safety changes: several modules have force parameters that defaulted to true.
- These have been changed to default to false so as not to accidentally lose
- work. Playbooks that depended on the former behaviour simply need to add
- force=True to the task that needs it. Affected modules:
-
- * bzr: When local modifications exist in a checkout, the bzr module used to
- default to removing the modifications on any operation. Now the module
- will not remove the modifications unless force=yes is specified.
- Operations that depend on a clean working tree may fail unless force=yes is
- added.
- * git: When local modifications exist in a checkout, the git module will now
- fail unless force is explictly specified. Specifying force=yes will allow
- the module to revert and overwrite local modifications to make git actions
- succeed.
- * hg: When local modifications exist in a checkout, the hg module used to
- default to removing the modifications on any operation. Now the module
- will not remove the modifications unless force=yes is specified.
- * subversion: When updating a checkout with local modifications, you now need
- to add force=yes so the module will revert the modifications before updating.
-
* Optimize the plugin loader to cache available plugins much more efficiently.
For some use cases this can lead to dramatic improvements in startup time.
@@ -97,13 +75,37 @@ in progress, details pending
* uptimerobot: manage monitoring with this service
+* module enhancements and notable changes
+ * The selinux module now sets the current running state to permissive if state='disabled'
+ * Can now set accounts as expired via the user module
+ * vsphere_guest now supports deploying guests from a template
+ * Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally.
+ * Safety changes: several modules have force parameters that defaulted to true.
+ These have been changed to default to false so as not to accidentally lose
+ work. Playbooks that depended on the former behaviour simply need to add
+ force=True to the task that needs it. Affected modules:
+ * bzr: When local modifications exist in a checkout, the bzr module used to
+ default to removing the modifications on any operation. Now the module
+ will not remove the modifications unless force=yes is specified.
+ Operations that depend on a clean working tree may fail unless force=yes is
+ added.
+ * git: When local modifications exist in a checkout, the git module will now
+ fail unless force is explictly specified. Specifying force=yes will allow
+ the module to revert and overwrite local modifications to make git actions
+ succeed.
+ * hg: When local modifications exist in a checkout, the hg module used to
+ default to removing the modifications on any operation. Now the module
+ will not remove the modifications unless force=yes is specified.
+ * subversion: When updating a checkout with local modifications, you now need
+ to add force=yes so the module will revert the modifications before updating.
+
+
* new inventory scripts:
* vbox: virtualbox
* consul: use consul as an inventory source
* Many documentation additions and fixes.
-
## 1.8.4 "You Really Got Me" - Feb 19, 2015
* Fixed regressions in ec2 and mount modules, introduced in 1.8.3
From 9b20ca31d6e7e3cc9344468328b7e85823f660a3 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 25 Mar 2015 09:24:48 -0700
Subject: [PATCH 0167/3617] Add a bunch of changelog entries for 1.9
---
CHANGELOG.md | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e9024224115247..bb1dfcad2987d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,7 +5,7 @@ Ansible Changes By Release
in progress, details pending
-* Added kerberos suport to winrm connection plugin.
+* Added kerberos support to winrm connection plugin.
* Tags rehaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized
tag resolution. Added tag information to --list-tasks and new --list-tags option.
@@ -76,10 +76,26 @@ in progress, details pending
* module enhancements and notable changes
+ * vsphere_guest now supports deploying guests from a template
+ * ec2_vol gained the ability to specify the EBS volume type
+ * ec2_vol can now detach volumes by specifying instance=None
+ * Added tenancy support for the ec2 module
+ * rds module has gained the ability to manage tags and set charset and public accessibility
+ * ec2_snapshot module gained the capability to remove snapshots
+ * Several important docker changes:
+ * restart_policy parameters to configure when the container automatically restarts
+ * If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option
+ * Add insecure_registry parameter for connecting to registries via http
+ * authorized_keys can now use url as a key source
* The selinux module now sets the current running state to permissive if state='disabled'
* Can now set accounts as expired via the user module
- * vsphere_guest now supports deploying guests from a template
+ * Overhaul of the service module to make code simpler and behave better for systems running systemd or rcctl
+ * yum module now has a parameter to refresh its cache of package metadata
+ * Add parameters to the postgres modules to specify a unix socket to connect to the db
+ * The mount module now supports bind mounts
+ * django_manage can now handle
* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally.
+ * Add a refspec argument to the git module that allows pulling commits that aren't part of a branch
* Safety changes: several modules have force parameters that defaulted to true.
These have been changed to default to false so as not to accidentally lose
work. Playbooks that depended on the former behaviour simply need to add
From 2c3e58ad594ed5b3d5dd75263a383dd3cbf9119e Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 25 Mar 2015 10:15:19 -0700
Subject: [PATCH 0168/3617] And all of core module changes added
---
CHANGELOG.md | 41 ++++++++++++++++++++++++++++++++++-------
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bb1dfcad2987d4..ada38e6f155732 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -77,20 +77,47 @@ in progress, details pending
* module enhancements and notable changes
* vsphere_guest now supports deploying guests from a template
- * ec2_vol gained the ability to specify the EBS volume type
- * ec2_vol can now detach volumes by specifying instance=None
- * Added tenancy support for the ec2 module
- * rds module has gained the ability to manage tags and set charset and public accessibility
- * ec2_snapshot module gained the capability to remove snapshots
+ * Multiple new enhancements to the amazon web service modules:
+ * ec2 now applies all specified security groups when creating a new instance. Previously it was only applying one
+ * ec2_vol gained the ability to specify the EBS volume type
+ * ec2_vol can now detach volumes by specifying instance=None
+ * Fix ec2_group to purge specific grants rather than whole rules
+ * Added tenancy support for the ec2 module
+ * rds module has gained the ability to manage tags and set charset and public accessibility
+ * ec2_snapshot module gained the capability to remove snapshots
+ * Add alias support for route53
+ * Add private_zones support to route53
+ * ec2_asg: Add wait_for_instances parameter that waits until an instance is ready before ending the ansible task
+ * gce gained the ip_forward parameter to forward ip packets
+ * disk_auto_delete parameter to gce that will remove the boot disk after an instance is destroyed
+ * gce can now spawn instances with no external ip
+ * gce_pd gained the ability to choose a disk type
+ * gce_net gained target_tags parameter for creating firewall rules
+ * rax module has new parameters for making use of a boot volume
+ * Add scheduler_hints to the nova_compute module for optional parameters
* Several important docker changes:
* restart_policy parameters to configure when the container automatically restarts
* If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option
* Add insecure_registry parameter for connecting to registries via http
+ * New parameter to set a container's domainname
+ * Undeprecated docker_image module until there's replacement functionality
+ * Allow setting the container's pid namespace
+ * Add a pull parameter that chooses when ansible will look for more recent images in the registry
+ * docker module states have been greatly enhanced. The reworked and new states are:
+ * present now creates but does not start containers
+ * restarted always restarts a container
+ * reloaded restarts a container if ansible detects that the configuration is different than what is spcified
+ * reloaded accounts for exposed ports, env vars, and volumes
+ * Can now connect to the docker server using TLS
+ * Many fixes for hardlink and softlink handling in file-related modules
+ * Implement user, group, mode, and selinux parameters for the unarchive module
* authorized_keys can now use url as a key source
+ * authorized_keys has a new exclusive paameter that determines if keys that weren't specified in the task
* The selinux module now sets the current running state to permissive if state='disabled'
- * Can now set accounts as expired via the user module
- * Overhaul of the service module to make code simpler and behave better for systems running systemd or rcctl
+ * Can now set accounts to expire via the user module
+ * Overhaul of the service module to make code simpler and behave better for systems running several popular init systems
* yum module now has a parameter to refresh its cache of package metadata
+ * apt module gained a build_dep parameter to install a package's build dependencies
* Add parameters to the postgres modules to specify a unix socket to connect to the db
* The mount module now supports bind mounts
* django_manage can now handle
From 1eed3edc2fa1e53466f7a74e275424e1e80b3b42 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Wed, 25 Mar 2015 14:17:02 -0500
Subject: [PATCH 0169/3617] tweaking the CHANGELOG
---
CHANGELOG.md | 223 ++++++++++++++++++++++++---------------------------
1 file changed, 106 insertions(+), 117 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ada38e6f155732..e1d171e8b45770 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,152 +1,141 @@
Ansible Changes By Release
==========================
-## 1.9 "Dancing In the Street" - ACTIVE DEVELOPMENT
+## 2.0 "TBD" - ACTIVE DEVELOPMENT
-in progress, details pending
+Major Changes:
-* Added kerberos support to winrm connection plugin.
+New Modules:
+
+Other Notable Changes:
+
+## 1.9 "Dancing In the Street" - Mar 25, 2015
+Major changes:
+
+* Added kerberos support to winrm connection plugin.
* Tags rehaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized
tag resolution. Added tag information to --list-tasks and new --list-tags option.
-
* Privilege Escalation generalization, new 'Become' system and variables now will
handle existing and new methods. Sudo and su have been kept for backwards compatibility.
New methods pbrun and pfexec in 'alpha' state, planned adding 'runas' for winrm connection plugin.
-
* Improved ssh connection error reporting, now you get back the specific message from ssh.
-
* Added facility to document task module return values for registered vars, both for
ansible-doc and the docsite. Documented copy, stats and acl modules, the rest must be
updated individually (we will start doing so incrementally).
-
* Optimize the plugin loader to cache available plugins much more efficiently.
For some use cases this can lead to dramatic improvements in startup time.
-
* Overhaul of the checksum system, now supports more systems and more cases more reliably and uniformly.
-
* Fix skipped tasks to not display their parameters if no_log is specified.
-
* Many fixes to unicode support, standarized functions to make it easier to add to input/output boundries.
-
* Added travis integration to github for basic tests, this should speed up ticket triage and merging.
-
* environment: directive now can also be applied to play and is inhertited by tasks, which can still overridde it.
-
* expanded facts and OS/distribution support for existing facts and improved performance with pypy.
-
* new 'wantlist' option to lookups allows for selecting a list typed variable vs a commad delimited string as the return.
-
* the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes).
-
* allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules).
-
* sped up YAML parsing in ansible by up to 25% by switching to CParser loader.
-* new filters:
- * ternary: allows for trueval/falseval assignement dependint on conditional
- * cartesian: returns the cartesian product of 2 lists
- * to_uuid: given a string it will return an ansible domain specific UUID
- * checksum: uses the ansible internal checksum to return a hash from a string
- * hash: get a hash from a string (md5, sha1, etc)
- * password_hash: get a hash form as string that can be used as a password in the user module (and others)
- * A whole set of ip/network manipulation filters: ipaddr,ipwrap,ipv4,ipv6ipsubnet,nthhost,hwaddr,macaddr
-
-* new lookup plugins (allow fetching data for use in plays):
- * dig: does dns resolution and returns IPs.
- * url: allows pulling data from a url.
+New Modules:
-* new callback plugins:
+* cryptab: manages linux encrypted block devices
+* gce_img: for utilizing GCE image resources
+* gluster_volume: manage glusterfs volumes
+* haproxy: for the load balancer of same name
+* known_hosts: manages the ssh known_hosts file
+* lxc_container: manage lxc containers
+* patch: allows for patching files on target systems
+* pkg5: installing and uninstalling packages on Solaris
+* pkg5_publisher: manages Solaris pkg5 repository configuration
+* postgresql_ext: manage postgresql extensions
+* snmp_facts: gather facts via snmp
+* svc: manages daemontools based services
+* uptimerobot: manage monitoring with this service
+
+New Filters:
+
+* ternary: allows for trueval/falseval assignement dependint on conditional
+* cartesian: returns the cartesian product of 2 lists
+* to_uuid: given a string it will return an ansible domain specific UUID
+* checksum: uses the ansible internal checksum to return a hash from a string
+* hash: get a hash from a string (md5, sha1, etc)
+* password_hash: get a hash form as string that can be used as a password in the user module (and others)
+* A whole set of ip/network manipulation filters: ipaddr,ipwrap,ipv4,ipv6ipsubnet,nthhost,hwaddr,macaddr
+
+Other Notable Changes:
+
+* New lookup plugins:
+ * dig: does dns resolution and returns IPs.
+ * url: allows pulling data from a url.
+* New callback plugins:
* syslog_json: allows logging play output to a syslog network server using json format
-
-* new task modules:
- * cryptab: manages linux encrypted block devices
- * gce_img: for utilizing GCE image resources
- * gluster_volume: manage glusterfs volumes
- * haproxy: for the load balancer of same name
- * known_hosts: manages the ssh known_hosts file
- * lxc_container: manage lxc containers
- * patch: allows for patching files on target systems
- * pkg5: installing and uninstalling packages on Solaris
- * pkg5_publisher: manages Solaris pkg5 repository configuration
- * postgresql_ext: manage postgresql extensions
- * snmp_facts: gather facts via snmp
- * svc: manages daemontools based services
- * uptimerobot: manage monitoring with this service
-
-
-* module enhancements and notable changes
- * vsphere_guest now supports deploying guests from a template
- * Multiple new enhancements to the amazon web service modules:
- * ec2 now applies all specified security groups when creating a new instance. Previously it was only applying one
- * ec2_vol gained the ability to specify the EBS volume type
- * ec2_vol can now detach volumes by specifying instance=None
- * Fix ec2_group to purge specific grants rather than whole rules
- * Added tenancy support for the ec2 module
- * rds module has gained the ability to manage tags and set charset and public accessibility
- * ec2_snapshot module gained the capability to remove snapshots
- * Add alias support for route53
- * Add private_zones support to route53
- * ec2_asg: Add wait_for_instances parameter that waits until an instance is ready before ending the ansible task
- * gce gained the ip_forward parameter to forward ip packets
- * disk_auto_delete parameter to gce that will remove the boot disk after an instance is destroyed
- * gce can now spawn instances with no external ip
- * gce_pd gained the ability to choose a disk type
- * gce_net gained target_tags parameter for creating firewall rules
- * rax module has new parameters for making use of a boot volume
- * Add scheduler_hints to the nova_compute module for optional parameters
- * Several important docker changes:
- * restart_policy parameters to configure when the container automatically restarts
- * If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option
- * Add insecure_registry parameter for connecting to registries via http
- * New parameter to set a container's domainname
- * Undeprecated docker_image module until there's replacement functionality
- * Allow setting the container's pid namespace
- * Add a pull parameter that chooses when ansible will look for more recent images in the registry
- * docker module states have been greatly enhanced. The reworked and new states are:
- * present now creates but does not start containers
- * restarted always restarts a container
- * reloaded restarts a container if ansible detects that the configuration is different than what is spcified
- * reloaded accounts for exposed ports, env vars, and volumes
- * Can now connect to the docker server using TLS
- * Many fixes for hardlink and softlink handling in file-related modules
- * Implement user, group, mode, and selinux parameters for the unarchive module
- * authorized_keys can now use url as a key source
- * authorized_keys has a new exclusive paameter that determines if keys that weren't specified in the task
- * The selinux module now sets the current running state to permissive if state='disabled'
- * Can now set accounts to expire via the user module
- * Overhaul of the service module to make code simpler and behave better for systems running several popular init systems
- * yum module now has a parameter to refresh its cache of package metadata
- * apt module gained a build_dep parameter to install a package's build dependencies
- * Add parameters to the postgres modules to specify a unix socket to connect to the db
- * The mount module now supports bind mounts
- * django_manage can now handle
- * Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally.
- * Add a refspec argument to the git module that allows pulling commits that aren't part of a branch
- * Safety changes: several modules have force parameters that defaulted to true.
- These have been changed to default to false so as not to accidentally lose
- work. Playbooks that depended on the former behaviour simply need to add
- force=True to the task that needs it. Affected modules:
- * bzr: When local modifications exist in a checkout, the bzr module used to
- default to removing the modifications on any operation. Now the module
- will not remove the modifications unless force=yes is specified.
- Operations that depend on a clean working tree may fail unless force=yes is
- added.
- * git: When local modifications exist in a checkout, the git module will now
- fail unless force is explictly specified. Specifying force=yes will allow
- the module to revert and overwrite local modifications to make git actions
- succeed.
- * hg: When local modifications exist in a checkout, the hg module used to
- default to removing the modifications on any operation. Now the module
- will not remove the modifications unless force=yes is specified.
- * subversion: When updating a checkout with local modifications, you now need
- to add force=yes so the module will revert the modifications before updating.
-
-
-* new inventory scripts:
+* Many new enhancements to the amazon web service modules:
+ * ec2 now applies all specified security groups when creating a new instance. Previously it was only applying one
+ * ec2_vol gained the ability to specify the EBS volume type
+ * ec2_vol can now detach volumes by specifying instance=None
+ * Fix ec2_group to purge specific grants rather than whole rules
+ * Added tenancy support for the ec2 module
+ * rds module has gained the ability to manage tags and set charset and public accessibility
+ * ec2_snapshot module gained the capability to remove snapshots
+ * Add alias support for route53
+ * Add private_zones support to route53
+ * ec2_asg: Add wait_for_instances parameter that waits until an instance is ready before ending the ansible task
+* Many new docker improvements:
+ * restart_policy parameters to configure when the container automatically restarts
+ * If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option
+ * Add insecure_registry parameter for connecting to registries via http
+ * New parameter to set a container's domainname
+ * Undeprecated docker_image module until there's replacement functionality
+ * Allow setting the container's pid namespace
+ * Add a pull parameter that chooses when ansible will look for more recent images in the registry
+ * docker module states have been greatly enhanced. The reworked and new states are:
+ * present now creates but does not start containers
+ * restarted always restarts a container
+ * reloaded restarts a container if ansible detects that the configuration is different than what is spcified
+ * reloaded accounts for exposed ports, env vars, and volumes
+ * Can now connect to the docker server using TLS
+* Several source control modules had force parameters that defaulted to true.
+ These have been changed to default to false so as not to accidentally lose
+ work. Playbooks that depended on the former behaviour simply need to add
+ force=True to the task that needs it. Affected modules:
+ * bzr: When local modifications exist in a checkout, the bzr module used to
+ default to removing the modifications on any operation. Now the module
+ will not remove the modifications unless force=yes is specified.
+ Operations that depend on a clean working tree may fail unless force=yes is
+ added.
+ * git: When local modifications exist in a checkout, the git module will now
+ fail unless force is explictly specified. Specifying force=yes will allow
+ the module to revert and overwrite local modifications to make git actions
+ succeed.
+ * hg: When local modifications exist in a checkout, the hg module used to
+ default to removing the modifications on any operation. Now the module
+ will not remove the modifications unless force=yes is specified.
+ * subversion: When updating a checkout with local modifications, you now need
+ to add force=yes so the module will revert the modifications before updating.
+* New inventory scripts:
* vbox: virtualbox
* consul: use consul as an inventory source
-
+* gce gained the ip_forward parameter to forward ip packets
+* disk_auto_delete parameter to gce that will remove the boot disk after an instance is destroyed
+* gce can now spawn instances with no external ip
+* gce_pd gained the ability to choose a disk type
+* gce_net gained target_tags parameter for creating firewall rules
+* rax module has new parameters for making use of a boot volume
+* Add scheduler_hints to the nova_compute module for optional parameters
+* vsphere_guest now supports deploying guests from a template
+* Many fixes for hardlink and softlink handling in file-related modules
+* Implement user, group, mode, and selinux parameters for the unarchive module
+* authorized_keys can now use url as a key source
+* authorized_keys has a new exclusive paameter that determines if keys that weren't specified in the task
+* The selinux module now sets the current running state to permissive if state='disabled'
+* Can now set accounts to expire via the user module
+* Overhaul of the service module to make code simpler and behave better for systems running several popular init systems
+* yum module now has a parameter to refresh its cache of package metadata
+* apt module gained a build_dep parameter to install a package's build dependencies
+* Add parameters to the postgres modules to specify a unix socket to connect to the db
+* The mount module now supports bind mounts
+* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally.
+* Add a refspec argument to the git module that allows pulling commits that aren't part of a branch
* Many documentation additions and fixes.
## 1.8.4 "You Really Got Me" - Feb 19, 2015
From c024057e9721f8736068b5fb5743ff8b18f6248e Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 25 Mar 2015 12:21:46 -0700
Subject: [PATCH 0170/3617] Fix assert to work with unicode values
---
lib/ansible/utils/__init__.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py
index f164b25bd47cba..07e8174893fc39 100644
--- a/lib/ansible/utils/__init__.py
+++ b/lib/ansible/utils/__init__.py
@@ -260,10 +260,10 @@ def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
conditional = conditional.replace("jinja2_compare ","")
# allow variable names
- if conditional in inject and '-' not in str(inject[conditional]):
- conditional = inject[conditional]
+ if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
+ conditional = to_unicode(inject[conditional], nonstring='simplerepr')
conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
- original = str(conditional).replace("jinja2_compare ","")
+ original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
# a Jinja2 evaluation that results in something Python can eval!
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = template.template(basedir, presented, inject)
From aaa25eb75c84662d0d496188e143bc616e60ecc5 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 25 Mar 2015 12:22:45 -0700
Subject: [PATCH 0171/3617] Make run_command() work when we get byte str with
non-ascii characters (instead of unicode type like we were expecting)
Fix and test.
Fixes #10536
---
lib/ansible/module_utils/basic.py | 7 ++++++-
test/integration/unicode.yml | 9 +++++++++
2 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index b68a36b9c651a2..ad1d43f86ca99b 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -1457,7 +1457,12 @@ def run_command(self, args, check_rc=False, close_fds=True, executable=None, dat
# in reporting later, which strips out things like
# passwords from the args list
if isinstance(args, basestring):
- to_clean_args = shlex.split(args.encode('utf-8'))
+ if isinstance(args, unicode):
+ b_args = args.encode('utf-8')
+ else:
+ b_args = args
+ to_clean_args = shlex.split(b_args)
+ del b_args
else:
to_clean_args = args
diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml
index b04d760182c9ef..1044c2527053ed 100644
--- a/test/integration/unicode.yml
+++ b/test/integration/unicode.yml
@@ -41,6 +41,15 @@
- name: 'A task with unicode host vars'
debug: var=unicode_host_var
+ - name: 'A task with unicode shell parameters'
+ shell: echo '¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×'
+ register: output
+
+ - name: 'Assert that the unicode was echoed'
+ assert:
+ that:
+ - "'¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' in output.stdout_lines"
+
- name: 'A play for hosts in group: ĪīĬĭ'
hosts: 'ĪīĬĭ'
gather_facts: true
From 38892e986ef78271a06b1d228a0d3294281c40d4 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 25 Mar 2015 13:56:46 -0700
Subject: [PATCH 0172/3617] Convert exceptions to unicode using to_unicode
rather than str. that stops unicode errors if the string has non-ascii text
---
v2/ansible/executor/task_executor.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py
index 7eaba0061ef29e..4ac062251391a5 100644
--- a/v2/ansible/executor/task_executor.py
+++ b/v2/ansible/executor/task_executor.py
@@ -26,6 +26,7 @@
from ansible.playbook.task import Task
from ansible.plugins import lookup_loader, connection_loader, action_loader
from ansible.utils.listify import listify_lookup_plugin_terms
+from ansible.utils.unicode import to_unicode
from ansible.utils.debug import debug
@@ -89,7 +90,7 @@ def run(self):
debug("done dumping result, returning")
return result
except AnsibleError, e:
- return dict(failed=True, msg=str(e))
+ return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
def _get_loop_items(self):
'''
From 60f972dfe4bc58180c666f820ef2d602acf917e4 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 25 Mar 2015 13:57:48 -0700
Subject: [PATCH 0173/3617] Fix the command module handling of non-ascii
values.
We can't depend on the args being unicode text because we're in module
land, not in the ansible controller land
---
v2/ansible/module_utils/basic.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/v2/ansible/module_utils/basic.py b/v2/ansible/module_utils/basic.py
index 79a0fab67b6e4c..b3cebf0ba5a0fc 100644
--- a/v2/ansible/module_utils/basic.py
+++ b/v2/ansible/module_utils/basic.py
@@ -1433,7 +1433,7 @@ def run_command(self, args, check_rc=False, close_fds=True, executable=None, dat
msg = None
st_in = None
- # Set a temporart env path if a prefix is passed
+ # Set a temporary env path if a prefix is passed
env=os.environ
if path_prefix:
env['PATH']="%s:%s" % (path_prefix, env['PATH'])
@@ -1442,7 +1442,12 @@ def run_command(self, args, check_rc=False, close_fds=True, executable=None, dat
# in reporting later, which strips out things like
# passwords from the args list
if isinstance(args, basestring):
- to_clean_args = shlex.split(args.encode('utf-8'))
+ if isinstance(args, unicode):
+ b_args = args.encode('utf-8')
+ else:
+ b_args = args
+ to_clean_args = shlex.split(b_args)
+ del b_args
else:
to_clean_args = args
From c697bc2546444a3adbe86b7537e3e2d71ea75523 Mon Sep 17 00:00:00 2001
From: Andrew Thompson
Date: Wed, 25 Mar 2015 21:58:24 -0400
Subject: [PATCH 0174/3617] Fix some typos in CHANGELOG.md
---
CHANGELOG.md | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e1d171e8b45770..f354dfd145720f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -29,9 +29,9 @@ Major changes:
* Fix skipped tasks to not display their parameters if no_log is specified.
* Many fixes to unicode support, standarized functions to make it easier to add to input/output boundries.
* Added travis integration to github for basic tests, this should speed up ticket triage and merging.
-* environment: directive now can also be applied to play and is inhertited by tasks, which can still overridde it.
+* environment: directive now can also be applied to play and is inhertited by tasks, which can still override it.
* expanded facts and OS/distribution support for existing facts and improved performance with pypy.
-* new 'wantlist' option to lookups allows for selecting a list typed variable vs a commad delimited string as the return.
+* new 'wantlist' option to lookups allows for selecting a list typed variable vs a command delimited string as the return.
* the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes).
* allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules).
* sped up YAML parsing in ansible by up to 25% by switching to CParser loader.
@@ -126,7 +126,7 @@ Other Notable Changes:
* Many fixes for hardlink and softlink handling in file-related modules
* Implement user, group, mode, and selinux parameters for the unarchive module
* authorized_keys can now use url as a key source
-* authorized_keys has a new exclusive paameter that determines if keys that weren't specified in the task
+* authorized_keys has a new exclusive parameter that determines if keys that weren't specified in the task
* The selinux module now sets the current running state to permissive if state='disabled'
* Can now set accounts to expire via the user module
* Overhaul of the service module to make code simpler and behave better for systems running several popular init systems
From 51d6db136cf9f58847fefe5e2ba398e4e2ee974d Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 25 Mar 2015 22:06:30 -0400
Subject: [PATCH 0175/3617] updated changelog
---
CHANGELOG.md | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f354dfd145720f..3ae9d1d189182e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,8 +4,14 @@ Ansible Changes By Release
## 2.0 "TBD" - ACTIVE DEVELOPMENT
Major Changes:
+ big_ip modules now support turning off ssl certificat validation (use only for self signed)
New Modules:
+ vertica_configuration
+ vertica_facts
+ vertica_role
+ vertica_schema
+ vertica_user
Other Notable Changes:
From 74ef30cec1e90cf9f8b33937ea5c8bf7418d20b4 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 25 Mar 2015 23:16:05 -0400
Subject: [PATCH 0176/3617] added pushover module to changelog
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3ae9d1d189182e..72804bb65135ed 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ Major Changes:
big_ip modules now support turning off ssl certificat validation (use only for self signed)
New Modules:
+ pushover
vertica_configuration
vertica_facts
vertica_role
From 361517165160718e04755ccaf4a242f2fff8bbd0 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 25 Mar 2015 23:56:26 -0400
Subject: [PATCH 0177/3617] added maven artifact to changelog
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 72804bb65135ed..553e6090bb3607 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ Major Changes:
big_ip modules now support turning off ssl certificat validation (use only for self signed)
New Modules:
+ maven_artifact
pushover
vertica_configuration
vertica_facts
From e9c8e89c77738a65d9791d23f700023176206524 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 26 Mar 2015 01:16:32 -0400
Subject: [PATCH 0178/3617] added cloudtrail to changelog
---
CHANGELOG.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 553e6090bb3607..38c09d0b59d268 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ Major Changes:
big_ip modules now support turning off ssl certificat validation (use only for self signed)
New Modules:
+ cloudtrail
maven_artifact
pushover
vertica_configuration
From bb6d983290e030502bd407ba800ba0eb2f60209c Mon Sep 17 00:00:00 2001
From: Rene Moser
Date: Thu, 26 Mar 2015 10:26:33 +0100
Subject: [PATCH 0179/3617] cloudstack: add utils for common functionality
---
lib/ansible/module_utils/cloudstack.py | 182 +++++++++++++++++++++++++
1 file changed, 182 insertions(+)
create mode 100644 lib/ansible/module_utils/cloudstack.py
diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py
new file mode 100644
index 00000000000000..cb482ae993290d
--- /dev/null
+++ b/lib/ansible/module_utils/cloudstack.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser
+#
+# This code is part of Ansible, but is an independent component.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+import sys
+
+try:
+ from cs import CloudStack, CloudStackException, read_config
+except ImportError:
+ print("failed=True " + \
+ "msg='python library cs required: pip install cs'")
+ sys.exit(1)
+
+
+class AnsibleCloudStack:
+
+ def __init__(self, module):
+ self.module = module
+ self._connect()
+
+ self.project_id = None
+ self.ip_address_id = None
+ self.zone_id = None
+ self.vm_id = None
+ self.os_type_id = None
+ self.hypervisor = None
+
+
+ def _connect(self):
+ api_key = self.module.params.get('api_key')
+ api_secret = self.module.params.get('secret_key')
+ api_url = self.module.params.get('api_url')
+ api_http_method = self.module.params.get('api_http_method')
+
+ if api_key and api_secret and api_url:
+ self.cs = CloudStack(
+ endpoint=api_url,
+ key=api_key,
+ secret=api_secret,
+ method=api_http_method
+ )
+ else:
+ self.cs = CloudStack(**read_config())
+
+
+ def get_project_id(self):
+ if self.project_id:
+ return self.project_id
+
+ project = self.module.params.get('project')
+ if not project:
+ return None
+
+ projects = self.cs.listProjects()
+ if projects:
+ for p in projects['project']:
+ if project in [ p['name'], p['displaytext'], p['id'] ]:
+ self.project_id = p['id']
+ return self.project_id
+ self.module.fail_json(msg="project '%s' not found" % project)
+
+
+ def get_ip_address_id(self):
+ if self.ip_address_id:
+ return self.ip_address_id
+
+ ip_address = self.module.params.get('ip_address')
+ if not ip_address:
+ self.module.fail_json(msg="IP address param 'ip_address' is required")
+
+ args = {}
+ args['ipaddress'] = ip_address
+ args['projectid'] = self.get_project_id()
+ ip_addresses = self.cs.listPublicIpAddresses(**args)
+
+ if not ip_addresses:
+ self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
+
+ self.ip_address_id = ip_addresses['publicipaddress'][0]['id']
+ return self.ip_address_id
+
+
+ def get_vm_id(self):
+ if self.vm_id:
+ return self.vm_id
+
+ vm = self.module.params.get('vm')
+ if not vm:
+ self.module.fail_json(msg="Virtual machine param 'vm' is required")
+
+ args = {}
+ args['projectid'] = self.get_project_id()
+ vms = self.cs.listVirtualMachines(**args)
+ if vms:
+ for v in vms['virtualmachine']:
+ if vm in [ v['name'], v['id'] ]:
+ self.vm_id = v['id']
+ return self.vm_id
+ self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
+
+
+ def get_zone_id(self):
+ if self.zone_id:
+ return self.zone_id
+
+ zone = self.module.params.get('zone')
+ zones = self.cs.listZones()
+
+ # use the first zone if no zone param given
+ if not zone:
+ self.zone_id = zones['zone'][0]['id']
+ return self.zone_id
+
+ if zones:
+ for z in zones['zone']:
+ if zone in [ z['name'], z['id'] ]:
+ self.zone_id = z['id']
+ return self.zone_id
+ self.module.fail_json(msg="zone '%s' not found" % zone)
+
+
+ def get_os_type_id(self):
+ if self.os_type_id:
+ return self.os_type_id
+
+ os_type = self.module.params.get('os_type')
+ if not os_type:
+ return None
+
+ os_types = self.cs.listOsTypes()
+ if os_types:
+ for o in os_types['ostype']:
+ if os_type in [ o['description'], o['id'] ]:
+ self.os_type_id = o['id']
+ return self.os_type_id
+ self.module.fail_json(msg="OS type '%s' not found" % os_type)
+
+
+ def get_hypervisor(self):
+ if self.hypervisor:
+ return self.hypervisor
+
+ hypervisor = self.module.params.get('hypervisor')
+ hypervisors = self.cs.listHypervisors()
+
+ # use the first hypervisor if no hypervisor param given
+ if not hypervisor:
+ self.hypervisor = hypervisors['hypervisor'][0]['name']
+ return self.hypervisor
+
+ for h in hypervisors['hypervisor']:
+ if hypervisor.lower() == h['name'].lower():
+ self.hypervisor = h['name']
+ return self.hypervisor
+ self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
+
+
+ def _poll_job(self, job=None, key=None):
+ if 'jobid' in job:
+ while True:
+ res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
+ if res['jobstatus'] != 0:
+ if 'jobresult' in res and key is not None and key in res['jobresult']:
+ job = res['jobresult'][key]
+ break
+ time.sleep(2)
+ return job
From 1ba05dd3a298ccc0a377f718046dc80aeaea5860 Mon Sep 17 00:00:00 2001
From: Rene Moser
Date: Thu, 26 Mar 2015 14:10:18 +0100
Subject: [PATCH 0180/3617] cloudstack: add doc fragment
---
.../utils/module_docs_fragments/cloudstack.py | 62 +++++++++++++++++++
1 file changed, 62 insertions(+)
create mode 100644 lib/ansible/utils/module_docs_fragments/cloudstack.py
diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py
new file mode 100644
index 00000000000000..8d173ea756f3c4
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/cloudstack.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015 René Moser
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+
+class ModuleDocFragment(object):
+
+ # Standard cloudstack documentation fragment
+ DOCUMENTATION = '''
+options:
+ api_key:
+ description:
+ - API key of the CloudStack API.
+ required: false
+ default: null
+ aliases: []
+ api_secret:
+ description:
+ - Secret key of the CloudStack API.
+ required: false
+ default: null
+ aliases: []
+ api_url:
+ description:
+ - URL of the CloudStack API e.g. https://cloud.example.com/client/api.
+ required: false
+ default: null
+ aliases: []
+ api_http_method:
+ description:
+ - HTTP method used.
+ required: false
+ default: 'get'
+ aliases: []
+requirements:
+ - cs
+notes:
+ - Ansible uses the C(cs) library's configuration method if credentials are not
+ provided by the options C(api_url), C(api_key), C(api_secret).
+ Configuration is read from several locations, in the following order:
+ - The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and
+ C(CLOUDSTACK_METHOD) environment variables.
+ - A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file,
+ - A C(cloudstack.ini) file in the current working directory.
+ - A C(.cloudstack.ini) file in the user's home directory.
+ See https://github.com/exoscale/cs for more information.
+ - This module supports check mode.
+'''
From c066a60b7c48c9a31b51834d49bccfd0b00dd2e5 Mon Sep 17 00:00:00 2001
From: Rene Moser
Date: Thu, 26 Mar 2015 15:32:58 +0100
Subject: [PATCH 0181/3617] cloudstack: fail_json() if library cs is not found
---
lib/ansible/module_utils/cloudstack.py | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py
index cb482ae993290d..ab72f2c7894157 100644
--- a/lib/ansible/module_utils/cloudstack.py
+++ b/lib/ansible/module_utils/cloudstack.py
@@ -17,19 +17,20 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import sys
try:
from cs import CloudStack, CloudStackException, read_config
+ has_lib_cs = True
except ImportError:
- print("failed=True " + \
- "msg='python library cs required: pip install cs'")
- sys.exit(1)
+ has_lib_cs = False
class AnsibleCloudStack:
def __init__(self, module):
+ if not has_lib_cs:
+ module.fail_json(msg="python library cs required: pip install cs")
+
self.module = module
self._connect()
From 3e7d959c9d398d5cbe02b72d4717d86cc45b310a Mon Sep 17 00:00:00 2001
From: Rene Moser
Date: Thu, 26 Mar 2015 15:39:02 +0100
Subject: [PATCH 0182/3617] cloudstack: module utils are BSD licensed
---
lib/ansible/module_utils/cloudstack.py | 30 +++++++++++++++++---------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py
index ab72f2c7894157..f72d270d30b3b7 100644
--- a/lib/ansible/module_utils/cloudstack.py
+++ b/lib/ansible/module_utils/cloudstack.py
@@ -3,19 +3,29 @@
# (c) 2015, René Moser
#
# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
From 5bf9ea629882a9ef58fe37b68d84dd49980450c6 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 26 Mar 2015 11:52:19 -0700
Subject: [PATCH 0183/3617] make sure the shebang we inject into the module is
a str
Fixes #8564
---
lib/ansible/module_common.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/module_common.py b/lib/ansible/module_common.py
index 5e3732e9677bac..2ee23c90b774a3 100644
--- a/lib/ansible/module_common.py
+++ b/lib/ansible/module_common.py
@@ -26,6 +26,7 @@
from ansible import utils
from ansible import constants as C
from ansible import __version__
+from asnible.utils.unicode import to_bytes
REPLACER = "#<>"
REPLACER_ARGS = "\"<>\""
@@ -184,7 +185,8 @@ def modify_module(self, module_path, complex_args, module_args, inject):
interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
if interpreter_config in inject:
- lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
+ interpreter = to_bytes(inject[interpreter_config], errors='strict')
+ lines[0] = shebang = "#!%s %s" % (interpreter, " ".join(args[1:]))
module_data = "\n".join(lines)
return (module_data, module_style, shebang)
From ea2d00c5585a474b67f5031f689c143974eb9dc9 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 26 Mar 2015 11:57:27 -0700
Subject: [PATCH 0184/3617] v2 equivalent for
https://github.com/ansible/ansible/pull/8564
Looks like there's currently no code for the ansible_*_interpreter but
modified the note about adding it
---
v2/ansible/executor/module_common.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/v2/ansible/executor/module_common.py b/v2/ansible/executor/module_common.py
index 9f878fb6b02b56..7c76fd7427d363 100644
--- a/v2/ansible/executor/module_common.py
+++ b/v2/ansible/executor/module_common.py
@@ -165,23 +165,25 @@ def modify_module(module_path, module_args, strip_comments=False):
# facility = inject['ansible_syslog_facility']
# module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
- lines = module_data.split("\n", 1)
+ lines = module_data.split(b"\n", 1)
shebang = None
- if lines[0].startswith("#!"):
+ if lines[0].startswith(b"#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
# FIXME: more inject stuff here...
+ #from ansible.utils.unicode import to_bytes
#if interpreter_config in inject:
- # lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
+ # interpreter = to_bytes(inject[interpreter_config], errors='strict')
+ # lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))
lines.insert(1, ENCODING_STRING)
else:
lines.insert(0, ENCODING_STRING)
- module_data = "\n".join(lines)
+ module_data = b"\n".join(lines)
return (module_data, module_style, shebang)
From 0ec1b025a912c7c487083f87ae3ea87b7267dab6 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 26 Mar 2015 11:59:53 -0700
Subject: [PATCH 0185/3617] Update the module pointers
---
lib/ansible/modules/core | 2 +-
lib/ansible/modules/extras | 2 +-
v2/ansible/modules/extras | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 7683f36613ec09..5d776936cc67b2 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 7683f36613ec0904618b9b2d07f215b3f028a4e0
+Subproject commit 5d776936cc67b2f43d6be9630872595243213fb0
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
index cb848fcd9ec836..400166a655b304 160000
--- a/lib/ansible/modules/extras
+++ b/lib/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit cb848fcd9ec8364210fc05a5a7addd955b8a2529
+Subproject commit 400166a655b304094005aace178d0fab1cfe9763
diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras
index 46e316a20a92b5..400166a655b304 160000
--- a/v2/ansible/modules/extras
+++ b/v2/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit 46e316a20a92b5a54b982eddb301eb3d57da397e
+Subproject commit 400166a655b304094005aace178d0fab1cfe9763
From b7936009c2bc279e1175da8ec39eb5143f753204 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 26 Mar 2015 12:09:36 -0700
Subject: [PATCH 0186/3617] Correct typo
---
lib/ansible/module_common.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/module_common.py b/lib/ansible/module_common.py
index 2ee23c90b774a3..118c757f8dcae1 100644
--- a/lib/ansible/module_common.py
+++ b/lib/ansible/module_common.py
@@ -26,7 +26,7 @@
from ansible import utils
from ansible import constants as C
from ansible import __version__
-from asnible.utils.unicode import to_bytes
+from ansible.utils.unicode import to_bytes
REPLACER = "#<>"
REPLACER_ARGS = "\"<>\""
From 7b63a5799343c9a79679388416be99e1ef671a52 Mon Sep 17 00:00:00 2001
From: deimosfr
Date: Thu, 26 Mar 2015 21:40:36 +0100
Subject: [PATCH 0187/3617] fix consul inventory issue (missing method param)
---
plugins/inventory/consul_io.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/plugins/inventory/consul_io.py b/plugins/inventory/consul_io.py
index 46d47fd3bf5456..e0ff3fbbebd675 100755
--- a/plugins/inventory/consul_io.py
+++ b/plugins/inventory/consul_io.py
@@ -212,7 +212,7 @@ def load_data_for_node(self, node, datacenter):
'''loads the data for a sinle node adding it to various groups based on
metadata retrieved from the kv store and service availablity'''
- index, node_data = self.consul_api.catalog.node(node, datacenter)
+ index, node_data = self.consul_api.catalog.node(node, dc=datacenter)
node = node_data['Node']
self.add_node_to_map(self.nodes, 'all', node)
self.add_metadata(node_data, "consul_datacenter", datacenter)
From bc2e6d4d0eb6dd213abc4f179376922d41a0795d Mon Sep 17 00:00:00 2001
From: jxn
Date: Thu, 26 Mar 2015 20:00:52 -0500
Subject: [PATCH 0188/3617] Fix a few spelling errors in the changelog
---
CHANGELOG.md | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 38c09d0b59d268..10a9ca16048885 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,7 @@ Ansible Changes By Release
## 2.0 "TBD" - ACTIVE DEVELOPMENT
Major Changes:
- big_ip modules now support turning off ssl certificat validation (use only for self signed)
+ big_ip modules now support turning off ssl certificate validation (use only for self signed)
New Modules:
cloudtrail
@@ -63,8 +63,8 @@ New Modules:
New Filters:
-* ternary: allows for trueval/falseval assignement dependint on conditional
-* cartesian: returns the cartesian product of 2 lists
+* ternary: allows for trueval/falseval assignment dependent on conditional
+* cartesian: returns the Cartesian product of 2 lists
* to_uuid: given a string it will return an ansible domain specific UUID
* checksum: uses the ansible internal checksum to return a hash from a string
* hash: get a hash from a string (md5, sha1, etc)
@@ -93,14 +93,14 @@ Other Notable Changes:
* restart_policy parameters to configure when the container automatically restarts
* If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option
* Add insecure_registry parameter for connecting to registries via http
- * New parameter to set a container's domainname
+ * New parameter to set a container's domain name
* Undeprecated docker_image module until there's replacement functionality
* Allow setting the container's pid namespace
* Add a pull parameter that chooses when ansible will look for more recent images in the registry
* docker module states have been greatly enhanced. The reworked and new states are:
* present now creates but does not start containers
* restarted always restarts a container
- * reloaded restarts a container if ansible detects that the configuration is different than what is spcified
+ * reloaded restarts a container if ansible detects that the configuration is different than what is specified
* reloaded accounts for exposed ports, env vars, and volumes
* Can now connect to the docker server using TLS
* Several source control modules had force parameters that defaulted to true.
From e964439b990dd6695d1ee5c5d977d9e053edfcc4 Mon Sep 17 00:00:00 2001
From: kristous
Date: Fri, 27 Mar 2015 07:47:20 +0100
Subject: [PATCH 0189/3617] Update README.md
to get debuild you need to install devscripts
---
packaging/debian/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/packaging/debian/README.md b/packaging/debian/README.md
index 715084380d76aa..62c6af084c02d3 100644
--- a/packaging/debian/README.md
+++ b/packaging/debian/README.md
@@ -4,7 +4,7 @@ Ansible Debian Package
To create an Ansible DEB package:
sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools sshpass
- sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot asciidoc
+ sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot asciidoc devscripts
git clone git://github.com/ansible/ansible.git
cd ansible
make deb
From 576832e4c9224caaed8826f83e3b12a430e68277 Mon Sep 17 00:00:00 2001
From: Kim Johansson
Date: Fri, 27 Mar 2015 10:46:01 +0100
Subject: [PATCH 0190/3617] Always define error before using it
When the error reason is "Forbidden", the code throws a Python exception
rather than simply outputting the exception reason.
It's not nice to throw a Python exception when all the info to display
a proper message is available.
---
plugins/inventory/ec2.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py
index 5f7bd061d7210d..e93df1053d1e53 100755
--- a/plugins/inventory/ec2.py
+++ b/plugins/inventory/ec2.py
@@ -382,6 +382,8 @@ def get_rds_instances_by_region(self, region):
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError, e:
+ error = e.reason
+
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
From 5ec1f3bd6ed226c63436d6ad7682f2a09d0a636a Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 27 Mar 2015 08:45:04 -0400
Subject: [PATCH 0191/3617] removed folding sudo/su to become logic from
constants as it is already present downstream in playbook/play/tasks
---
lib/ansible/constants.py | 8 ++++----
v2/ansible/constants.py | 6 +++---
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 20079863e7d636..71efefdbc383da 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -112,7 +112,6 @@ def shell_expand_path(path):
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
-DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
@@ -123,6 +122,7 @@ def shell_expand_path(path):
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
+DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
@@ -139,10 +139,10 @@ def shell_expand_path(path):
#TODO: get rid of ternary chain mess
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
-DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True)
+DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
-DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root')
-DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS',True if DEFAULT_ASK_SUDO_PASS else False, boolean=True)
+DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None)
+DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# need to rethink impementing these 2
DEFAULT_BECOME_EXE = None
#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo')
diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py
index f2da07ffb02059..72b571ebb8034e 100644
--- a/v2/ansible/constants.py
+++ b/v2/ansible/constants.py
@@ -145,10 +145,10 @@ def shell_expand_path(path):
#TODO: get rid of ternary chain mess
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
-DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True)
+DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
-DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root')
-DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS',True if DEFAULT_ASK_SUDO_PASS else False, boolean=True)
+DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', None)
+DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# need to rethink impementing these 2
DEFAULT_BECOME_EXE = None
#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo')
From 104b2036f77727766c9d0e537591c4fbec8bd7f8 Mon Sep 17 00:00:00 2001
From: Matt Martz
Date: Fri, 27 Mar 2015 12:03:20 -0500
Subject: [PATCH 0192/3617] egg_info is now written directly to lib
---
hacking/env-setup | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/hacking/env-setup b/hacking/env-setup
index f52c91a8b9cf8c..49390dfe5e0964 100644
--- a/hacking/env-setup
+++ b/hacking/env-setup
@@ -42,11 +42,10 @@ expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_M
# Do the work in a function so we don't repeat ourselves later
gen_egg_info()
{
- python setup.py egg_info
if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then
rm -r "$PREFIX_PYTHONPATH/ansible.egg-info"
fi
- mv "ansible.egg-info" "$PREFIX_PYTHONPATH"
+ python setup.py egg_info
}
if [ "$ANSIBLE_HOME" != "$PWD" ] ; then
From 35a2ca8a5db25eb3280c51e3342b8c05719d9b0a Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 27 Mar 2015 15:41:02 -0400
Subject: [PATCH 0193/3617] made sequence more flexible, can handle descending
and negative sequences and is skipped if start==end
---
lib/ansible/runner/lookup_plugins/sequence.py | 20 +++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/lib/ansible/runner/lookup_plugins/sequence.py b/lib/ansible/runner/lookup_plugins/sequence.py
index b162b3069e7d57..13891343b1a053 100644
--- a/lib/ansible/runner/lookup_plugins/sequence.py
+++ b/lib/ansible/runner/lookup_plugins/sequence.py
@@ -151,10 +151,17 @@ def sanity_check(self):
)
elif self.count is not None:
# convert count to end
- self.end = self.start + self.count * self.stride - 1
+ if self.count != 0:
+ self.end = self.start + self.count * self.stride - 1
+ else:
+ self.start = 0
+ self.end = 0
+ self.stride = 0
del self.count
- if self.end < self.start:
- raise AnsibleError("can't count backwards")
+ if self.stride > 0 and self.end < self.start:
+ raise AnsibleError("to count backwards make stride negative")
+ if self.stride < 0 and self.end > self.start:
+ raise AnsibleError("to count forward don't make stride negative")
if self.format.count('%') != 1:
raise AnsibleError("bad formatting string: %s" % self.format)
@@ -193,12 +200,13 @@ def run(self, terms, inject=None, **kwargs):
self.sanity_check()
- results.extend(self.generate_sequence())
+ if self.start != self.end:
+ results.extend(self.generate_sequence())
except AnsibleError:
raise
- except Exception:
+ except Exception, e:
raise AnsibleError(
- "unknown error generating sequence"
+ "unknown error generating sequence: %s" % str(e)
)
return results
From 662b35cbce50b43f542750451fd35d58bfa2ffd9 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 27 Mar 2015 18:30:42 -0400
Subject: [PATCH 0194/3617] readded sudo/su vars to allow role/includes to work
with passed sudo/su
---
lib/ansible/playbook/play.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index edec30df758651..a24c5fff1b5036 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -577,7 +577,7 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, bec
# evaluate privilege escalation vars for current and child tasks
included_become_vars = {}
- for k in ["become", "become_user", "become_method", "become_exe"]:
+ for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]:
if k in x:
included_become_vars[k] = x[k]
elif k in become_vars:
From c90e3f0d16d5cc365240d772e90c507b45b940e5 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sun, 29 Mar 2015 15:58:56 -0400
Subject: [PATCH 0195/3617] small updates to community and contribution page
---
docsite/rst/community.rst | 112 +++++++++++++++++++++++---------------
1 file changed, 69 insertions(+), 43 deletions(-)
diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst
index 4d2de28ce16d14..f33109337dbe27 100644
--- a/docsite/rst/community.rst
+++ b/docsite/rst/community.rst
@@ -132,39 +132,63 @@ Modules are some of the easiest places to get started.
Contributing Code (Features or Bugfixes)
----------------------------------------
-The Ansible project keeps its source on github at
-`github.com/ansible/ansible `_ for the core application, and two sub repos ansible/ansible-modules-core and ansible/ansible-modules-extras for module related items. If you need to know if a module is in 'core' or 'extras', consult the web documentation page for that module.
+The Ansible project keeps its source on github at `github.com/ansible/ansible `_ for
+the core application, and two sub repos `github.com/ansible/ansible-modules-core `_
+and `ansible/ansible-modules-extras `_ for module related items.
+If you need to know if a module is in 'core' or 'extras', consult the web documentation page for that module.
-The project takes contributions through
-`github pull requests `_.
+The project takes contributions through `github pull requests `_.
-It is usually a good idea to join the ansible-devel list to discuss any large features prior to submission, and this especially helps in avoiding duplicate work or efforts where we decide, upon seeing a pull request for the first time, that revisions are needed. (This is not usually needed for module development, but can be nice for large changes).
+It is usually a good idea to join the ansible-devel list to discuss any large features prior to submission,
+and this especially helps in avoiding duplicate work or efforts where we decide, upon seeing a pull request
+for the first time, that revisions are needed. (This is not usually needed for module development, but can be nice for large changes).
Note that we do keep Ansible to a particular aesthetic, so if you are unclear about whether a feature
is a good fit or not, having the discussion on the development list is often a lot easier than having
to modify a pull request later.
-When submitting patches, be sure to run the unit tests first “make tests” and always use
-“git rebase” vs “git merge” (aliasing git pull to git pull --rebase is a great idea) to
-avoid merge commits in your submissions. There are also integration tests that can be run in the "test/integration" directory.
+When submitting patches, be sure to run the unit tests first “make tests” and always use, these are the same basic
+tests that will automatically run on Travis when creating the PR. There are more in depth tests in the tests/integration
+directory, classified as destructive and non_destructive, run these if they pertain to your modification. They are setup
+with tags so you can run subsets, some of the tests require cloud credentials and will only run if they are provided.
+When adding new features or fixing bugs it would be nice to add new tests to avoid regressions.
-In order to keep the history clean and better audit incoming code, we will require resubmission of pull requests that contain merge commits. Use "git pull --rebase" vs "git pull" and "git rebase" vs "git merge". Also be sure to use topic branches to keep your additions on different branches, such that they won't pick up stray commits later.
+Use “git rebase” vs “git merge” (aliasing git pull to git pull --rebase is a great idea) to avoid merge commits in
+your submissions. There are also integration tests that can be run in the "test/integration" directory.
-We’ll then review your contributions and engage with you about questions and so on.
+In order to keep the history clean and better audit incoming code, we will require resubmission of pull requests that
+contain merge commits. Use "git pull --rebase" vs "git pull" and "git rebase" vs "git merge". Also be sure to use topic
+branches to keep your additions on different branches, such that they won't pick up stray commits later.
-As we have a very large and active community, so it may take awhile to get your contributions
+If you make a mistake you do not need to close your PR, create a clean branch locally and then push to github
+with --force to overwrite the existing branch (permissible in this case as no one else should be using that
+branch as reference). Code comments won't be lost, they just won't be attached to the existing branch.
+
+We’ll then review your contributions and engage with you about questions and so on.
+
+As we have a very large and active community, so it may take awhile to get your contributions
in! See the notes about priorities in a later section for understanding our work queue.
+Be patient, your request might not get merged right away, we also try to keep the devel branch more
+or less usable so we like to examine Pull requests carefully, which takes time.
-Patches should be made against the 'devel' branch.
+Patches should always be made against the 'devel' branch.
-Contributions can be for new features like modules, or to fix bugs you or others have found. If you
-are interested in writing new modules to be included in the core Ansible distribution, please refer
+Keep in mind that small and focused requests are easier to examine and accept, having example cases
+also help us understand the utility of a bug fix or a new feature.
+
+Contributions can be for new features like modules, or to fix bugs you or others have found. If you
+are interested in writing new modules to be included in the core Ansible distribution, please refer
to the `module development documentation `_.
-Ansible's aesthetic encourages simple, readable code and consistent, conservatively extending,
-backwards-compatible improvements. Code developed for Ansible needs to support Python 2.6+,
+Ansible's aesthetic encourages simple, readable code and consistent, conservatively extending,
+backwards-compatible improvements. Code developed for Ansible needs to support Python 2.6+,
while code in modules must run under Python 2.4 or higher. Please also use a 4-space indent
-and no tabs.
+and no tabs, we do not enforce 80 column lines, we are fine with 120-140. We do not take 'style only'
+requests unless the code is nearly unreadable, we are "PEP8ish", but not strictly compliant.
+
+You can also contribute by testing and revising other requests, especially if it is one you are interested
+in using. Please keep your comments clear and to the point, courteous and constructive, tickets are not a
+good place to start discussions (ansible-devel and IRC exist for this).
Tip: To easily run from a checkout, source "./hacking/env-setup" and that's it -- no install
required. You're now live!
@@ -175,32 +199,34 @@ Other Topics
Ansible Staff
-------------
-Ansible, Inc is a company supporting Ansible and building additional solutions based on
-Ansible. We also do services and support for those that are interested.
+Ansible, Inc is a company supporting Ansible and building additional solutions based on
+Ansible. We also do services and support for those that are interested. We also offer an
+enterprise web front end to Ansible (see Tower below).
-Our most
-important task however is enabling all the great things that happen in the Ansible
+Our most important task however is enabling all the great things that happen in the Ansible
community, including organizing software releases of Ansible. For more information about
any of these things, contact info@ansible.com
-On IRC, you can find us as mdehaan, jimi_c, abadger1999, Tybstar, and others. On the mailing list,
+On IRC, you can find us as jimi_c, abadger1999, Tybstar, bcoca, and others. On the mailing list,
we post with an @ansible.com address.
Mailing List Information
------------------------
-Ansible has several mailing lists. Your first post to the mailing list will be
+Ansible has several mailing lists. Your first post to the mailing list will be
moderated (to reduce spam), so please allow a day or less for your first post.
-`Ansible Project List `_ is for sharing Ansible Tips, answering questions, and general user discussion.
+`Ansible Project List `_ is for sharing Ansible Tips,
+answering questions, and general user discussion.
-`Ansible Development List `_ is for learning how to develop on Ansible, asking about prospective feature design, or discussions
-about extending ansible or features in progress.
+`Ansible Development List `_ is for learning how to develop on Ansible,
+asking about prospective feature design, or discussions about extending ansible or features in progress.
-`Ansible Announce list `_ is a read-only list that shares information about new releases of Ansible, and also rare infrequent
-event information, such as announcements about an AnsibleFest coming up, which is our official conference series.
+`Ansible Announce list `_ is a read-only list that shares information
+about new releases of Ansible, and also rare infrequent event information, such as announcements about an AnsibleFest coming up,
+which is our official conference series.
-To subscribe to a group from a non-google account, you can email the subscription address, for
+To subscribe to a group from a non-google account, you can email the subscription address, for
example ansible-devel+subscribe@googlegroups.com.
Release Numbering
@@ -208,9 +234,9 @@ Release Numbering
Releases ending in ".0" are major releases and this is where all new features land. Releases ending
in another integer, like "0.X.1" and "0.X.2" are dot releases, and these are only going to contain
-bugfixes.
+bugfixes.
-Typically we don't do dot releases for minor bugfixes (reserving these for larger items),
+Typically we don't do dot releases for minor bugfixes (reserving these for larger items),
but may occasionally decide to cut dot releases containing a large number of smaller fixes if it's still a fairly long time before
the next release comes out.
@@ -219,7 +245,7 @@ Releases are also given code names based on Van Halen songs, that no one really
Tower Support Questions
-----------------------
-Ansible `Tower `_ is a UI, Server, and REST endpoint for Ansible, produced by Ansible, Inc.
+Ansible `Tower `_ is a UI, Server, and REST endpoint for Ansible, produced by Ansible, Inc.
If you have a question about tower, email `support@ansible.com `_ rather than using the IRC
channel or the general project mailing list.
@@ -227,7 +253,7 @@ channel or the general project mailing list.
IRC Channel
-----------
-Ansible has an IRC channel #ansible on irc.freenode.net.
+Ansible has an IRC channel #ansible on irc.freenode.net.
Notes on Priority Flags
-----------------------
@@ -241,10 +267,10 @@ As a result, we have a LOT of incoming activity to process.
In the interest of transparency, we're telling you how we sort incoming requests.
In our bug tracker you'll notice some labels - P1, P2, P3, P4, and P5. These are our internal
-priority orders that we use to sort tickets.
+priority orders that we use to sort tickets.
-With some exceptions for easy merges (like documentation typos for instance),
-we're going to spend most of our time working on P1 and P2 items first, including pull requests.
+With some exceptions for easy merges (like documentation typos for instance),
+we're going to spend most of our time working on P1 and P2 items first, including pull requests.
These usually relate to important bugs or features affecting large segments of the userbase. So if you see something categorized
"P3 or P4", and it's not appearing to get a lot of immediate attention, this is why.
@@ -264,18 +290,18 @@ is help close P2 bug reports.
Community Code of Conduct
-------------------------
-Ansible’s community welcomes users of all types, backgrounds, and skill levels. Please
-treat others as you expect to be treated, keep discussions positive, and avoid discrimination of all kinds, profanity, allegations of Cthulhu worship, or engaging in controversial debates (except vi vs emacs is cool).
+Ansible’s community welcomes users of all types, backgrounds, and skill levels. Please treat others as you expect to be treated,
+keep discussions positive, and avoid discrimination of all kinds, profanity, allegations of Cthulhu worship, or engaging in
+controversial debates (except vi vs emacs is cool).
The same expectations apply to community events as they do to online interactions.
-Posts to mailing lists should remain focused around Ansible and IT automation. Abuse of these community guidelines will not be tolerated and may result in banning from community resources.
+Posts to mailing lists should remain focused around Ansible and IT automation. Abuse of these community guidelines will not be
+tolerated and may result in banning from community resources.
Contributors License Agreement
------------------------------
-By contributing you agree that these contributions are your own (or approved by your employer)
-and you grant a full, complete, irrevocable
-copyright license to all users and developers of the project, present and future, pursuant
-to the license of the project.
+By contributing you agree that these contributions are your own (or approved by your employer) and you grant a full, complete, irrevocable
+copyright license to all users and developers of the project, present and future, pursuant to the license of the project.
From 3afc54d298ad08d24e0c803c4bb98dde124f1d07 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sun, 29 Mar 2015 16:51:11 -0400
Subject: [PATCH 0196/3617] added zabbix modules to changelog
---
CHANGELOG.md | 3 +++
1 file changed, 3 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 10a9ca16048885..4dc9219f2a7f85 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,9 @@ New Modules:
cloudtrail
maven_artifact
pushover
+ zabbix_host
+ zabbix_hostmacro
+ zabbix_screen
vertica_configuration
vertica_facts
vertica_role
From 3a70affb9aa8ff78f3ff33fc21d1095fdc1b911d Mon Sep 17 00:00:00 2001
From: joefis
Date: Mon, 30 Mar 2015 16:39:09 +0100
Subject: [PATCH 0197/3617] Vagrant inventory: exit 0 on success
Current code has sys.exit(1) at the end of the codepath for the
options --help, --list and --host. These are not error conditions
so should be returning 0 for success, not 1 which is EPERM i.e.
"Operation not permitted". Newer Vagrant versions examine the exit
codes from subprocesses and interpret this as a failure.
---
plugins/inventory/vagrant.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/plugins/inventory/vagrant.py b/plugins/inventory/vagrant.py
index ea59a7bc02364b..7f6dc925e83fca 100755
--- a/plugins/inventory/vagrant.py
+++ b/plugins/inventory/vagrant.py
@@ -107,7 +107,7 @@ def get_a_ssh_config(box_name):
hosts['vagrant'].append(data['HostName'])
print json.dumps(hosts)
- sys.exit(1)
+ sys.exit(0)
# Get out the host details
#------------------------------
@@ -122,11 +122,11 @@ def get_a_ssh_config(box_name):
result['ansible_ssh_port'] = result['Port']
print json.dumps(result)
- sys.exit(1)
+ sys.exit(0)
# Print out help
#------------------------------
else:
parser.print_help()
- sys.exit(1)
\ No newline at end of file
+ sys.exit(0)
From 2a8a302e7ecef0b47cfd851b3e273a3b199f466c Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 30 Mar 2015 20:34:17 -0400
Subject: [PATCH 0198/3617] fixed corner case when counting backwards, added
test cases for count=0 and backwards counts
---
lib/ansible/runner/lookup_plugins/sequence.py | 6 +++++-
.../roles/test_iterators/tasks/main.yml | 18 ++++++++++++++++++
2 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/runner/lookup_plugins/sequence.py b/lib/ansible/runner/lookup_plugins/sequence.py
index 13891343b1a053..68b0bbec90d6a0 100644
--- a/lib/ansible/runner/lookup_plugins/sequence.py
+++ b/lib/ansible/runner/lookup_plugins/sequence.py
@@ -166,7 +166,11 @@ def sanity_check(self):
raise AnsibleError("bad formatting string: %s" % self.format)
def generate_sequence(self):
- numbers = xrange(self.start, self.end + 1, self.stride)
+ if self.stride > 0:
+ adjust = 1
+ else:
+ adjust = -1
+ numbers = xrange(self.start, self.end + adjust, self.stride)
for i in numbers:
try:
diff --git a/test/integration/roles/test_iterators/tasks/main.yml b/test/integration/roles/test_iterators/tasks/main.yml
index c95eaff3da4739..b9592aba2f7ed1 100644
--- a/test/integration/roles/test_iterators/tasks/main.yml
+++ b/test/integration/roles/test_iterators/tasks/main.yml
@@ -60,6 +60,10 @@
set_fact: "{{ 'x' + item }}={{ item }}"
with_sequence: start=0 end=3
+- name: test with_sequence backwards
+ set_fact: "{{ 'y' + item }}={{ item }}"
+ with_sequence: start=3 end=0 stride=-1
+
- name: verify with_sequence
assert:
that:
@@ -67,6 +71,20 @@
- "x1 == '1'"
- "x2 == '2'"
- "x3 == '3'"
+ - "y3 == '3'"
+ - "y2 == '2'"
+ - "y1 == '1'"
+ - "y0 == '0'"
+
+- name: test with_sequence not failing on count == 0
+ debug: msg='previously failed with backward counting error'
+ with_sequence: count=0
+ register: count_of_zero
+
+- assert:
+ that:
+ - count_of_zero | skipped
+ - not count_of_zero | failed
# WITH_RANDOM_CHOICE
From 68880a797d226a410c4278bb8a11ad809bb99abe Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 26 Mar 2015 12:15:16 -0700
Subject: [PATCH 0199/3617] Update core to fix cloudformation problem
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 5d776936cc67b2..7e7eafb3e31ad0 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 5d776936cc67b2f43d6be9630872595243213fb0
+Subproject commit 7e7eafb3e31ad03b255c633460766e8c93616e65
From dc9b36ccb0d78b707364e29ea67ae7560b12a7bb Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Fri, 27 Mar 2015 07:48:26 -0700
Subject: [PATCH 0200/3617] Some notes on optimizing module_replacer
---
v2/ansible/executor/module_common.py | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/v2/ansible/executor/module_common.py b/v2/ansible/executor/module_common.py
index 7c76fd7427d363..23890d64e61a69 100644
--- a/v2/ansible/executor/module_common.py
+++ b/v2/ansible/executor/module_common.py
@@ -140,6 +140,16 @@ def modify_module(module_path, module_args, strip_comments=False):
which results in the inclusion of the common code from powershell.ps1
"""
+ ### TODO: Optimization ideas if this code is actually a source of slowness:
+ # * Fix comment stripping: Currently doesn't preserve shebangs and encoding info (but we unconditionally add encoding info)
+ # * Use pyminifier if installed
+ # * comment stripping/pyminifier needs to have config setting to turn it
+ # off for debugging purposes (goes along with keep remote but should be
+ # separate otherwise users wouldn't be able to get info on what the
+ # minifier output)
+ # * Only split into lines and recombine into strings once
+ # * Cache the modified module? If only the args are different and we do
+ # that as the last step we could cache all the work up to that point.
with open(module_path) as f:
From ce512e18f0254b54e941bf863214d5a1caab0ad1 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Fri, 27 Mar 2015 09:06:07 -0700
Subject: [PATCH 0201/3617] Remove fireball connection plugin. v2 will have
accelerate but not fireball
---
v2/ansible/plugins/connections/fireball.py | 151 ---------------------
1 file changed, 151 deletions(-)
delete mode 100644 v2/ansible/plugins/connections/fireball.py
diff --git a/v2/ansible/plugins/connections/fireball.py b/v2/ansible/plugins/connections/fireball.py
deleted file mode 100644
index dd9e09bacda6d6..00000000000000
--- a/v2/ansible/plugins/connections/fireball.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# (c) 2012, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-import json
-import os
-import base64
-from ansible.callbacks import vvv
-from ansible import utils
-from ansible import errors
-from ansible import constants
-
-HAVE_ZMQ=False
-
-try:
- import zmq
- HAVE_ZMQ=True
-except ImportError:
- pass
-
-class Connection(object):
- ''' ZeroMQ accelerated connection '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
-
- self.runner = runner
- self.has_pipelining = False
-
- # attempt to work around shared-memory funness
- if getattr(self.runner, 'aes_keys', None):
- utils.AES_KEYS = self.runner.aes_keys
-
- self.host = host
- self.key = utils.key_for_hostname(host)
- self.context = None
- self.socket = None
-
- if port is None:
- self.port = constants.ZEROMQ_PORT
- else:
- self.port = port
-
- def connect(self):
- ''' activates the connection object '''
-
- if not HAVE_ZMQ:
- raise errors.AnsibleError("zmq is not installed")
-
- # this is rough/temporary and will likely be optimized later ...
- self.context = zmq.Context()
- socket = self.context.socket(zmq.REQ)
- addr = "tcp://%s:%s" % (self.host, self.port)
- socket.connect(addr)
- self.socket = socket
-
- return self
-
- def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=None):
- ''' run a command on the remote host '''
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- vvv("EXEC COMMAND %s" % cmd)
-
- if (self.runner.sudo and sudoable) or (self.runner.su and su):
- raise errors.AnsibleError(
- "When using fireball, do not specify sudo or su to run your tasks. " +
- "Instead sudo the fireball action with sudo. " +
- "Task will communicate with the fireball already running in sudo mode."
- )
-
- data = dict(
- mode='command',
- cmd=cmd,
- tmp_path=tmp_path,
- executable=executable,
- )
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
-
- def put_file(self, in_path, out_path):
-
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
-
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- data = file(in_path).read()
- data = base64.b64encode(data)
-
- data = dict(mode='put', data=data, out_path=out_path)
- # TODO: support chunked file transfer
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- # no meaningful response needed for this
-
- def fetch_file(self, in_path, out_path):
- ''' save a remote file to the specified path '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
-
- data = dict(mode='fetch', in_path=in_path)
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- response = response['data']
- response = base64.b64decode(response)
-
- fh = open(out_path, "w")
- fh.write(response)
- fh.close()
-
- def close(self):
- ''' terminate the connection '''
- # Be a good citizen
- try:
- self.socket.close()
- self.context.term()
- except:
- pass
-
From 4aa3ac41a14099af41c39323d6a102b584c0f785 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Fri, 27 Mar 2015 12:19:23 -0700
Subject: [PATCH 0202/3617] Port sivel's fix for egg_info (#10563) to v2
---
v2/hacking/env-setup | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/v2/hacking/env-setup b/v2/hacking/env-setup
index c03fa0874e1ef7..8f2c331fe46927 100644
--- a/v2/hacking/env-setup
+++ b/v2/hacking/env-setup
@@ -42,11 +42,10 @@ expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_M
# Do the work in a function so we don't repeat ourselves later
gen_egg_info()
{
- python setup.py egg_info
if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then
rm -r "$PREFIX_PYTHONPATH/ansible.egg-info"
fi
- mv "ansible.egg-info" "$PREFIX_PYTHONPATH"
+ python setup.py egg_info
}
if [ "$ANSIBLE_HOME" != "$PWD" ] ; then
From 1cc2135a0d8400952ef0ee9631f6a07db6d93058 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 30 Mar 2015 12:45:04 -0700
Subject: [PATCH 0203/3617] Fix no closed parens
---
v2/ansible/plugins/action/copy.py | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py
index 088a806b61b0ae..09990743bb7ad2 100644
--- a/v2/ansible/plugins/action/copy.py
+++ b/v2/ansible/plugins/action/copy.py
@@ -31,12 +31,17 @@
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum
+### FIXME: Find a different way to fix 3518 as sys.defaultencoding() breaks
+# the python interpreter in subtle ways. It appears that this does not fix
+# 3518 anyway (using binary files via lookup(). Instead, it tries to fix
+# utf-8 strings in the content parameter. That should be fixable by properly
+# encoding or decoding the value before we write it to a file.
+#
## fixes https://github.com/ansible/ansible/issues/3518
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
+#import sys
+#reload(sys)
+#sys.setdefaultencoding("utf8")
class ActionModule(ActionBase):
@@ -231,7 +236,7 @@ def run(self, tmp=None, task_vars=dict()):
self._remove_tempfile_if_content_defined(content, content_tempfile)
# fix file permissions when the copy is done as a different user
- if (self._connection_info.become and self._connection_info.become_user != 'root':
+ if self._connection_info.become and self._connection_info.become_user != 'root':
self._remote_chmod('a+r', tmp_src, tmp)
if raw:
From 43c1a9744765eebfb9eaf9113336d552cfc9096b Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 30 Mar 2015 19:19:34 -0700
Subject: [PATCH 0204/3617] Various unicode and backslash escape cleanups
* Do backslash escape parsing in parse_kv() [was being done in the copy
module purely for newlines in the copy module's content param before]
* Make parse_kv always return unicode
* Add bandaid to transform args to unicode until we can fix things
calling parse_kv to always send it unicode.
* Make split_args deal with unicode internally. Warning, no bandaid for
things calling split_args without giving it unicode (shouldn't matter
as dealt with str internally before)
* Fix copy and unarchive action plugins to not use setdefaultencoding
* Remove escaping from copy (it was broken and made content into latin-1
sometimes). escaping is now in parse_kv.
* Expect that content is now a unicode string so transform to bytes just
before writing to the file.
* Add initial unittests for split_args and parse_kv. 4 failing
tests.because split_args is injecting extra newlines.
---
v2/ansible/parsing/splitter.py | 42 +++++++---
v2/ansible/plugins/action/copy.py | 28 +------
v2/ansible/plugins/action/unarchive.py | 8 +-
v2/test/parsing/test_splitter.py | 109 +++++++++++++++++++++++++
4 files changed, 143 insertions(+), 44 deletions(-)
create mode 100644 v2/test/parsing/test_splitter.py
diff --git a/v2/ansible/parsing/splitter.py b/v2/ansible/parsing/splitter.py
index 9705baf169d6a9..4af1c7b171e11f 100644
--- a/v2/ansible/parsing/splitter.py
+++ b/v2/ansible/parsing/splitter.py
@@ -19,6 +19,27 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import re
+import codecs
+
+# Decode escapes adapted from rspeer's answer here:
+# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
+_HEXCHAR = '[a-fA-F0-9]'
+_ESCAPE_SEQUENCE_RE = re.compile(r'''
+ ( \\U{0} # 8-digit hex escapes
+ | \\u{1} # 4-digit hex escapes
+ | \\x{2} # 2-digit hex escapes
+ | \\[0-7]{{1,3}} # Octal escapes
+ | \\N\{{[^}}]+\}} # Unicode characters by name
+ | \\[\\'"abfnrtv] # Single-character escapes
+ )'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
+
+def _decode_escapes(s):
+ def decode_match(match):
+ return codecs.decode(match.group(0), 'unicode-escape')
+
+ return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
+
def parse_kv(args, check_raw=False):
'''
Convert a string of key/value items to a dict. If any free-form params
@@ -27,6 +48,10 @@ def parse_kv(args, check_raw=False):
they will simply be ignored.
'''
+ ### FIXME: args should already be a unicode string
+ from ansible.utils.unicode import to_unicode
+ args = to_unicode(args, nonstring='passthru')
+
options = {}
if args is not None:
try:
@@ -39,6 +64,7 @@ def parse_kv(args, check_raw=False):
raw_params = []
for x in vargs:
+ x = _decode_escapes(x)
if "=" in x:
pos = 0
try:
@@ -72,7 +98,7 @@ def parse_kv(args, check_raw=False):
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
if len(raw_params) > 0:
- options['_raw_params'] = ' '.join(raw_params)
+ options[u'_raw_params'] = ' '.join(raw_params)
return options
@@ -126,17 +152,11 @@ def split_args(args):
'''
# the list of params parsed out of the arg string
- # this is going to be the result value when we are donei
+ # this is going to be the result value when we are done
params = []
- # here we encode the args, so we have a uniform charset to
- # work with, and split on white space
+ # Initial split on white space
args = args.strip()
- try:
- args = args.encode('utf-8')
- do_decode = True
- except UnicodeDecodeError:
- do_decode = False
items = args.strip().split('\n')
# iterate over the tokens, and reassemble any that may have been
@@ -242,10 +262,6 @@ def split_args(args):
if print_depth or block_depth or comment_depth or inside_quotes:
raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
- # finally, we decode each param back to the unicode it was in the arg string
- if do_decode:
- params = [x.decode('utf-8') for x in params]
-
return params
def is_quoted(data):
diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py
index 09990743bb7ad2..89c2fde7b3f1e9 100644
--- a/v2/ansible/plugins/action/copy.py
+++ b/v2/ansible/plugins/action/copy.py
@@ -30,18 +30,7 @@
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum
-
-### FIXME: Find a different way to fix 3518 as sys.defaultencoding() breaks
-# the python interpreter in subtle ways. It appears that this does not fix
-# 3518 anyway (using binary files via lookup(). Instead, it tries to fix
-# utf-8 strings in the content parameter. That should be fixable by properly
-# encoding or decoding the value before we write it to a file.
-#
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-#import sys
-#reload(sys)
-#sys.setdefaultencoding("utf8")
+from ansible.utils.unicode import to_bytes
class ActionModule(ActionBase):
@@ -55,16 +44,6 @@ def run(self, tmp=None, task_vars=dict()):
raw = boolean(self._task.args.get('raw', 'no'))
force = boolean(self._task.args.get('force', 'yes'))
- # content with newlines is going to be escaped to safely load in yaml
- # now we need to unescape it so that the newlines are evaluated properly
- # when writing the file to disk
- if content:
- if isinstance(content, unicode):
- try:
- content = content.decode('unicode-escape')
- except UnicodeDecodeError:
- pass
-
# FIXME: first available file needs to be reworked somehow...
#if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
# result=dict(failed=True, msg="src (or content) and dest are required")
@@ -86,7 +65,7 @@ def run(self, tmp=None, task_vars=dict()):
try:
# If content comes to us as a dict it should be decoded json.
# We need to encode it back into a string to write it out.
- if type(content) is dict:
+ if isinstance(content, dict):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
@@ -316,7 +295,8 @@ def run(self, tmp=None, task_vars=dict()):
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp()
- f = os.fdopen(fd, 'w')
+ f = os.fdopen(fd, 'wb')
+ content = to_bytes(content)
try:
f.write(content)
except Exception, err:
diff --git a/v2/ansible/plugins/action/unarchive.py b/v2/ansible/plugins/action/unarchive.py
index f99d7e28e64e08..1b6cb354f0fdf7 100644
--- a/v2/ansible/plugins/action/unarchive.py
+++ b/v2/ansible/plugins/action/unarchive.py
@@ -17,16 +17,10 @@
# along with Ansible. If not, see .
import os
+import pipes
from ansible.plugins.action import ActionBase
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-import pipes
-
class ActionModule(ActionBase):
diff --git a/v2/test/parsing/test_splitter.py b/v2/test/parsing/test_splitter.py
new file mode 100644
index 00000000000000..fc2c05d36fb1f0
--- /dev/null
+++ b/v2/test/parsing/test_splitter.py
@@ -0,0 +1,109 @@
+# coding: utf-8
+# (c) 2015, Toshio Kuratomi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from nose import tools
+from ansible.compat.tests import unittest
+
+from ansible.parsing.splitter import split_args, parse_kv
+
+
+# Tests using nose's test generators cannot use unittest base class.
+# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
+class TestSplitter_Gen:
+ SPLIT_DATA = (
+ (u'a',
+ [u'a'],
+ {u'_raw_params': u'a'}),
+ (u'a=b',
+ [u'a=b'],
+ {u'a': u'b'}),
+ (u'a="foo bar"',
+ [u'a="foo bar"'],
+ {u'a': u'foo bar'}),
+ (u'"foo bar baz"',
+ [u'"foo bar baz"'],
+ {u'_raw_params': '"foo bar baz"'}),
+ (u'foo bar baz',
+ [u'foo', u'bar', u'baz'],
+ {u'_raw_params': u'foo bar baz'}),
+ (u'a=b c="foo bar"',
+ [u'a=b', u'c="foo bar"'],
+ {u'a': u'b', u'c': u'foo bar'}),
+ (u'a="echo \\"hello world\\"" b=bar',
+ [u'a="echo \\"hello world\\""', u'b=bar'],
+ {u'a': u'echo "hello world"', u'b': u'bar'}),
+ (u'a="multi\nline"',
+ [u'a="multi\nline"'],
+ {u'a': u'multi\nline'}),
+ (u'a="blank\n\nline"',
+ [u'a="blank\n\nline"'],
+ {u'a': u'blank\n\nline'}),
+ (u'a="blank\n\n\nlines"',
+ [u'a="blank\n\n\nlines"'],
+ {u'a': u'blank\n\n\nlines'}),
+ (u'a="a long\nmessage\\\nabout a thing\n"',
+ [u'a="a long\nmessage\\\nabout a thing\n"'],
+ {u'a': u'a long\nmessage\\\nabout a thing\n'}),
+ (u'a="multiline\nmessage1\\\n" b="multiline\nmessage2\\\n"',
+ [u'a="multiline\nmessage1\\\n"', u'b="multiline\nmessage2\\\n"'],
+ {u'a': 'multiline\nmessage1\\\n', u'b': u'multiline\nmessage2\\\n'}),
+ (u'a={{jinja}}',
+ [u'a={{jinja}}'],
+ {u'a': u'{{jinja}}'}),
+ (u'a={{ jinja }}',
+ [u'a={{ jinja }}'],
+ {u'a': u'{{ jinja }}'}),
+ (u'a="{{jinja}}"',
+ [u'a="{{jinja}}"'],
+ {u'a': u'{{jinja}}'}),
+ (u'a={{ jinja }}{{jinja2}}',
+ [u'a={{ jinja }}{{jinja2}}'],
+ {u'a': u'{{ jinja }}{{jinja2}}'}),
+ (u'a="{{ jinja }}{{jinja2}}"',
+ [u'a="{{ jinja }}{{jinja2}}"'],
+ {u'a': u'{{ jinja }}{{jinja2}}'}),
+ (u'a={{jinja}} b={{jinja2}}',
+ [u'a={{jinja}}', u'b={{jinja2}}'],
+ {u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}),
+ (u'a="café eñyei"',
+ [u'a="café eñyei"'],
+ {u'a': u'café eñyei'}),
+ (u'a=café b=eñyei',
+ [u'a=café', u'b=eñyei'],
+ {u'a': u'café', u'b': u'eñyei'}),
+ )
+
+ def check_split_args(self, args, expected):
+ tools.eq_(split_args(args), expected)
+
+ def test_split_args(self):
+ for datapoint in self.SPLIT_DATA:
+ yield self.check_split_args, datapoint[0], datapoint[1]
+
+ def check_parse_kv(self, args, expected):
+ tools.eq_(parse_kv(args), expected)
+
+ def test_parse_kv(self):
+ for datapoint in self.SPLIT_DATA:
+ try:
+ yield self.check_parse_kv, datapoint[0], datapoint[2]
+ except: pass
From 378dc561cbf15ededd5f20d88eb6e173953f4de7 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 30 Mar 2015 22:47:56 -0700
Subject: [PATCH 0205/3617] Possible fix for the first newline and triple
newline problems
---
v2/ansible/parsing/splitter.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/v2/ansible/parsing/splitter.py b/v2/ansible/parsing/splitter.py
index 4af1c7b171e11f..a1dc051d24c993 100644
--- a/v2/ansible/parsing/splitter.py
+++ b/v2/ansible/parsing/splitter.py
@@ -211,7 +211,7 @@ def split_args(args):
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
- if idx == 0 and not inside_quotes and was_inside_quotes:
+ if idx == 0 and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
elif len(tokens) > 1:
spacer = ''
@@ -251,8 +251,7 @@ def split_args(args):
# one item (meaning we split on newlines), add a newline back here
# to preserve the original structure
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
- if not params[-1].endswith('\n'):
- params[-1] += '\n'
+ params[-1] += '\n'
# always clear the line continuation flag
line_continuation = False
From f812582d9c3c8b5d69891fb8fcf99b5b8728eac9 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 31 Mar 2015 08:47:30 -0400
Subject: [PATCH 0206/3617] updated submodule refs
---
lib/ansible/modules/core | 2 +-
lib/ansible/modules/extras | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 7e7eafb3e31ad0..bdef699596d48a 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 7e7eafb3e31ad03b255c633460766e8c93616e65
+Subproject commit bdef699596d48a9fd5bb5dad040c9b5e0765bbf6
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
index 400166a655b304..7794042cf65b07 160000
--- a/lib/ansible/modules/extras
+++ b/lib/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit 400166a655b304094005aace178d0fab1cfe9763
+Subproject commit 7794042cf65b075c9ca9bf4248df994bff94401f
From fd7bf51c1479f07ef4bc2c59f68ee5d412b0c763 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 31 Mar 2015 08:58:18 -0400
Subject: [PATCH 0207/3617] updated changelog with new cloudstack modules
---
CHANGELOG.md | 3 +++
1 file changed, 3 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4dc9219f2a7f85..06fe0504fc7ea4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,9 @@ Major Changes:
New Modules:
cloudtrail
+ cloudstack_fw
+ cloudstack_iso
+ cloudstack_sshkey
maven_artifact
pushover
zabbix_host
From 4919c225e626e41fbf9d28d228768a6fe17b5290 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 31 Mar 2015 09:22:19 -0400
Subject: [PATCH 0208/3617] updated ref so docs can build
---
lib/ansible/modules/core | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index bdef699596d48a..613961c592ed23 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit bdef699596d48a9fd5bb5dad040c9b5e0765bbf6
+Subproject commit 613961c592ed23ded2d7e3771ad45b01de5a95f3
From f337707ef15a2eb70d068751e447d68236b2884d Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 31 Mar 2015 09:43:09 -0400
Subject: [PATCH 0209/3617] updated ref to pickup latest docfixes
---
lib/ansible/modules/extras | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
index 7794042cf65b07..eb04e45311683d 160000
--- a/lib/ansible/modules/extras
+++ b/lib/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit 7794042cf65b075c9ca9bf4248df994bff94401f
+Subproject commit eb04e45311683dba1d54c8e5db293a2d3877eb68
From 57ed9947661de6b832ced11363f0df8801b27c00 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 31 Mar 2015 13:44:01 -0400
Subject: [PATCH 0210/3617] updated version
---
VERSION | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/VERSION b/VERSION
index 2e0e38c63a62a4..cd5ac039d67e0b 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.9
+2.0
From eb788dd8f62a574f9df8a74b472094e4e28a778e Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Tue, 31 Mar 2015 12:50:47 -0700
Subject: [PATCH 0211/3617] Just move things around so that new_inventory
doesn't interfere with testing
---
v2/ansible/new_inventory/__init__.py | 4 ++--
v2/ansible/{new_inventory => plugins/inventory}/aggregate.py | 0
2 files changed, 2 insertions(+), 2 deletions(-)
rename v2/ansible/{new_inventory => plugins/inventory}/aggregate.py (100%)
diff --git a/v2/ansible/new_inventory/__init__.py b/v2/ansible/new_inventory/__init__.py
index bcf87c9ef874df..b91d9f05a2825f 100644
--- a/v2/ansible/new_inventory/__init__.py
+++ b/v2/ansible/new_inventory/__init__.py
@@ -23,8 +23,8 @@
from ansible import constants as C
from ansible.inventory.group import Group
-from ansible.inventory.host import Host
-from ansible.inventory.aggregate import InventoryAggregateParser
+from .host import Host
+from ansible.plugins.inventory.aggregate import InventoryAggregateParser
class Inventory:
'''
diff --git a/v2/ansible/new_inventory/aggregate.py b/v2/ansible/plugins/inventory/aggregate.py
similarity index 100%
rename from v2/ansible/new_inventory/aggregate.py
rename to v2/ansible/plugins/inventory/aggregate.py
From 90ca3865551b57482e1235d46f66449049e6f6c6 Mon Sep 17 00:00:00 2001
From: Monty Taylor
Date: Tue, 31 Mar 2015 20:29:06 -0400
Subject: [PATCH 0212/3617] Add api timeout now that shade supports it
everywhere
---
lib/ansible/module_utils/openstack.py | 1 +
lib/ansible/utils/module_docs_fragments/openstack.py | 5 +++++
v2/ansible/module_utils/openstack.py | 1 +
3 files changed, 7 insertions(+)
diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py
index 35b9026213e988..9e4824a301dabc 100644
--- a/lib/ansible/module_utils/openstack.py
+++ b/lib/ansible/module_utils/openstack.py
@@ -83,6 +83,7 @@ def openstack_full_argument_spec(**kwargs):
key=dict(default=None),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
+ api_timeout=dict(default=None, type='int'),
endpoint_type=dict(
default='public', choices=['public', 'internal', 'admin']
)
diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py
index 2979cb68d7b95f..5643b4e6accbd7 100644
--- a/lib/ansible/utils/module_docs_fragments/openstack.py
+++ b/lib/ansible/utils/module_docs_fragments/openstack.py
@@ -60,6 +60,11 @@ class ModuleDocFragment(object):
- How long should ansible wait for the requested resource.
required: false
default: 180
+ api_timeout:
+ description:
+ - How long should the socket layer wait before timing out for API calls.
+ If this is omitted, nothing will be passed to the requests library.
+ required: false
verify:
description:
- Whether or not SSL API requests should be verified.
diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py
index 35b9026213e988..9e4824a301dabc 100644
--- a/v2/ansible/module_utils/openstack.py
+++ b/v2/ansible/module_utils/openstack.py
@@ -83,6 +83,7 @@ def openstack_full_argument_spec(**kwargs):
key=dict(default=None),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
+ api_timeout=dict(default=None, type='int'),
endpoint_type=dict(
default='public', choices=['public', 'internal', 'admin']
)
From 17e086fe8ceb19839281b4398fdf83690dbf695f Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 31 Mar 2015 21:36:18 -0400
Subject: [PATCH 0213/3617] don't break everything when one of the vars in
 inject does not template correctly, wait till it's used
---
lib/ansible/utils/template.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py
index a58b93997157f6..998e55f1f3ba1c 100644
--- a/lib/ansible/utils/template.py
+++ b/lib/ansible/utils/template.py
@@ -188,7 +188,11 @@ def __getitem__(self, varname):
if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars):
return var
else:
- return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined)
+ try:
+ return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined)
+ except:
+ raise KeyError("undefined variable: %s" % varname)
+
def add_locals(self, locals):
'''
From 0d1e2e74a105fc16baf7fb2ff55cbc3c3d06ae6e Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Tue, 31 Mar 2015 23:07:03 -0400
Subject: [PATCH 0214/3617] converted error on play var initialization into
warning with more information
---
lib/ansible/playbook/play.py | 6 +++++-
lib/ansible/utils/template.py | 11 +++++------
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index a24c5fff1b5036..78f2f6d9ba8000 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -119,7 +119,11 @@ def __init__(self, playbook, ds, basedir, vault_password=None):
temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
- ds = template(basedir, ds, temp_vars)
+ try:
+ ds = template(basedir, ds, temp_vars)
+ except errors.AnsibleError, e:
+ utils.warning("non fatal error while trying to template play variables: %s" % (str(e)))
+
ds['tasks'] = _tasks
ds['handlers'] = _handlers
diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py
index 998e55f1f3ba1c..9426e254eb5826 100644
--- a/lib/ansible/utils/template.py
+++ b/lib/ansible/utils/template.py
@@ -118,7 +118,10 @@ def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_
if isinstance(varname, basestring):
if '{{' in varname or '{%' in varname:
- varname = template_from_string(basedir, varname, templatevars, fail_on_undefined)
+ try:
+ varname = template_from_string(basedir, varname, templatevars, fail_on_undefined)
+ except errors.AnsibleError, e:
+ raise errors.AnsibleError("Failed to template %s: %s" % (varname, str(e)))
if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["):
eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True)
@@ -188,11 +191,7 @@ def __getitem__(self, varname):
if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars):
return var
else:
- try:
- return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined)
- except:
- raise KeyError("undefined variable: %s" % varname)
-
+ return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined)
def add_locals(self, locals):
'''
From 87c99b46758dcdca3ccb2daed72a85b7175036a8 Mon Sep 17 00:00:00 2001
From: Monty Taylor
Date: Wed, 1 Apr 2015 07:54:02 -0400
Subject: [PATCH 0215/3617] Align verify parameter with validate_certs
The rest of ansible uses validate_certs, so make that the main
documented parameter. However, leave verify as an alias since that's the
passthrough value to the underlying libraries.
---
lib/ansible/module_utils/openstack.py | 2 +-
lib/ansible/utils/module_docs_fragments/openstack.py | 4 +++-
v2/ansible/module_utils/openstack.py | 2 +-
3 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py
index 9e4824a301dabc..b58cc534287050 100644
--- a/lib/ansible/module_utils/openstack.py
+++ b/lib/ansible/module_utils/openstack.py
@@ -77,7 +77,7 @@ def openstack_full_argument_spec(**kwargs):
auth=dict(default=None),
region_name=dict(default=None),
availability_zone=dict(default=None),
- verify=dict(default=True),
+ verify=dict(default=True, aliases=['validate_certs']),
cacert=dict(default=None),
cert=dict(default=None),
key=dict(default=None),
diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py
index 5643b4e6accbd7..519ad785b9b9dc 100644
--- a/lib/ansible/utils/module_docs_fragments/openstack.py
+++ b/lib/ansible/utils/module_docs_fragments/openstack.py
@@ -65,11 +65,13 @@ class ModuleDocFragment(object):
- How long should the socket layer wait before timing out for API calls.
If this is omitted, nothing will be passed to the requests library.
required: false
- verify:
+ default: None
+ validate_certs:
description:
- Whether or not SSL API requests should be verified.
required: false
default: True
+ aliases: ['verify']
cacert:
description:
- A path to a CA Cert bundle that can be used as part of verifying
diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py
index 9e4824a301dabc..b58cc534287050 100644
--- a/v2/ansible/module_utils/openstack.py
+++ b/v2/ansible/module_utils/openstack.py
@@ -77,7 +77,7 @@ def openstack_full_argument_spec(**kwargs):
auth=dict(default=None),
region_name=dict(default=None),
availability_zone=dict(default=None),
- verify=dict(default=True),
+ verify=dict(default=True, aliases=['validate_certs']),
cacert=dict(default=None),
cert=dict(default=None),
key=dict(default=None),
From 132c0e794dbece25146ed60897af2b1f506fd698 Mon Sep 17 00:00:00 2001
From: Luke
Date: Wed, 1 Apr 2015 08:29:56 -0400
Subject: [PATCH 0216/3617] note added to source section
Added reminder to not use source install method if you're going to be installing ansible for a Tower system
---
docsite/rst/intro_installation.rst | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst
index 303880cac11f84..bad6ea068eff07 100644
--- a/docsite/rst/intro_installation.rst
+++ b/docsite/rst/intro_installation.rst
@@ -103,6 +103,11 @@ when they are implemented, and also easily contribute to the project. Because th
nothing to install, following the development version is significantly easier than most
open source projects.
+.. note::
+
+ If you are intending to use Tower as the Control Machine, do not use a source install. Please use apt/yum/pip for a stable version
+
+
To install from source.
.. code-block:: bash
From 1fa3dbb7d2348bf4c25c116dd808831ef31ae387 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 1 Apr 2015 12:12:34 -0400
Subject: [PATCH 0217/3617] capture IOErrors on backup_local (happens on non
posix filesystems)
fixes #10591
---
lib/ansible/module_utils/basic.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index ad1d43f86ca99b..aaaf85e5e057e5 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -1303,7 +1303,7 @@ def backup_local(self, fn):
try:
shutil.copy2(fn, backupdest)
- except shutil.Error, e:
+ except (shutil.Error, IOError), e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
return backupdest
From c41b917162d5d3acdf2573bbb6d87513cede4ccb Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 30 Mar 2015 21:48:28 -0700
Subject: [PATCH 0218/3617] Add a yaml constructor for unicode strings:
* Changes AnsibleConstructor so that only unicode strings are returned
(no str type)
* Tracks line, column numbers for strings
* Adds unittests for AnsibleLoader (generic for all the yaml parsing)
---
v2/ansible/parsing/yaml/composer.py | 16 ++-
v2/ansible/parsing/yaml/constructor.py | 27 ++++-
v2/ansible/parsing/yaml/objects.py | 3 +
v2/test/parsing/yaml/test_loader.py | 156 +++++++++++++++++++++++++
v2/test/test.yml | 2 -
5 files changed, 199 insertions(+), 5 deletions(-)
create mode 100644 v2/test/parsing/yaml/test_loader.py
delete mode 100644 v2/test/test.yml
diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py
index 0f9c90606f30a2..4f2c9f411b6595 100644
--- a/v2/ansible/parsing/yaml/composer.py
+++ b/v2/ansible/parsing/yaml/composer.py
@@ -20,17 +20,27 @@
__metaclass__ = type
from yaml.composer import Composer
-from yaml.nodes import MappingNode
+from yaml.nodes import MappingNode, ScalarNode
class AnsibleComposer(Composer):
def __init__(self):
self.__mapping_starts = []
super(Composer, self).__init__()
+
def compose_node(self, parent, index):
# the line number where the previous token has ended (plus empty lines)
node = Composer.compose_node(self, parent, index)
- if isinstance(node, MappingNode):
+ if isinstance(node, ScalarNode):
+ # Scalars are pretty easy -- assume they start on the current
+ # token's line (what about multiline strings? Perhaps we also
+ # need to use previous token ended
+ node.__datasource__ = self.name
+ node.__line__ = self.line + 1
+ node.__column__ = self.column + 1
+ elif isinstance(node, MappingNode):
node.__datasource__ = self.name
+
+ # Need extra help to know where the mapping starts
try:
(cur_line, cur_column) = self.__mapping_starts.pop()
except:
@@ -38,7 +48,9 @@ def compose_node(self, parent, index):
cur_column = None
node.__line__ = cur_line
node.__column__ = cur_column
+
return node
+
def compose_mapping_node(self, anchor):
# the column here will point at the position in the file immediately
# after the first key is found, which could be a space or a newline.
diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py
index 730ba85418ffcf..b607f46b05548a 100644
--- a/v2/ansible/parsing/yaml/constructor.py
+++ b/v2/ansible/parsing/yaml/constructor.py
@@ -20,7 +20,8 @@
__metaclass__ = type
from yaml.constructor import Constructor
-from ansible.parsing.yaml.objects import AnsibleMapping
+from ansible.utils.unicode import to_unicode
+from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleUnicode
class AnsibleConstructor(Constructor):
def __init__(self, file_name=None):
@@ -52,6 +53,22 @@ def construct_mapping(self, node, deep=False):
return ret
+ def construct_yaml_str(self, node):
+ # Override the default string handling function
+ # to always return unicode objects
+ value = self.construct_scalar(node)
+ value = to_unicode(value)
+ data = AnsibleUnicode(self.construct_scalar(node))
+
+ data._line_number = node.__line__
+ data._column_number = node.__column__
+ if self._ansible_file_name:
+ data._data_source = self._ansible_file_name
+ else:
+ data._data_source = node.__datasource__
+
+ return data
+
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:map',
AnsibleConstructor.construct_yaml_map)
@@ -60,3 +77,11 @@ def construct_mapping(self, node, deep=False):
u'tag:yaml.org,2002:python/dict',
AnsibleConstructor.construct_yaml_map)
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ AnsibleConstructor.construct_yaml_str)
+
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ AnsibleConstructor.construct_yaml_str)
+
diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py
index 6eff9966f94bf8..69f8c0968d17ec 100644
--- a/v2/ansible/parsing/yaml/objects.py
+++ b/v2/ansible/parsing/yaml/objects.py
@@ -50,3 +50,6 @@ class AnsibleMapping(AnsibleBaseYAMLObject, dict):
''' sub class for dictionaries '''
pass
+class AnsibleUnicode(AnsibleBaseYAMLObject, unicode):
+ ''' sub class for unicode objects '''
+ pass
diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py
new file mode 100644
index 00000000000000..942062798e19a2
--- /dev/null
+++ b/v2/test/parsing/yaml/test_loader.py
@@ -0,0 +1,156 @@
+# coding: utf-8
+# (c) 2015, Toshio Kuratomi
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from cStringIO import StringIO
+from collections import Sequence, Set, Mapping
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import patch
+
+from ansible.parsing.yaml.loader import AnsibleLoader
+
+class TestDataLoader(unittest.TestCase):
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_parse_number(self):
+ stream = StringIO("""
+ 1
+ """)
+ loader = AnsibleLoader(stream)
+ data = loader.get_single_data()
+ self.assertEqual(data, 1)
+
+ def test_parse_string(self):
+ stream = StringIO("""
+ Ansible
+ """)
+ loader = AnsibleLoader(stream)
+ data = loader.get_single_data()
+ self.assertEqual(data, u'Ansible')
+ self.assertIsInstance(data, unicode)
+
+ def test_parse_utf8_string(self):
+ stream = StringIO("""
+ Cafè Eñyei
+ """)
+ loader = AnsibleLoader(stream)
+ data = loader.get_single_data()
+ self.assertEqual(data, u'Cafè Eñyei')
+ self.assertIsInstance(data, unicode)
+
+ def test_parse_dict(self):
+ stream = StringIO("""
+ webster: daniel
+ oed: oxford
+ """)
+ loader = AnsibleLoader(stream)
+ data = loader.get_single_data()
+ self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'})
+ self.assertEqual(len(data), 2)
+ self.assertIsInstance(data.keys()[0], unicode)
+ self.assertIsInstance(data.values()[0], unicode)
+
+ def test_parse_list(self):
+ stream = StringIO("""
+ - a
+ - b
+ """)
+ loader = AnsibleLoader(stream)
+ data = loader.get_single_data()
+ self.assertEqual(data, [u'a', u'b'])
+ self.assertEqual(len(data), 2)
+ self.assertIsInstance(data[0], unicode)
+
+ def test_parse_play(self):
+ stream = StringIO("""
+ - hosts: localhost
+ vars:
+ number: 1
+ string: Ansible
+ utf8_string: Cafè Eñyei
+ dictionary:
+ webster: daniel
+ oed: oxford
+ list:
+ - a
+ - b
+ - 1
+ - 2
+ tasks:
+ - name: Test case
+ ping:
+ data: "{{ utf8_string }}"
+
+ - name: Test 2
+ ping:
+ data: "Cafè Eñyei"
+
+ - name: Test 3
+ command: "printf 'Cafè Eñyei\\n'"
+ """)
+ loader = AnsibleLoader(stream)
+ data = loader.get_single_data()
+ self.assertEqual(len(data), 1)
+ self.assertIsInstance(data, list)
+ self.assertEqual(frozenset(data[0].keys()), frozenset((u'hosts', u'vars', u'tasks')))
+
+ self.assertEqual(data[0][u'hosts'], u'localhost')
+
+ self.assertEqual(data[0][u'vars'][u'number'], 1)
+ self.assertEqual(data[0][u'vars'][u'string'], u'Ansible')
+ self.assertEqual(data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei')
+ self.assertEqual(data[0][u'vars'][u'dictionary'],
+ {u'webster': u'daniel',
+ u'oed': u'oxford'})
+ self.assertEqual(data[0][u'vars'][u'list'], [u'a', u'b', 1, 2])
+
+ self.assertEqual(data[0][u'tasks'],
+ [{u'name': u'Test case', u'ping': {u'data': u'{{ utf8_string }}'}},
+ {u'name': u'Test 2', u'ping': {u'data': u'Cafè Eñyei'}},
+ {u'name': u'Test 3', u'command': u'printf \'Cafè Eñyei\n\''},
+ ])
+
+ self.walk(data)
+
+ def walk(self, data):
+ # Make sure there's no str in the data
+ self.assertNotIsInstance(data, str)
+
+ # Descend into various container types
+ if isinstance(data, unicode):
+ # strings are a sequence so we have to be explicit here
+ return
+ elif isinstance(data, (Sequence, Set)):
+ for element in data:
+ self.walk(element)
+ elif isinstance(data, Mapping):
+ for k, v in data.items():
+ self.walk(k)
+ self.walk(v)
+
+ # Scalars were all checked so we're good to go
+ return
diff --git a/v2/test/test.yml b/v2/test/test.yml
deleted file mode 100644
index 299b66610d12b0..00000000000000
--- a/v2/test/test.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-- name: Test
-filename: /usr/café//are_doing_this_to_me
From b152275a363bbfc098666a417c982a16808045c2 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 1 Apr 2015 12:18:53 -0700
Subject: [PATCH 0219/3617] Test line numbers and "fix" a bug in the scalar
line counting
---
v2/ansible/parsing/yaml/composer.py | 10 +-
v2/test/parsing/yaml/test_loader.py | 191 ++++++++++++++++++++++++----
2 files changed, 175 insertions(+), 26 deletions(-)
diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py
index 4f2c9f411b6595..faf712253ecada 100644
--- a/v2/ansible/parsing/yaml/composer.py
+++ b/v2/ansible/parsing/yaml/composer.py
@@ -35,8 +35,13 @@ def compose_node(self, parent, index):
# token's line (what about multiline strings? Perhaps we also
# need to use previous token ended
node.__datasource__ = self.name
- node.__line__ = self.line + 1
- node.__column__ = self.column + 1
+ node.__line__ = self.line
+
+ # Need to investigate why this works...
+ if self.indents:
+ node.__column__ = self.indent + 1
+ else:
+ node.__column__ = self.column +1
elif isinstance(node, MappingNode):
node.__datasource__ = self.name
@@ -58,4 +63,3 @@ def compose_mapping_node(self, anchor):
# should be good enough to determine the error location.
self.__mapping_starts.append((self.line + 1, self.column + 1))
return Composer.compose_mapping_node(self, anchor)
-
diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py
index 942062798e19a2..4f08d8ea70c3df 100644
--- a/v2/test/parsing/yaml/test_loader.py
+++ b/v2/test/parsing/yaml/test_loader.py
@@ -20,7 +20,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from cStringIO import StringIO
+from StringIO import StringIO
from collections import Sequence, Set, Mapping
from ansible.compat.tests import unittest
@@ -28,7 +28,7 @@
from ansible.parsing.yaml.loader import AnsibleLoader
-class TestDataLoader(unittest.TestCase):
+class TestAnsibleLoaderBasic(unittest.TestCase):
def setUp(self):
pass
@@ -40,52 +40,78 @@ def test_parse_number(self):
stream = StringIO("""
1
""")
- loader = AnsibleLoader(stream)
+ loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, 1)
+ # No line/column info saved yet
def test_parse_string(self):
stream = StringIO("""
Ansible
""")
- loader = AnsibleLoader(stream)
+ loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, u'Ansible')
self.assertIsInstance(data, unicode)
+ self.assertEqual(data._line_number, 2)
+ self.assertEqual(data._column_number, 17)
+ self.assertEqual(data._data_source, 'myfile.yml')
+
def test_parse_utf8_string(self):
stream = StringIO("""
Cafè Eñyei
""")
- loader = AnsibleLoader(stream)
+ loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, u'Cafè Eñyei')
self.assertIsInstance(data, unicode)
+ self.assertEqual(data._line_number, 2)
+ self.assertEqual(data._column_number, 17)
+ self.assertEqual(data._data_source, 'myfile.yml')
+
def test_parse_dict(self):
stream = StringIO("""
webster: daniel
oed: oxford
""")
- loader = AnsibleLoader(stream)
+ loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'})
self.assertEqual(len(data), 2)
self.assertIsInstance(data.keys()[0], unicode)
self.assertIsInstance(data.values()[0], unicode)
+ # Note: this is the beginning of the first value.
+ # May be changed in the future to beginning of the first key
+ self.assertEqual(data._line_number, 2)
+ self.assertEqual(data._column_number, 25)
+ self.assertEqual(data._data_source, 'myfile.yml')
+
+ self.assertEqual(data[u'webster']._line_number, 2)
+ self.assertEqual(data[u'webster']._column_number, 17)
+ self.assertEqual(data[u'webster']._data_source, 'myfile.yml')
+
+ self.assertEqual(data[u'oed']._line_number, 3)
+ self.assertEqual(data[u'oed']._column_number, 17)
+ self.assertEqual(data[u'oed']._data_source, 'myfile.yml')
+
def test_parse_list(self):
stream = StringIO("""
- a
- b
""")
- loader = AnsibleLoader(stream)
+ loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, [u'a', u'b'])
self.assertEqual(len(data), 2)
self.assertIsInstance(data[0], unicode)
+ # No line/column info saved yet
- def test_parse_play(self):
+class TestAnsibleLoaderPlay(unittest.TestCase):
+
+ def setUp(self):
stream = StringIO("""
- hosts: localhost
vars:
@@ -112,29 +138,35 @@ def test_parse_play(self):
- name: Test 3
command: "printf 'Cafè Eñyei\\n'"
""")
- loader = AnsibleLoader(stream)
- data = loader.get_single_data()
- self.assertEqual(len(data), 1)
- self.assertIsInstance(data, list)
- self.assertEqual(frozenset(data[0].keys()), frozenset((u'hosts', u'vars', u'tasks')))
+ self.play_filename = '/path/to/myplay.yml'
+ stream.name = self.play_filename
+ self.loader = AnsibleLoader(stream)
+ self.data = self.loader.get_single_data()
+
+ def tearDown(self):
+ pass
+
+ def test_data_complete(self):
+ return
+ self.assertEqual(len(self.data), 1)
+ self.assertIsInstance(self.data, list)
+ self.assertEqual(frozenset(self.data[0].keys()), frozenset((u'hosts', u'vars', u'tasks')))
- self.assertEqual(data[0][u'hosts'], u'localhost')
+ self.assertEqual(self.data[0][u'hosts'], u'localhost')
- self.assertEqual(data[0][u'vars'][u'number'], 1)
- self.assertEqual(data[0][u'vars'][u'string'], u'Ansible')
- self.assertEqual(data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei')
- self.assertEqual(data[0][u'vars'][u'dictionary'],
+ self.assertEqual(self.data[0][u'vars'][u'number'], 1)
+ self.assertEqual(self.data[0][u'vars'][u'string'], u'Ansible')
+ self.assertEqual(self.data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei')
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'],
{u'webster': u'daniel',
u'oed': u'oxford'})
- self.assertEqual(data[0][u'vars'][u'list'], [u'a', u'b', 1, 2])
+ self.assertEqual(self.data[0][u'vars'][u'list'], [u'a', u'b', 1, 2])
- self.assertEqual(data[0][u'tasks'],
+ self.assertEqual(self.data[0][u'tasks'],
[{u'name': u'Test case', u'ping': {u'data': u'{{ utf8_string }}'}},
{u'name': u'Test 2', u'ping': {u'data': u'Cafè Eñyei'}},
{u'name': u'Test 3', u'command': u'printf \'Cafè Eñyei\n\''},
- ])
-
- self.walk(data)
+ ])
def walk(self, data):
# Make sure there's no str in the data
@@ -154,3 +186,116 @@ def walk(self, data):
# Scalars were all checked so we're good to go
return
+
+ def test_no_str_in_data(self):
+ # Checks that no strings are str type
+ self.walk(self.data)
+
+ def check_vars(self):
+ # Numbers don't have line/col information yet
+ #self.assertEqual(self.data[0][u'vars'][u'number']._line_number, 4)
+ #self.assertEqual(self.data[0][u'vars'][u'number']._column_number, 21)
+ #self.assertEqual(self.data[0][u'vars'][u'number']._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'vars'][u'string']._line_number, 5)
+ self.assertEqual(self.data[0][u'vars'][u'string']._column_number, 21)
+ self.assertEqual(self.data[0][u'vars'][u'string']._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'vars'][u'utf8_string']._line_number, 6)
+ self.assertEqual(self.data[0][u'vars'][u'utf8_string']._column_number, 21)
+ self.assertEqual(self.data[0][u'vars'][u'utf8_string']._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'vars'][u'dictionary']._line_number, 8)
+ self.assertEqual(self.data[0][u'vars'][u'dictionary']._column_number, 31)
+ self.assertEqual(self.data[0][u'vars'][u'dictionary']._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._line_number, 8)
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._column_number, 23)
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._line_number, 9)
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._column_number, 23)
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._data_source, self.play_filename)
+
+ # Lists don't yet have line/col information
+ #self.assertEqual(self.data[0][u'vars'][u'list']._line_number, 10)
+ #self.assertEqual(self.data[0][u'vars'][u'list']._column_number, 21)
+ #self.assertEqual(self.data[0][u'vars'][u'list']._data_source, self.play_filename)
+
+ def check_tasks(self):
+ #
+ # First Task
+ #
+ self.assertEqual(self.data[0][u'tasks'][0]._line_number, 16)
+ self.assertEqual(self.data[0][u'tasks'][0]._column_number, 28)
+ self.assertEqual(self.data[0][u'tasks'][0]._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'tasks'][0][u'name']._line_number, 16)
+ self.assertEqual(self.data[0][u'tasks'][0][u'name']._column_number, 23)
+ self.assertEqual(self.data[0][u'tasks'][0][u'name']._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping']._line_number, 18)
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping']._column_number, 30)
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping']._data_source, self.play_filename)
+
+ #self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._line_number, 18)
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._column_number, 25)
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._data_source, self.play_filename)
+
+ #
+ # Second Task
+ #
+ self.assertEqual(self.data[0][u'tasks'][1]._line_number, 20)
+ self.assertEqual(self.data[0][u'tasks'][1]._column_number, 28)
+ self.assertEqual(self.data[0][u'tasks'][1]._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'tasks'][1][u'name']._line_number, 20)
+ self.assertEqual(self.data[0][u'tasks'][1][u'name']._column_number, 23)
+ self.assertEqual(self.data[0][u'tasks'][1][u'name']._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping']._line_number, 22)
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping']._column_number, 30)
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping']._data_source, self.play_filename)
+
+ #self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._line_number, 22)
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._column_number, 25)
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._data_source, self.play_filename)
+
+ #
+ # Third Task
+ #
+ self.assertEqual(self.data[0][u'tasks'][2]._line_number, 24)
+ self.assertEqual(self.data[0][u'tasks'][2]._column_number, 28)
+ self.assertEqual(self.data[0][u'tasks'][2]._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'tasks'][2][u'name']._line_number, 24)
+ self.assertEqual(self.data[0][u'tasks'][2][u'name']._column_number, 23)
+ self.assertEqual(self.data[0][u'tasks'][2][u'name']._data_source, self.play_filename)
+
+ #self.assertEqual(self.data[0][u'tasks'][2][u'command']._line_number, 25)
+ self.assertEqual(self.data[0][u'tasks'][2][u'command']._column_number, 23)
+ self.assertEqual(self.data[0][u'tasks'][2][u'command']._data_source, self.play_filename)
+
+ def test_line_numbers(self):
+ # Check the line/column numbers are correct
+ # Note: Remember, currently dicts begin at the start of their first entry's value
+ self.assertEqual(self.data[0]._line_number, 2)
+ self.assertEqual(self.data[0]._column_number, 25)
+ self.assertEqual(self.data[0]._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'hosts']._line_number, 2)
+ self.assertEqual(self.data[0][u'hosts']._column_number, 19)
+ self.assertEqual(self.data[0][u'hosts']._data_source, self.play_filename)
+
+ self.assertEqual(self.data[0][u'vars']._line_number, 4)
+ self.assertEqual(self.data[0][u'vars']._column_number, 28)
+ self.assertEqual(self.data[0][u'vars']._data_source, self.play_filename)
+
+ self.check_vars()
+
+ # Lists don't yet have line/col info
+ #self.assertEqual(self.data[0][u'tasks']._line_number, 17)
+ #self.assertEqual(self.data[0][u'tasks']._column_number, 28)
+ #self.assertEqual(self.data[0][u'tasks']._data_source, self.play_filename)
+
+ self.check_tasks()
From 05f1bed12bd25bf88d87bf9fcbc46bec52772309 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 1 Apr 2015 13:51:01 -0700
Subject: [PATCH 0220/3617] Use the node's start_mark to determine line and
column.
* Eliminates a lot of logic in the AnsibleComposer class.
* Update tests with new column offsets. The rule should now be
consistently: Column is the start of the entry's value (so for
strings, the first non-space after the entry beginning, for dicts, the
first character of the first key)
---
v2/ansible/parsing/yaml/composer.py | 33 ++----------------
v2/test/parsing/yaml/test_loader.py | 54 ++++++++++++++---------------
2 files changed, 29 insertions(+), 58 deletions(-)
diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py
index faf712253ecada..6bdee92fc38180 100644
--- a/v2/ansible/parsing/yaml/composer.py
+++ b/v2/ansible/parsing/yaml/composer.py
@@ -24,42 +24,15 @@
class AnsibleComposer(Composer):
def __init__(self):
- self.__mapping_starts = []
super(Composer, self).__init__()
def compose_node(self, parent, index):
# the line number where the previous token has ended (plus empty lines)
node = Composer.compose_node(self, parent, index)
- if isinstance(node, ScalarNode):
- # Scalars are pretty easy -- assume they start on the current
- # token's line (what about multiline strings? Perhaps we also
- # need to use previous token ended
+ if isinstance(node, (ScalarNode, MappingNode)):
node.__datasource__ = self.name
node.__line__ = self.line
-
- # Need to investigate why this works...
- if self.indents:
- node.__column__ = self.indent + 1
- else:
- node.__column__ = self.column +1
- elif isinstance(node, MappingNode):
- node.__datasource__ = self.name
-
- # Need extra help to know where the mapping starts
- try:
- (cur_line, cur_column) = self.__mapping_starts.pop()
- except:
- cur_line = None
- cur_column = None
- node.__line__ = cur_line
- node.__column__ = cur_column
+ node.__column__ = node.start_mark.column + 1
+ node.__line__ = node.start_mark.line + 1
return node
-
- def compose_mapping_node(self, anchor):
- # the column here will point at the position in the file immediately
- # after the first key is found, which could be a space or a newline.
- # We could back this up to find the beginning of the key, but this
- # should be good enough to determine the error location.
- self.__mapping_starts.append((self.line + 1, self.column + 1))
- return Composer.compose_mapping_node(self, anchor)
diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py
index 4f08d8ea70c3df..aba103d37f6757 100644
--- a/v2/test/parsing/yaml/test_loader.py
+++ b/v2/test/parsing/yaml/test_loader.py
@@ -83,18 +83,17 @@ def test_parse_dict(self):
self.assertIsInstance(data.keys()[0], unicode)
self.assertIsInstance(data.values()[0], unicode)
- # Note: this is the beginning of the first value.
- # May be changed in the future to beginning of the first key
+ # Beginning of the first key
self.assertEqual(data._line_number, 2)
- self.assertEqual(data._column_number, 25)
+ self.assertEqual(data._column_number, 17)
self.assertEqual(data._data_source, 'myfile.yml')
self.assertEqual(data[u'webster']._line_number, 2)
- self.assertEqual(data[u'webster']._column_number, 17)
+ self.assertEqual(data[u'webster']._column_number, 26)
self.assertEqual(data[u'webster']._data_source, 'myfile.yml')
self.assertEqual(data[u'oed']._line_number, 3)
- self.assertEqual(data[u'oed']._column_number, 17)
+ self.assertEqual(data[u'oed']._column_number, 22)
self.assertEqual(data[u'oed']._data_source, 'myfile.yml')
def test_parse_list(self):
@@ -147,7 +146,6 @@ def tearDown(self):
pass
def test_data_complete(self):
- return
self.assertEqual(len(self.data), 1)
self.assertIsInstance(self.data, list)
self.assertEqual(frozenset(self.data[0].keys()), frozenset((u'hosts', u'vars', u'tasks')))
@@ -198,23 +196,23 @@ def check_vars(self):
#self.assertEqual(self.data[0][u'vars'][u'number']._data_source, self.play_filename)
self.assertEqual(self.data[0][u'vars'][u'string']._line_number, 5)
- self.assertEqual(self.data[0][u'vars'][u'string']._column_number, 21)
+ self.assertEqual(self.data[0][u'vars'][u'string']._column_number, 29)
self.assertEqual(self.data[0][u'vars'][u'string']._data_source, self.play_filename)
self.assertEqual(self.data[0][u'vars'][u'utf8_string']._line_number, 6)
- self.assertEqual(self.data[0][u'vars'][u'utf8_string']._column_number, 21)
+ self.assertEqual(self.data[0][u'vars'][u'utf8_string']._column_number, 34)
self.assertEqual(self.data[0][u'vars'][u'utf8_string']._data_source, self.play_filename)
self.assertEqual(self.data[0][u'vars'][u'dictionary']._line_number, 8)
- self.assertEqual(self.data[0][u'vars'][u'dictionary']._column_number, 31)
+ self.assertEqual(self.data[0][u'vars'][u'dictionary']._column_number, 23)
self.assertEqual(self.data[0][u'vars'][u'dictionary']._data_source, self.play_filename)
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._line_number, 8)
- self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._column_number, 23)
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._column_number, 32)
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._data_source, self.play_filename)
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._line_number, 9)
- self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._column_number, 23)
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._column_number, 28)
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._data_source, self.play_filename)
# Lists don't yet have line/col information
@@ -227,68 +225,68 @@ def check_tasks(self):
# First Task
#
self.assertEqual(self.data[0][u'tasks'][0]._line_number, 16)
- self.assertEqual(self.data[0][u'tasks'][0]._column_number, 28)
+ self.assertEqual(self.data[0][u'tasks'][0]._column_number, 23)
self.assertEqual(self.data[0][u'tasks'][0]._data_source, self.play_filename)
self.assertEqual(self.data[0][u'tasks'][0][u'name']._line_number, 16)
- self.assertEqual(self.data[0][u'tasks'][0][u'name']._column_number, 23)
+ self.assertEqual(self.data[0][u'tasks'][0][u'name']._column_number, 29)
self.assertEqual(self.data[0][u'tasks'][0][u'name']._data_source, self.play_filename)
self.assertEqual(self.data[0][u'tasks'][0][u'ping']._line_number, 18)
- self.assertEqual(self.data[0][u'tasks'][0][u'ping']._column_number, 30)
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping']._column_number, 25)
self.assertEqual(self.data[0][u'tasks'][0][u'ping']._data_source, self.play_filename)
- #self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._line_number, 18)
- self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._column_number, 25)
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._line_number, 18)
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._column_number, 31)
self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._data_source, self.play_filename)
#
# Second Task
#
self.assertEqual(self.data[0][u'tasks'][1]._line_number, 20)
- self.assertEqual(self.data[0][u'tasks'][1]._column_number, 28)
+ self.assertEqual(self.data[0][u'tasks'][1]._column_number, 23)
self.assertEqual(self.data[0][u'tasks'][1]._data_source, self.play_filename)
self.assertEqual(self.data[0][u'tasks'][1][u'name']._line_number, 20)
- self.assertEqual(self.data[0][u'tasks'][1][u'name']._column_number, 23)
+ self.assertEqual(self.data[0][u'tasks'][1][u'name']._column_number, 29)
self.assertEqual(self.data[0][u'tasks'][1][u'name']._data_source, self.play_filename)
self.assertEqual(self.data[0][u'tasks'][1][u'ping']._line_number, 22)
- self.assertEqual(self.data[0][u'tasks'][1][u'ping']._column_number, 30)
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping']._column_number, 25)
self.assertEqual(self.data[0][u'tasks'][1][u'ping']._data_source, self.play_filename)
- #self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._line_number, 22)
- self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._column_number, 25)
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._line_number, 22)
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._column_number, 31)
self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._data_source, self.play_filename)
#
# Third Task
#
self.assertEqual(self.data[0][u'tasks'][2]._line_number, 24)
- self.assertEqual(self.data[0][u'tasks'][2]._column_number, 28)
+ self.assertEqual(self.data[0][u'tasks'][2]._column_number, 23)
self.assertEqual(self.data[0][u'tasks'][2]._data_source, self.play_filename)
self.assertEqual(self.data[0][u'tasks'][2][u'name']._line_number, 24)
- self.assertEqual(self.data[0][u'tasks'][2][u'name']._column_number, 23)
+ self.assertEqual(self.data[0][u'tasks'][2][u'name']._column_number, 29)
self.assertEqual(self.data[0][u'tasks'][2][u'name']._data_source, self.play_filename)
- #self.assertEqual(self.data[0][u'tasks'][2][u'command']._line_number, 25)
- self.assertEqual(self.data[0][u'tasks'][2][u'command']._column_number, 23)
+ self.assertEqual(self.data[0][u'tasks'][2][u'command']._line_number, 25)
+ self.assertEqual(self.data[0][u'tasks'][2][u'command']._column_number, 32)
self.assertEqual(self.data[0][u'tasks'][2][u'command']._data_source, self.play_filename)
def test_line_numbers(self):
# Check the line/column numbers are correct
# Note: Remember, currently dicts begin at the start of their first entry's value
self.assertEqual(self.data[0]._line_number, 2)
- self.assertEqual(self.data[0]._column_number, 25)
+ self.assertEqual(self.data[0]._column_number, 19)
self.assertEqual(self.data[0]._data_source, self.play_filename)
self.assertEqual(self.data[0][u'hosts']._line_number, 2)
- self.assertEqual(self.data[0][u'hosts']._column_number, 19)
+ self.assertEqual(self.data[0][u'hosts']._column_number, 26)
self.assertEqual(self.data[0][u'hosts']._data_source, self.play_filename)
self.assertEqual(self.data[0][u'vars']._line_number, 4)
- self.assertEqual(self.data[0][u'vars']._column_number, 28)
+ self.assertEqual(self.data[0][u'vars']._column_number, 21)
self.assertEqual(self.data[0][u'vars']._data_source, self.play_filename)
self.check_vars()
From e697de6076bea96584b1109eda2287b889aaef09 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 1 Apr 2015 14:54:22 -0700
Subject: [PATCH 0221/3617] Move AnsibleBaseYAMLObject's position_info into
a property
---
v2/ansible/errors/__init__.py | 2 +-
v2/ansible/parsing/__init__.py | 2 +-
v2/ansible/parsing/yaml/constructor.py | 22 ++---
v2/ansible/parsing/yaml/objects.py | 17 ++--
v2/ansible/playbook/helpers.py | 2 +-
v2/ansible/playbook/playbook_include.py | 2 +-
v2/ansible/playbook/role/definition.py | 2 +-
v2/ansible/playbook/task.py | 2 +-
v2/test/errors/test_errors.py | 14 +--
v2/test/parsing/yaml/test_loader.py | 125 ++++++------------------
10 files changed, 54 insertions(+), 136 deletions(-)
diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py
index 7effe41df7cf4d..bdd6e524489c72 100644
--- a/v2/ansible/errors/__init__.py
+++ b/v2/ansible/errors/__init__.py
@@ -92,7 +92,7 @@ def _get_extended_error(self):
error_message = ''
try:
- (src_file, line_number, col_number) = self._obj.get_position_info()
+ (src_file, line_number, col_number) = self._obj.ansible_pos
error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
        if src_file not in ('<string>', '<unicode>') and self._show_content:
(target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py
index f8a3e967465da7..75465bdfa3ed1f 100644
--- a/v2/ansible/parsing/__init__.py
+++ b/v2/ansible/parsing/__init__.py
@@ -146,7 +146,7 @@ def _handle_error(self, yaml_exc, file_name, show_content):
err_obj = None
if hasattr(yaml_exc, 'problem_mark'):
err_obj = AnsibleBaseYAMLObject()
- err_obj.set_position_info(file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
+ err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content)
diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py
index b607f46b05548a..0043b8a2f044d5 100644
--- a/v2/ansible/parsing/yaml/constructor.py
+++ b/v2/ansible/parsing/yaml/constructor.py
@@ -33,23 +33,20 @@ def construct_yaml_map(self, node):
yield data
value = self.construct_mapping(node)
data.update(value)
- data._line_number = value._line_number
- data._column_number = value._column_number
- data._data_source = value._data_source
+ data.ansible_pos = value.ansible_pos
def construct_mapping(self, node, deep=False):
ret = AnsibleMapping(super(Constructor, self).construct_mapping(node, deep))
- ret._line_number = node.__line__
- ret._column_number = node.__column__
# in some cases, we may have pre-read the data and then
# passed it to the load() call for YAML, in which case we
# want to override the default datasource (which would be
# '') to the actual filename we read in
if self._ansible_file_name:
- ret._data_source = self._ansible_file_name
+ data_source = self._ansible_file_name
else:
- ret._data_source = node.__datasource__
+ data_source = node.__datasource__
+ ret.ansible_pos = (data_source, node.__line__, node.__column__)
return ret
@@ -58,16 +55,15 @@ def construct_yaml_str(self, node):
# to always return unicode objects
value = self.construct_scalar(node)
value = to_unicode(value)
- data = AnsibleUnicode(self.construct_scalar(node))
+ ret = AnsibleUnicode(self.construct_scalar(node))
- data._line_number = node.__line__
- data._column_number = node.__column__
if self._ansible_file_name:
- data._data_source = self._ansible_file_name
+ data_source = self._ansible_file_name
else:
- data._data_source = node.__datasource__
+ data_source = node.__datasource__
+ ret.ansible_pos = (data_source, node.__line__, node.__column__)
- return data
+ return ret
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:map',
diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py
index 69f8c0968d17ec..15850dd4f8749d 100644
--- a/v2/ansible/parsing/yaml/objects.py
+++ b/v2/ansible/parsing/yaml/objects.py
@@ -29,22 +29,19 @@ class AnsibleBaseYAMLObject:
_line_number = 0
_column_number = 0
- def get_position_info(self):
+ def _get_ansible_position(self):
return (self._data_source, self._line_number, self._column_number)
- def set_position_info(self, src, line, col):
+ def _set_ansible_position(self, obj):
+ try:
+ (src, line, col) = obj
+ except (TypeError, ValueError):
+ raise AssertionError('ansible_pos can only be set with a tuple/list of three values: source, line number, column number')
self._data_source = src
self._line_number = line
self._column_number = col
- def copy_position_info(self, obj):
- ''' copies the position info from another object '''
- assert isinstance(obj, AnsibleBaseYAMLObject)
-
- (src, line, col) = obj.get_position_info()
- self._data_source = src
- self._line_number = line
- self._column_number = col
+ ansible_pos = property(_get_ansible_position, _set_ansible_position)
class AnsibleMapping(AnsibleBaseYAMLObject, dict):
''' sub class for dictionaries '''
diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py
index 0e147205578406..4277e201b7bb66 100644
--- a/v2/ansible/playbook/helpers.py
+++ b/v2/ansible/playbook/helpers.py
@@ -74,7 +74,7 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler
#if 'include' in task:
# cur_basedir = None
# if isinstance(task, AnsibleBaseYAMLObject) and loader:
- # pos_info = task.get_position_info()
+ # pos_info = task.ansible_pos
# new_basedir = os.path.dirname(pos_info[0])
# cur_basedir = loader.get_basedir()
# loader.set_basedir(new_basedir)
diff --git a/v2/ansible/playbook/playbook_include.py b/v2/ansible/playbook/playbook_include.py
index f7eae230f7c0e8..2e4964fce9617b 100644
--- a/v2/ansible/playbook/playbook_include.py
+++ b/v2/ansible/playbook/playbook_include.py
@@ -80,7 +80,7 @@ def preprocess_data(self, ds):
# items reduced to a standard structure
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
- new_ds.copy_position_info(ds)
+ new_ds.ansible_pos = ds.ansible_pos
for (k,v) in ds.iteritems():
if k == 'include':
diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py
index fb96a0e55f9c83..0cb1e45760dfa8 100644
--- a/v2/ansible/playbook/role/definition.py
+++ b/v2/ansible/playbook/role/definition.py
@@ -66,7 +66,7 @@ def preprocess_data(self, ds):
# can preserve file:line:column information if it exists
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
- new_ds.copy_position_info(ds)
+ new_ds.ansible_pos = ds.ansible_pos
# first we pull the role name out of the data structure,
# and then use that to determine the role path (which may
diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py
index 0f5e7674866bbd..b36c24167a426f 100644
--- a/v2/ansible/playbook/task.py
+++ b/v2/ansible/playbook/task.py
@@ -159,7 +159,7 @@ def preprocess_data(self, ds):
# attributes of the task class
new_ds = AnsibleMapping()
if isinstance(ds, AnsibleBaseYAMLObject):
- new_ds.copy_position_info(ds)
+ new_ds.ansible_pos = ds.ansible_pos
# use the args parsing class to determine the action, args,
# and the delegate_to value from the various possible forms
diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py
index 3e8e0dd7bacad4..3993ea5061b73f 100644
--- a/v2/test/errors/test_errors.py
+++ b/v2/test/errors/test_errors.py
@@ -44,9 +44,7 @@ def test_basic_error(self):
@patch.object(AnsibleError, '_get_error_lines_from_file')
def test_error_with_object(self, mock_method):
- self.obj._data_source = 'foo.yml'
- self.obj._line_number = 1
- self.obj._column_number = 1
+ self.obj.ansible_pos = ('foo.yml', 1, 1)
mock_method.return_value = ('this is line 1\n', '')
e = AnsibleError(self.message, self.obj)
@@ -59,16 +57,12 @@ def test_get_error_lines_from_file(self):
with patch('{0}.open'.format(BUILTINS), m):
# this line will be found in the file
- self.obj._data_source = 'foo.yml'
- self.obj._line_number = 1
- self.obj._column_number = 1
+ self.obj.ansible_pos = ('foo.yml', 1, 1)
e = AnsibleError(self.message, self.obj)
self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")
# this line will not be found, as it is out of the index range
- self.obj._data_source = 'foo.yml'
- self.obj._line_number = 2
- self.obj._column_number = 1
+ self.obj.ansible_pos = ('foo.yml', 2, 1)
e = AnsibleError(self.message, self.obj)
self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)")
-
+
diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py
index aba103d37f6757..f9144fb2925400 100644
--- a/v2/test/parsing/yaml/test_loader.py
+++ b/v2/test/parsing/yaml/test_loader.py
@@ -54,9 +54,7 @@ def test_parse_string(self):
self.assertEqual(data, u'Ansible')
self.assertIsInstance(data, unicode)
- self.assertEqual(data._line_number, 2)
- self.assertEqual(data._column_number, 17)
- self.assertEqual(data._data_source, 'myfile.yml')
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
def test_parse_utf8_string(self):
stream = StringIO("""
@@ -67,9 +65,7 @@ def test_parse_utf8_string(self):
self.assertEqual(data, u'Cafè Eñyei')
self.assertIsInstance(data, unicode)
- self.assertEqual(data._line_number, 2)
- self.assertEqual(data._column_number, 17)
- self.assertEqual(data._data_source, 'myfile.yml')
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
def test_parse_dict(self):
stream = StringIO("""
@@ -84,17 +80,10 @@ def test_parse_dict(self):
self.assertIsInstance(data.values()[0], unicode)
# Beginning of the first key
- self.assertEqual(data._line_number, 2)
- self.assertEqual(data._column_number, 17)
- self.assertEqual(data._data_source, 'myfile.yml')
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
- self.assertEqual(data[u'webster']._line_number, 2)
- self.assertEqual(data[u'webster']._column_number, 26)
- self.assertEqual(data[u'webster']._data_source, 'myfile.yml')
-
- self.assertEqual(data[u'oed']._line_number, 3)
- self.assertEqual(data[u'oed']._column_number, 22)
- self.assertEqual(data[u'oed']._data_source, 'myfile.yml')
+ self.assertEqual(data[u'webster'].ansible_pos, ('myfile.yml', 2, 26))
+ self.assertEqual(data[u'oed'].ansible_pos, ('myfile.yml', 3, 22))
def test_parse_list(self):
stream = StringIO("""
@@ -191,109 +180,51 @@ def test_no_str_in_data(self):
def check_vars(self):
# Numbers don't have line/col information yet
- #self.assertEqual(self.data[0][u'vars'][u'number']._line_number, 4)
- #self.assertEqual(self.data[0][u'vars'][u'number']._column_number, 21)
- #self.assertEqual(self.data[0][u'vars'][u'number']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'vars'][u'string']._line_number, 5)
- self.assertEqual(self.data[0][u'vars'][u'string']._column_number, 29)
- self.assertEqual(self.data[0][u'vars'][u'string']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'vars'][u'utf8_string']._line_number, 6)
- self.assertEqual(self.data[0][u'vars'][u'utf8_string']._column_number, 34)
- self.assertEqual(self.data[0][u'vars'][u'utf8_string']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'vars'][u'dictionary']._line_number, 8)
- self.assertEqual(self.data[0][u'vars'][u'dictionary']._column_number, 23)
- self.assertEqual(self.data[0][u'vars'][u'dictionary']._data_source, self.play_filename)
+ #self.assertEqual(self.data[0][u'vars'][u'number'].ansible_pos, (self.play_filename, 4, 21))
- self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._line_number, 8)
- self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._column_number, 32)
- self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._line_number, 9)
- self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._column_number, 28)
- self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._data_source, self.play_filename)
+ self.assertEqual(self.data[0][u'vars'][u'string'].ansible_pos, (self.play_filename, 5, 29))
+ self.assertEqual(self.data[0][u'vars'][u'utf8_string'].ansible_pos, (self.play_filename, 6, 34))
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'].ansible_pos, (self.play_filename, 8, 23))
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster'].ansible_pos, (self.play_filename, 8, 32))
+ self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed'].ansible_pos, (self.play_filename, 9, 28))
# Lists don't yet have line/col information
- #self.assertEqual(self.data[0][u'vars'][u'list']._line_number, 10)
- #self.assertEqual(self.data[0][u'vars'][u'list']._column_number, 21)
- #self.assertEqual(self.data[0][u'vars'][u'list']._data_source, self.play_filename)
+ #self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 10, 21))
def check_tasks(self):
#
# First Task
#
- self.assertEqual(self.data[0][u'tasks'][0]._line_number, 16)
- self.assertEqual(self.data[0][u'tasks'][0]._column_number, 23)
- self.assertEqual(self.data[0][u'tasks'][0]._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'tasks'][0][u'name']._line_number, 16)
- self.assertEqual(self.data[0][u'tasks'][0][u'name']._column_number, 29)
- self.assertEqual(self.data[0][u'tasks'][0][u'name']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'tasks'][0][u'ping']._line_number, 18)
- self.assertEqual(self.data[0][u'tasks'][0][u'ping']._column_number, 25)
- self.assertEqual(self.data[0][u'tasks'][0][u'ping']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._line_number, 18)
- self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._column_number, 31)
- self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._data_source, self.play_filename)
+ self.assertEqual(self.data[0][u'tasks'][0].ansible_pos, (self.play_filename, 16, 23))
+ self.assertEqual(self.data[0][u'tasks'][0][u'name'].ansible_pos, (self.play_filename, 16, 29))
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping'].ansible_pos, (self.play_filename, 18, 25))
+ self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data'].ansible_pos, (self.play_filename, 18, 31))
#
# Second Task
#
- self.assertEqual(self.data[0][u'tasks'][1]._line_number, 20)
- self.assertEqual(self.data[0][u'tasks'][1]._column_number, 23)
- self.assertEqual(self.data[0][u'tasks'][1]._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'tasks'][1][u'name']._line_number, 20)
- self.assertEqual(self.data[0][u'tasks'][1][u'name']._column_number, 29)
- self.assertEqual(self.data[0][u'tasks'][1][u'name']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'tasks'][1][u'ping']._line_number, 22)
- self.assertEqual(self.data[0][u'tasks'][1][u'ping']._column_number, 25)
- self.assertEqual(self.data[0][u'tasks'][1][u'ping']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._line_number, 22)
- self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._column_number, 31)
- self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._data_source, self.play_filename)
+ self.assertEqual(self.data[0][u'tasks'][1].ansible_pos, (self.play_filename, 20, 23))
+ self.assertEqual(self.data[0][u'tasks'][1][u'name'].ansible_pos, (self.play_filename, 20, 29))
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping'].ansible_pos, (self.play_filename, 22, 25))
+ self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data'].ansible_pos, (self.play_filename, 22, 31))
#
# Third Task
#
- self.assertEqual(self.data[0][u'tasks'][2]._line_number, 24)
- self.assertEqual(self.data[0][u'tasks'][2]._column_number, 23)
- self.assertEqual(self.data[0][u'tasks'][2]._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'tasks'][2][u'name']._line_number, 24)
- self.assertEqual(self.data[0][u'tasks'][2][u'name']._column_number, 29)
- self.assertEqual(self.data[0][u'tasks'][2][u'name']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'tasks'][2][u'command']._line_number, 25)
- self.assertEqual(self.data[0][u'tasks'][2][u'command']._column_number, 32)
- self.assertEqual(self.data[0][u'tasks'][2][u'command']._data_source, self.play_filename)
+ self.assertEqual(self.data[0][u'tasks'][2].ansible_pos, (self.play_filename, 24, 23))
+ self.assertEqual(self.data[0][u'tasks'][2][u'name'].ansible_pos, (self.play_filename, 24, 29))
+ self.assertEqual(self.data[0][u'tasks'][2][u'command'].ansible_pos, (self.play_filename, 25, 32))
def test_line_numbers(self):
# Check the line/column numbers are correct
- # Note: Remember, currently dicts begin at the start of their first entry's value
- self.assertEqual(self.data[0]._line_number, 2)
- self.assertEqual(self.data[0]._column_number, 19)
- self.assertEqual(self.data[0]._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'hosts']._line_number, 2)
- self.assertEqual(self.data[0][u'hosts']._column_number, 26)
- self.assertEqual(self.data[0][u'hosts']._data_source, self.play_filename)
-
- self.assertEqual(self.data[0][u'vars']._line_number, 4)
- self.assertEqual(self.data[0][u'vars']._column_number, 21)
- self.assertEqual(self.data[0][u'vars']._data_source, self.play_filename)
+ # Note: Remember, currently dicts begin at the start of their first entry
+ self.assertEqual(self.data[0].ansible_pos, (self.play_filename, 2, 19))
+ self.assertEqual(self.data[0][u'hosts'].ansible_pos, (self.play_filename, 2, 26))
+ self.assertEqual(self.data[0][u'vars'].ansible_pos, (self.play_filename, 4, 21))
self.check_vars()
# Lists don't yet have line/col info
- #self.assertEqual(self.data[0][u'tasks']._line_number, 17)
- #self.assertEqual(self.data[0][u'tasks']._column_number, 28)
- #self.assertEqual(self.data[0][u'tasks']._data_source, self.play_filename)
+ #self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 17, 28))
self.check_tasks()
From 69cf95bd0e969af247d74365c6edc5564113beaa Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 1 Apr 2015 15:00:50 -0700
Subject: [PATCH 0222/3617] Add __init__ to the yaml test dir
---
v2/test/parsing/yaml/__init__.py | 0
1 file changed, 0 insertions(+), 0 deletions(-)
create mode 100644 v2/test/parsing/yaml/__init__.py
diff --git a/v2/test/parsing/yaml/__init__.py b/v2/test/parsing/yaml/__init__.py
new file mode 100644
index 00000000000000..e69de29bb2d1d6
From 785c0c0c8ca8d90f3bccc7206f0c267977f77882 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Wed, 25 Mar 2015 13:51:40 -0500
Subject: [PATCH 0223/3617] V2 fixing bugs
---
v2/ansible/executor/host_log.py | 43 -----
v2/ansible/executor/host_log_manager.py | 29 ---
v2/ansible/executor/play_iterator.py | 206 +++++++++++++--------
v2/ansible/executor/playbook_executor.py | 35 +++-
v2/ansible/executor/stats.py | 51 +++++
v2/ansible/executor/task_executor.py | 15 +-
v2/ansible/executor/task_queue_manager.py | 47 +++--
v2/ansible/executor/task_queue_manager.py: | 0
v2/ansible/parsing/__init__.py | 7 +-
v2/ansible/playbook/block.py | 20 +-
v2/ansible/playbook/helpers.py | 46 ++---
v2/ansible/playbook/play.py | 2 +-
v2/ansible/playbook/role/__init__.py | 2 +-
v2/ansible/playbook/task.py | 5 +-
v2/ansible/plugins/__init__.py | 5 +-
v2/ansible/plugins/action/copy.py | 2 +-
v2/ansible/plugins/callback/__init__.py | 6 +-
v2/ansible/plugins/callback/default.py | 77 ++++----
v2/ansible/plugins/callback/minimal.py | 2 +
v2/ansible/plugins/strategies/__init__.py | 107 +++++------
v2/ansible/plugins/strategies/linear.py | 35 ++--
v2/ansible/utils/cli.py | 2 +
v2/ansible/utils/color.py | 17 ++
v2/ansible/utils/display.py | 12 ++
v2/ansible/vars/__init__.py | 15 +-
v2/samples/include.yml | 4 +-
v2/samples/localhost_include.yml | 3 +
v2/samples/test_blocks_of_blocks.yml | 5 +
v2/samples/test_include.yml | 2 +-
v2/test/mock/loader.py | 3 +
v2/test/playbook/test_block.py | 6 -
v2/test/playbook/test_playbook.py | 9 +-
v2/test/playbook/test_task_include.py | 64 -------
v2/test/vars/test_variable_manager.py | 53 ++++--
34 files changed, 508 insertions(+), 429 deletions(-)
delete mode 100644 v2/ansible/executor/host_log.py
delete mode 100644 v2/ansible/executor/host_log_manager.py
create mode 100644 v2/ansible/executor/stats.py
create mode 100644 v2/ansible/executor/task_queue_manager.py:
create mode 100644 v2/samples/localhost_include.yml
delete mode 100644 v2/test/playbook/test_task_include.py
diff --git a/v2/ansible/executor/host_log.py b/v2/ansible/executor/host_log.py
deleted file mode 100644
index 495ad79f7d4185..00000000000000
--- a/v2/ansible/executor/host_log.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-class HostLog:
-
- def __init__(self, host):
- self.host = host
-
- def add_task_result(self, task_result):
- pass
-
- def has_failures(self):
- assert False
-
- def has_changes(self):
- assert False
-
- def get_tasks(self, are_executed=None, are_changed=None, are_successful=None):
- assert False
-
- def get_current_running_task(self)
- # atomic decorator likely required?
- assert False
-
-
diff --git a/v2/ansible/executor/host_log_manager.py b/v2/ansible/executor/host_log_manager.py
deleted file mode 100644
index 727d06ce5912a7..00000000000000
--- a/v2/ansible/executor/host_log_manager.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-class HostLogManager:
-
- def __init__(self):
- pass
-
- def get_log_for_host(self, host):
- assert False
-
diff --git a/v2/ansible/executor/play_iterator.py b/v2/ansible/executor/play_iterator.py
index 4a149243d9118d..d6fe3750955943 100644
--- a/v2/ansible/executor/play_iterator.py
+++ b/v2/ansible/executor/play_iterator.py
@@ -20,6 +20,7 @@
__metaclass__ = type
from ansible.errors import *
+from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.boolean import boolean
@@ -38,9 +39,10 @@ def __init__(self, blocks):
self.run_state = PlayIterator.ITERATING_SETUP
self.fail_state = PlayIterator.FAILED_NONE
self.pending_setup = False
+ self.child_state = None
def __repr__(self):
- return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s" % (
+ return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, child state? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
@@ -49,6 +51,7 @@ def __repr__(self):
self.run_state,
self.fail_state,
self.pending_setup,
+ self.child_state,
)
def get_current_block(self):
@@ -64,6 +67,7 @@ def copy(self):
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
+ new_state.child_state = self.child_state
return new_state
class PlayIterator:
@@ -104,75 +108,35 @@ def get_host_state(self, host):
except KeyError:
raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)
- def get_next_task_for_host(self, host, peek=False, lock_step=True):
+ def get_next_task_for_host(self, host, peek=False):
+
s = self.get_host_state(host)
task = None
if s.run_state == self.ITERATING_COMPLETE:
return None
- else:
- while True:
- try:
- cur_block = s._blocks[s.cur_block]
- except IndexError:
- s.run_state = self.ITERATING_COMPLETE
- break
-
- if s.run_state == self.ITERATING_SETUP:
- s.run_state = self.ITERATING_TASKS
- if self._play._gather_facts == 'smart' and not host.gathered_facts or boolean(self._play._gather_facts):
- # mark the host as having gathered facts
- host.set_gathered_facts(True)
-
- task = Task()
- task.action = 'setup'
- task.set_loader(self._play._loader)
-
- elif s.run_state == self.ITERATING_TASKS:
- # clear the pending setup flag, since we're past that and it didn't fail
- if s.pending_setup:
- s.pending_setup = False
-
- if s.fail_state & self.FAILED_TASKS == self.FAILED_TASKS:
- s.run_state = self.ITERATING_RESCUE
- elif s.cur_regular_task >= len(cur_block.block):
- s.run_state = self.ITERATING_ALWAYS
- else:
- task = cur_block.block[s.cur_regular_task]
- s.cur_regular_task += 1
- break
- elif s.run_state == self.ITERATING_RESCUE:
- if s.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
- s.run_state = self.ITERATING_ALWAYS
- elif s.cur_rescue_task >= len(cur_block.rescue):
- if len(cur_block.rescue) > 0:
- s.fail_state = self.FAILED_NONE
- s.run_state = self.ITERATING_ALWAYS
- else:
- task = cur_block.rescue[s.cur_rescue_task]
- s.cur_rescue_task += 1
- break
- elif s.run_state == self.ITERATING_ALWAYS:
- if s.cur_always_task >= len(cur_block.always):
- if s.fail_state != self.FAILED_NONE:
- s.run_state = self.ITERATING_COMPLETE
- break
- else:
- s.cur_block += 1
- s.cur_regular_task = 0
- s.cur_rescue_task = 0
- s.cur_always_task = 0
- s.run_state = self.ITERATING_TASKS
- else:
- task= cur_block.always[s.cur_always_task]
- s.cur_always_task += 1
- break
+ elif s.run_state == self.ITERATING_SETUP:
+ s.run_state = self.ITERATING_TASKS
+ s.pending_setup = True
+ if self._play.gather_facts == 'smart' and not host._gathered_facts or boolean(self._play.gather_facts):
+ if not peek:
+ # mark the host as having gathered facts
+ host.set_gathered_facts(True)
+
+ task = Task()
+ task.action = 'setup'
+ task.args = {}
+ task.set_loader(self._play._loader)
+ else:
+ s.pending_setup = False
+
+ if not task:
+ (s, task) = self._get_next_task_from_state(s, peek=peek)
if task and task._role:
# if we had a current role, mark that role as completed
if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
s.cur_role._completed = True
-
s.cur_role = task._role
if not peek:
@@ -180,6 +144,86 @@ def get_next_task_for_host(self, host, peek=False, lock_step=True):
return (s, task)
+
+ def _get_next_task_from_state(self, state, peek):
+
+ task = None
+
+ # if we previously encountered a child block and we have a
+ # saved child state, try and get the next task from there
+ if state.child_state:
+ (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)
+ if task:
+ return (state.child_state, task)
+ else:
+ state.child_state = None
+
+ # try and find the next task, given the current state.
+ while True:
+ # try to get the current block from the list of blocks, and
+ # if we run past the end of the list we know we're done with
+ # this block
+ try:
+ block = state._blocks[state.cur_block]
+ except IndexError:
+ state.run_state = self.ITERATING_COMPLETE
+ return (state, None)
+
+ if state.run_state == self.ITERATING_TASKS:
+ # clear the pending setup flag, since we're past that and it didn't fail
+ if state.pending_setup:
+ state.pending_setup = False
+
+ if state.fail_state & self.FAILED_TASKS == self.FAILED_TASKS:
+ state.run_state = self.ITERATING_RESCUE
+ elif state.cur_regular_task >= len(block.block):
+ state.run_state = self.ITERATING_ALWAYS
+ else:
+ task = block.block[state.cur_regular_task]
+ state.cur_regular_task += 1
+
+ elif state.run_state == self.ITERATING_RESCUE:
+ if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
+ state.run_state = self.ITERATING_ALWAYS
+ elif state.cur_rescue_task >= len(block.rescue):
+ if len(block.rescue) > 0:
+ state.fail_state = self.FAILED_NONE
+ state.run_state = self.ITERATING_ALWAYS
+ else:
+ task = block.rescue[state.cur_rescue_task]
+ state.cur_rescue_task += 1
+
+ elif state.run_state == self.ITERATING_ALWAYS:
+ if state.cur_always_task >= len(block.always):
+ if state.fail_state != self.FAILED_NONE:
+ state.run_state = self.ITERATING_COMPLETE
+ else:
+ state.cur_block += 1
+ state.cur_regular_task = 0
+ state.cur_rescue_task = 0
+ state.cur_always_task = 0
+ state.run_state = self.ITERATING_TASKS
+ state.child_state = None
+ else:
+ task = block.always[state.cur_always_task]
+ state.cur_always_task += 1
+
+ elif state.run_state == self.ITERATING_COMPLETE:
+ return (state, None)
+
+ # if the current task is actually a child block, we dive into it
+ if isinstance(task, Block):
+ state.child_state = HostState(blocks=[task])
+ state.child_state.run_state = self.ITERATING_TASKS
+ state.child_state.cur_role = state.cur_role
+ (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)
+
+ # if something above set the task, break out of the loop now
+ if task:
+ break
+
+ return (state, task)
+
def mark_host_failed(self, host):
s = self.get_host_state(host)
if s.pending_setup:
@@ -206,25 +250,41 @@ def get_original_task(self, host, task):
the different processes, and not all data structures are preserved. This method
allows us to find the original task passed into the executor engine.
'''
+ def _search_block(block, task):
+ for t in block.block:
+ if isinstance(t, Block):
+ res = _search_block(t, task)
+ if res:
+ return res
+ elif t._uuid == task._uuid:
+ return t
+ for t in block.rescue:
+ if isinstance(t, Block):
+ res = _search_block(t, task)
+ if res:
+ return res
+ elif t._uuid == task._uuid:
+ return t
+ for t in block.always:
+ if isinstance(t, Block):
+ res = _search_block(t, task)
+ if res:
+ return res
+ elif t._uuid == task._uuid:
+ return t
+ return None
+
s = self.get_host_state(host)
for block in s._blocks:
- if block.block:
- for t in block.block:
- if t._uuid == task._uuid:
- return t
- if block.rescue:
- for t in block.rescue:
- if t._uuid == task._uuid:
- return t
- if block.always:
- for t in block.always:
- if t._uuid == task._uuid:
- return t
+ res = _search_block(block, task)
+ if res:
+ return res
+
return None
def add_tasks(self, host, task_list):
s = self.get_host_state(host)
- target_block = s._blocks[s.cur_block].copy()
+ target_block = s._blocks[s.cur_block].copy(exclude_parent=True)
if s.run_state == self.ITERATING_TASKS:
before = target_block.block[:s.cur_regular_task]
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 88ec05b9e8679e..324e6b01af9dfb 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -26,6 +26,7 @@
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
+from ansible.utils.color import colorize, hostcolor
from ansible.utils.debug import debug
class PlaybookExecutor:
@@ -70,8 +71,8 @@ def run(self):
for batch in self._get_serialized_batches(new_play):
if len(batch) == 0:
- self._tqm._callback.playbook_on_play_start(new_play.name)
- self._tqm._callback.playbook_on_no_hosts_matched()
+ self._tqm.send_callback('v2_playbook_on_play_start', new_play)
+ self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
result = 0
break
# restrict the inventory to the hosts in the serialized batch
@@ -90,6 +91,36 @@ def run(self):
raise
self._cleanup()
+
+ # FIXME: this stat summary stuff should be cleaned up and moved
+ # to a new method, if it even belongs here...
+ self._tqm._display.banner("PLAY RECAP")
+
+ hosts = sorted(self._tqm._stats.processed.keys())
+ for h in hosts:
+ t = self._tqm._stats.summarize(h)
+
+ self._tqm._display.display("%s : %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize('ok', t['ok'], 'green'),
+ colorize('changed', t['changed'], 'yellow'),
+ colorize('unreachable', t['unreachable'], 'red'),
+ colorize('failed', t['failures'], 'red')),
+ screen_only=True
+ )
+
+ self._tqm._display.display("%s : %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize('ok', t['ok'], None),
+ colorize('changed', t['changed'], None),
+ colorize('unreachable', t['unreachable'], None),
+ colorize('failed', t['failures'], None)),
+ log_only=True
+ )
+
+ self._tqm._display.display("", screen_only=True)
+ # END STATS STUFF
+
return result
def _cleanup(self, signum=None, framenum=None):
diff --git a/v2/ansible/executor/stats.py b/v2/ansible/executor/stats.py
new file mode 100644
index 00000000000000..626b2959a4721e
--- /dev/null
+++ b/v2/ansible/executor/stats.py
@@ -0,0 +1,51 @@
+# (c) 2012-2014, Michael DeHaan
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+class AggregateStats:
+ ''' holds stats about per-host activity during playbook runs '''
+
+ def __init__(self):
+
+ self.processed = {}
+ self.failures = {}
+ self.ok = {}
+ self.dark = {}
+ self.changed = {}
+ self.skipped = {}
+
+ def increment(self, what, host):
+ ''' helper function to bump a statistic '''
+
+ self.processed[host] = 1
+ prev = (getattr(self, what)).get(host, 0)
+ getattr(self, what)[host] = prev+1
+
+ def summarize(self, host):
+ ''' return information about a particular host '''
+
+ return dict(
+ ok = self.ok.get(host, 0),
+ failures = self.failures.get(host, 0),
+ unreachable = self.dark.get(host,0),
+ changed = self.changed.get(host, 0),
+ skipped = self.skipped.get(host, 0)
+ )
+
diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py
index 7eaba0061ef29e..3a3aa73f1080c6 100644
--- a/v2/ansible/executor/task_executor.py
+++ b/v2/ansible/executor/task_executor.py
@@ -237,10 +237,14 @@ def _execute(self, variables=None):
if self._task.poll > 0:
result = self._poll_async_result(result=result)
- # update the local copy of vars with the registered value, if specified
+ # update the local copy of vars with the registered value, if specified,
+ # or any facts which may have been generated by the module execution
if self._task.register:
vars_copy[self._task.register] = result
+ if 'ansible_facts' in result:
+ vars_copy.update(result['ansible_facts'])
+
# create a conditional object to evaluate task conditions
cond = Conditional(loader=self._loader)
@@ -266,6 +270,15 @@ def _execute(self, variables=None):
if attempt < retries - 1:
time.sleep(delay)
+ # do the final update of the local variables here, for both registered
+ # values and any facts which may have been created
+ if self._task.register:
+ variables[self._task.register] = result
+
+ if 'ansible_facts' in result:
+ variables.update(result['ansible_facts'])
+
+ # and return
debug("attempt loop complete, returning result")
return result
diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py
index 7c77f8e3a70941..0693e9dc56ccaa 100644
--- a/v2/ansible/executor/task_queue_manager.py
+++ b/v2/ansible/executor/task_queue_manager.py
@@ -29,9 +29,11 @@
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.process.result import ResultProcess
+from ansible.executor.stats import AggregateStats
from ansible.plugins import callback_loader, strategy_loader
from ansible.utils.debug import debug
+from ansible.utils.display import Display
__all__ = ['TaskQueueManager']
@@ -53,6 +55,9 @@ def __init__(self, inventory, callback, variable_manager, loader, options):
self._variable_manager = variable_manager
self._loader = loader
self._options = options
+ self._stats = AggregateStats()
+
+ self._display = Display()
# a special flag to help us exit cleanly
self._terminated = False
@@ -66,9 +71,14 @@ def __init__(self, inventory, callback, variable_manager, loader, options):
self._final_q = multiprocessing.Queue()
- # FIXME: hard-coded the default callback plugin here, which
- # should be configurable.
- self._callback = callback_loader.get(callback)
+ # load all available callback plugins
+ # FIXME: we need an option to white-list callback plugins
+ self._callback_plugins = []
+ for callback_plugin in callback_loader.all(class_only=True):
+ if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
+ self._callback_plugins.append(callback_plugin(self._display))
+ else:
+ self._callback_plugins.append(callback_plugin())
# create the pool of worker threads, based on the number of forks specified
try:
@@ -131,16 +141,11 @@ def run(self, play):
'''
connection_info = ConnectionInformation(play, self._options)
- self._callback.set_connection_info(connection_info)
-
- # run final validation on the play now, to make sure fields are templated
- # FIXME: is this even required? Everything is validated and merged at the
- # task level, so else in the play needs to be templated
- #all_vars = self._vmw.get_vars(loader=self._dlw, play=play)
- #all_vars = self._vmw.get_vars(loader=self._loader, play=play)
- #play.post_validate(all_vars=all_vars)
+ for callback_plugin in self._callback_plugins:
+ if hasattr(callback_plugin, 'set_connection_info'):
+ callback_plugin.set_connection_info(connection_info)
- self._callback.playbook_on_play_start(play.name)
+ self.send_callback('v2_playbook_on_play_start', play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(play.handlers)
@@ -172,9 +177,6 @@ def cleanup(self):
def get_inventory(self):
return self._inventory
- def get_callback(self):
- return self._callback
-
def get_variable_manager(self):
return self._variable_manager
@@ -201,3 +203,18 @@ def get_workers(self):
def terminate(self):
self._terminated = True
+
+ def send_callback(self, method_name, *args, **kwargs):
+ for callback_plugin in self._callback_plugins:
+ # a plugin that set self.disabled to True will not be called
+ # see osx_say.py example for such a plugin
+ if getattr(callback_plugin, 'disabled', False):
+ continue
+ methods = [
+ getattr(callback_plugin, method_name, None),
+ getattr(callback_plugin, 'on_any', None)
+ ]
+ for method in methods:
+ if method is not None:
+ method(*args, **kwargs)
+
diff --git a/v2/ansible/executor/task_queue_manager.py: b/v2/ansible/executor/task_queue_manager.py:
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py
index f8a3e967465da7..673fa95a551b37 100644
--- a/v2/ansible/parsing/__init__.py
+++ b/v2/ansible/parsing/__init__.py
@@ -99,11 +99,14 @@ def load_from_file(self, file_name):
def path_exists(self, path):
return os.path.exists(path)
+ def is_file(self, path):
+ return os.path.isfile(path)
+
def is_directory(self, path):
return os.path.isdir(path)
- def is_file(self, path):
- return os.path.isfile(path)
+ def list_directory(self, path):
+        return os.listdir(path)
def _safe_load(self, stream, file_name=None):
''' Implements yaml.safe_load(), except using our custom loader class. '''
diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py
index 03957bfe2f6691..7341e4d71471d8 100644
--- a/v2/ansible/playbook/block.py
+++ b/v2/ansible/playbook/block.py
@@ -43,6 +43,7 @@ def __init__(self, parent_block=None, role=None, task_include=None, use_handlers
self._task_include = task_include
self._use_handlers = use_handlers
self._dep_chain = []
+ self._vars = dict()
super(Block, self).__init__()
@@ -56,9 +57,12 @@ def get_vars(self):
if self._role:
all_vars.update(self._role.get_vars())
+ if self._parent_block:
+ all_vars.update(self._parent_block.get_vars())
if self._task_include:
all_vars.update(self._task_include.get_vars())
+ all_vars.update(self._vars)
return all_vars
@staticmethod
@@ -131,25 +135,29 @@ def _load_always(self, attr, ds):
# use_handlers=self._use_handlers,
# )
- def copy(self):
+ def copy(self, exclude_parent=False):
def _dupe_task_list(task_list, new_block):
new_task_list = []
for task in task_list:
- new_task = task.copy(exclude_block=True)
- new_task._block = new_block
+ if isinstance(task, Block):
+ new_task = task.copy(exclude_parent=True)
+ new_task._parent_block = new_block
+ else:
+ new_task = task.copy(exclude_block=True)
+ new_task._block = new_block
new_task_list.append(new_task)
return new_task_list
new_me = super(Block, self).copy()
new_me._use_handlers = self._use_handlers
- new_me._dep_chain = self._dep_chain[:]
+ new_me._dep_chain = self._dep_chain[:]
new_me.block = _dupe_task_list(self.block or [], new_me)
new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
new_me.always = _dupe_task_list(self.always or [], new_me)
new_me._parent_block = None
- if self._parent_block:
+ if self._parent_block and not exclude_parent:
new_me._parent_block = self._parent_block.copy()
new_me._role = None
@@ -260,7 +268,7 @@ def _get_parent_attribute(self, attr):
value = self._attributes[attr]
if not value:
if self._parent_block:
- value = getattr(self._block, attr)
+ value = getattr(self._parent_block, attr)
elif self._role:
value = getattr(self._role, attr)
if not value and len(self._dep_chain):
diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py
index 0e147205578406..cc262b4fb51b94 100644
--- a/v2/ansible/playbook/helpers.py
+++ b/v2/ansible/playbook/helpers.py
@@ -60,9 +60,9 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler
'''
# we import here to prevent a circular dependency with imports
+ from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
- #from ansible.playbook.task_include import TaskInclude
assert type(ds) == list
@@ -71,27 +71,17 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler
if not isinstance(task, dict):
raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds)
- #if 'include' in task:
- # cur_basedir = None
- # if isinstance(task, AnsibleBaseYAMLObject) and loader:
- # pos_info = task.get_position_info()
- # new_basedir = os.path.dirname(pos_info[0])
- # cur_basedir = loader.get_basedir()
- # loader.set_basedir(new_basedir)
-
- # t = TaskInclude.load(
- # task,
- # block=block,
- # role=role,
- # task_include=task_include,
- # use_handlers=use_handlers,
- # loader=loader
- # )
-
- # if cur_basedir and loader:
- # loader.set_basedir(cur_basedir)
- #else:
- if True:
+ if 'block' in task:
+ t = Block.load(
+ task,
+ parent_block=block,
+ role=role,
+ task_include=task_include,
+ use_handlers=use_handlers,
+ variable_manager=variable_manager,
+ loader=loader,
+ )
+ else:
if use_handlers:
t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
else:
@@ -120,15 +110,3 @@ def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader
return roles
-def compile_block_list(block_list):
- '''
- Given a list of blocks, compile them into a flat list of tasks
- '''
-
- task_list = []
-
- for block in block_list:
- task_list.extend(block.compile())
-
- return task_list
-
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
index cbe4e038617a82..5814650adb6810 100644
--- a/v2/ansible/playbook/play.py
+++ b/v2/ansible/playbook/play.py
@@ -24,7 +24,7 @@
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
-from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles, compile_block_list
+from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py
index 21bcd21803e423..72dd2a27d3f311 100644
--- a/v2/ansible/playbook/role/__init__.py
+++ b/v2/ansible/playbook/role/__init__.py
@@ -32,7 +32,7 @@
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
-from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
+from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role.include import RoleInclude
from ansible.playbook.role.metadata import RoleMetadata
from ansible.playbook.taggable import Taggable
diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py
index ab66898242cb5a..66afbec7a3e70c 100644
--- a/v2/ansible/playbook/task.py
+++ b/v2/ansible/playbook/task.py
@@ -78,7 +78,7 @@ class Task(Base, Conditional, Taggable, Become):
# FIXME: this should not be a Task
_meta = FieldAttribute(isa='string')
- _name = FieldAttribute(isa='string')
+ _name = FieldAttribute(isa='string', default='')
_no_log = FieldAttribute(isa='bool')
_notify = FieldAttribute(isa='list')
@@ -167,7 +167,6 @@ def munge(self, ds):
args_parser = ModuleArgsParser(task_ds=ds)
(action, args, delegate_to) = args_parser.parse()
-
new_ds['action'] = action
new_ds['args'] = args
new_ds['delegate_to'] = delegate_to
@@ -199,6 +198,8 @@ def post_validate(self, all_vars=dict(), fail_on_undefined=True):
def get_vars(self):
all_vars = self.vars.copy()
+ if self._block:
+ all_vars.update(self._block.get_vars())
if self._task_include:
all_vars.update(self._task_include.get_vars())
diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py
index 31b684e70dd1e4..bf074b78978ca2 100644
--- a/v2/ansible/plugins/__init__.py
+++ b/v2/ansible/plugins/__init__.py
@@ -240,7 +240,10 @@ def all(self, *args, **kwargs):
continue
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
- yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
+ if kwargs.get('class_only', False):
+ yield getattr(self._module_cache[path], self.class_name)
+ else:
+ yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
action_loader = PluginLoader(
'ActionModule',
diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py
index 088a806b61b0ae..a9a078b28964ba 100644
--- a/v2/ansible/plugins/action/copy.py
+++ b/v2/ansible/plugins/action/copy.py
@@ -231,7 +231,7 @@ def run(self, tmp=None, task_vars=dict()):
self._remove_tempfile_if_content_defined(content, content_tempfile)
# fix file permissions when the copy is done as a different user
- if (self._connection_info.become and self._connection_info.become_user != 'root':
+ if self._connection_info.become and self._connection_info.become_user != 'root':
self._remote_chmod('a+r', tmp_src, tmp)
if raw:
diff --git a/v2/ansible/plugins/callback/__init__.py b/v2/ansible/plugins/callback/__init__.py
index c6905229f934fd..2c2e7e74c65779 100644
--- a/v2/ansible/plugins/callback/__init__.py
+++ b/v2/ansible/plugins/callback/__init__.py
@@ -19,7 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from ansible.utils.display import Display
+#from ansible.utils.display import Display
__all__ = ["CallbackBase"]
@@ -34,8 +34,8 @@ class CallbackBase:
# FIXME: the list of functions here needs to be updated once we have
# finalized the list of callback methods used in the default callback
- def __init__(self):
- self._display = Display()
+ def __init__(self, display):
+ self._display = display
def set_connection_info(self, conn_info):
# FIXME: this is a temporary hack, as the connection info object
diff --git a/v2/ansible/plugins/callback/default.py b/v2/ansible/plugins/callback/default.py
index 6200aee7d43f58..bb87dc4a942c41 100644
--- a/v2/ansible/plugins/callback/default.py
+++ b/v2/ansible/plugins/callback/default.py
@@ -30,25 +30,15 @@ class CallbackModule(CallbackBase):
to stdout when new callback events are received.
'''
- def _print_banner(self, msg, color=None):
- '''
- Prints a header-looking line with stars taking up to 80 columns
- of width (3 columns, minimum)
- '''
- msg = msg.strip()
- star_len = (80 - len(msg))
- if star_len < 0:
- star_len = 3
- stars = "*" * star_len
- self._display.display("\n%s %s" % (msg, stars), color=color)
-
- def on_any(self, *args, **kwargs):
+ CALLBACK_VERSION = 2.0
+
+ def v2_on_any(self, *args, **kwargs):
pass
- def runner_on_failed(self, task, result, ignore_errors=False):
+ def v2_runner_on_failed(self, result, ignore_errors=False):
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red')
- def runner_on_ok(self, task, result):
+ def v2_runner_on_ok(self, result):
if result._task.action == 'include':
msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
@@ -68,7 +58,7 @@ def runner_on_ok(self, task, result):
msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
self._display.display(msg, color=color)
- def runner_on_skipped(self, task, result):
+ def v2_runner_on_skipped(self, result):
msg = "skipping: [%s]" % result._host.get_name()
if self._display._verbosity > 0 or 'verbose_always' in result._result:
indent = None
@@ -78,57 +68,66 @@ def runner_on_skipped(self, task, result):
msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
self._display.display(msg, color='cyan')
- def runner_on_unreachable(self, task, result):
+ def v2_runner_on_unreachable(self, result):
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), result._result), color='red')
- def runner_on_no_hosts(self, task):
+ def v2_runner_on_no_hosts(self, task):
+ pass
+
+ def v2_runner_on_async_poll(self, result):
pass
- def runner_on_async_poll(self, host, res, jid, clock):
+ def v2_runner_on_async_ok(self, result):
pass
- def runner_on_async_ok(self, host, res, jid):
+ def v2_runner_on_async_failed(self, result):
pass
- def runner_on_async_failed(self, host, res, jid):
+ def v2_runner_on_file_diff(self, result, diff):
pass
- def playbook_on_start(self):
+ def v2_playbook_on_start(self):
pass
- def playbook_on_notify(self, host, handler):
+ def v2_playbook_on_notify(self, result, handler):
pass
- def playbook_on_no_hosts_matched(self):
+ def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color='cyan')
- def playbook_on_no_hosts_remaining(self):
- self._print_banner("NO MORE HOSTS LEFT")
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._display.banner("NO MORE HOSTS LEFT")
- def playbook_on_task_start(self, name, is_conditional):
- self._print_banner("TASK [%s]" % name.strip())
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._display.banner("TASK [%s]" % task.get_name().strip())
- def playbook_on_cleanup_task_start(self, name):
- self._print_banner("CLEANUP TASK [%s]" % name.strip())
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
- def playbook_on_handler_task_start(self, name):
- self._print_banner("RUNNING HANDLER [%s]" % name.strip())
+ def v2_playbook_on_handler_task_start(self, task):
+ self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass
- def playbook_on_setup(self):
+ def v2_playbook_on_setup(self):
pass
- def playbook_on_import_for_host(self, host, imported_file):
+ def v2_playbook_on_import_for_host(self, result, imported_file):
pass
- def playbook_on_not_import_for_host(self, host, missing_file):
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
pass
- def playbook_on_play_start(self, name):
- self._print_banner("PLAY [%s]" % name.strip())
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if not name:
+ msg = "PLAY"
+ else:
+ msg = "PLAY [%s]" % name
+
+        self._display.banner(msg)
- def playbook_on_stats(self, stats):
+ def v2_playbook_on_stats(self, stats):
pass
diff --git a/v2/ansible/plugins/callback/minimal.py b/v2/ansible/plugins/callback/minimal.py
index 0b20eee64d5c0d..8ba883307b89f6 100644
--- a/v2/ansible/plugins/callback/minimal.py
+++ b/v2/ansible/plugins/callback/minimal.py
@@ -31,6 +31,8 @@ class CallbackModule(CallbackBase):
to stdout when new callback events are received.
'''
+ CALLBACK_VERSION = 2.0
+
def _print_banner(self, msg):
'''
Prints a header-looking line with stars taking up to 80 columns
diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py
index 196868ba96c723..59c0b9b84eef6a 100644
--- a/v2/ansible/plugins/strategies/__init__.py
+++ b/v2/ansible/plugins/strategies/__init__.py
@@ -28,7 +28,7 @@
from ansible.inventory.group import Group
from ansible.playbook.handler import Handler
-from ansible.playbook.helpers import load_list_of_blocks, compile_block_list
+from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role import ROLE_CACHE, hash_params
from ansible.plugins import module_loader
from ansible.utils.debug import debug
@@ -49,7 +49,7 @@ def __init__(self, tqm):
self._inventory = tqm.get_inventory()
self._workers = tqm.get_workers()
self._notified_handlers = tqm.get_notified_handlers()
- self._callback = tqm.get_callback()
+ #self._callback = tqm.get_callback()
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
@@ -73,6 +73,9 @@ def run(self, iterator, connection_info, result=True):
debug("running handlers")
result &= self.run_handlers(iterator, connection_info)
+ # send the stats callback
+ self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
+
if not result:
if num_unreachable > 0:
return 3
@@ -84,7 +87,7 @@ def run(self, iterator, connection_info, result=True):
return 0
def get_hosts_remaining(self, play):
- return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.get_name() not in self._tqm._unreachable_hosts]
+ return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
@@ -132,17 +135,23 @@ def _process_pending_results(self, iterator):
task = task_result._task
if result[0] == 'host_task_failed':
if not task.ignore_errors:
- debug("marking %s as failed" % host.get_name())
+ debug("marking %s as failed" % host.name)
iterator.mark_host_failed(host)
- self._tqm._failed_hosts[host.get_name()] = True
- self._callback.runner_on_failed(task, task_result)
+ self._tqm._failed_hosts[host.name] = True
+ self._tqm._stats.increment('failures', host.name)
+ self._tqm.send_callback('v2_runner_on_failed', task_result)
elif result[0] == 'host_unreachable':
- self._tqm._unreachable_hosts[host.get_name()] = True
- self._callback.runner_on_unreachable(task, task_result)
+ self._tqm._unreachable_hosts[host.name] = True
+ self._tqm._stats.increment('dark', host.name)
+ self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif result[0] == 'host_task_skipped':
- self._callback.runner_on_skipped(task, task_result)
+ self._tqm._stats.increment('skipped', host.name)
+ self._tqm.send_callback('v2_runner_on_skipped', task_result)
elif result[0] == 'host_task_ok':
- self._callback.runner_on_ok(task, task_result)
+ self._tqm._stats.increment('ok', host.name)
+ if 'changed' in task_result._result and task_result._result['changed']:
+ self._tqm._stats.increment('changed', host.name)
+ self._tqm.send_callback('v2_runner_on_ok', task_result)
self._pending_results -= 1
if host.name in self._blocked_hosts:
@@ -160,22 +169,6 @@ def _process_pending_results(self, iterator):
ret_results.append(task_result)
- #elif result[0] == 'include':
- # host = result[1]
- # task = result[2]
- # include_file = result[3]
- # include_vars = result[4]
- #
- # if isinstance(task, Handler):
- # # FIXME: figure out how to make includes work for handlers
- # pass
- # else:
- # original_task = iterator.get_original_task(host, task)
- # if original_task and original_task._role:
- # include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_file)
- # new_tasks = self._load_included_file(original_task, include_file, include_vars)
- # iterator.add_tasks(host, new_tasks)
-
elif result[0] == 'add_host':
task_result = result[1]
new_host_info = task_result.get('add_host', dict())
@@ -322,14 +315,11 @@ def _load_included_file(self, included_file):
loader=self._loader
)
-
- task_list = compile_block_list(block_list)
-
# set the vars for this task from those specified as params to the include
- for t in task_list:
- t.vars = included_file._args.copy()
+ for b in block_list:
+ b._vars = included_file._args.copy()
- return task_list
+ return block_list
def cleanup(self, iterator, connection_info):
'''
@@ -361,7 +351,7 @@ def cleanup(self, iterator, connection_info):
while work_to_do:
work_to_do = False
for host in failed_hosts:
- host_name = host.get_name()
+ host_name = host.name
if host_name in self._tqm._failed_hosts:
iterator.mark_host_failed(host)
@@ -377,7 +367,7 @@ def cleanup(self, iterator, connection_info):
self._blocked_hosts[host_name] = True
task = iterator.get_next_task_for_host(host)
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
- self._callback.playbook_on_cleanup_task_start(task.get_name())
+ self._tqm.send_callback('v2_playbook_on_cleanup_task_start', task)
self._queue_task(host, task, task_vars, connection_info)
self._process_pending_results(iterator)
@@ -398,31 +388,28 @@ def run_handlers(self, iterator, connection_info):
# FIXME: getting the handlers from the iterators play should be
# a method on the iterator, which may also filter the list
# of handlers based on the notified list
- handlers = compile_block_list(iterator._play.handlers)
-
- debug("handlers are: %s" % handlers)
- for handler in handlers:
- handler_name = handler.get_name()
-
- if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
- if not len(self.get_hosts_remaining(iterator._play)):
- self._callback.playbook_on_no_hosts_remaining()
- result = False
- break
-
- self._callback.playbook_on_handler_task_start(handler_name)
- for host in self._notified_handlers[handler_name]:
- if not handler.has_triggered(host):
- task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
- self._queue_task(host, handler, task_vars, connection_info)
- handler.flag_for_host(host)
-
- self._process_pending_results(iterator)
-
- self._wait_on_pending_results(iterator)
-
- # wipe the notification list
- self._notified_handlers[handler_name] = []
- debug("done running handlers, result is: %s" % result)
+ for handler_block in iterator._play.handlers:
+            debug("handlers are: %s" % handler_block)
+ # FIXME: handlers need to support the rescue/always portions of blocks too,
+ # but this may take some work in the iterator and gets tricky when
+ # we consider the ability of meta tasks to flush handlers
+ for handler in handler_block.block:
+ handler_name = handler.get_name()
+ if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
+ if not len(self.get_hosts_remaining(iterator._play)):
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ result = False
+ break
+ self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
+ for host in self._notified_handlers[handler_name]:
+ if not handler.has_triggered(host):
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
+ self._queue_task(host, handler, task_vars, connection_info)
+ handler.flag_for_host(host)
+ self._process_pending_results(iterator)
+ self._wait_on_pending_results(iterator)
+ # wipe the notification list
+ self._notified_handlers[handler_name] = []
+ debug("done running handlers, result is: %s" % result)
return result
diff --git a/v2/ansible/plugins/strategies/linear.py b/v2/ansible/plugins/strategies/linear.py
index b503d6ebd51022..fcda46a7af0686 100644
--- a/v2/ansible/plugins/strategies/linear.py
+++ b/v2/ansible/plugins/strategies/linear.py
@@ -21,6 +21,7 @@
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
+from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
@@ -52,6 +53,9 @@ def _get_next_task_lockstep(self, hosts, iterator):
lowest_cur_block = len(iterator._blocks)
for (k, v) in host_tasks.iteritems():
+ if v is None:
+ continue
+
(s, t) = v
if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
lowest_cur_block = s.cur_block
@@ -131,7 +135,7 @@ def run(self, iterator, connection_info):
debug("done getting the remaining hosts for this loop")
if len(hosts_left) == 0:
debug("out of hosts to run on")
- self._callback.playbook_on_no_hosts_remaining()
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
@@ -184,7 +188,6 @@ def run(self, iterator, connection_info):
meta_action = task.args.get('_raw_params')
if meta_action == 'noop':
# FIXME: issue a callback for the noop here?
- print("%s => NOOP" % host)
continue
elif meta_action == 'flush_handlers':
self.run_handlers(iterator, connection_info)
@@ -192,7 +195,7 @@ def run(self, iterator, connection_info):
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
else:
if not callback_sent:
- self._callback.playbook_on_task_start(task.get_name(), False)
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
callback_sent = True
self._blocked_hosts[host.get_name()] = True
@@ -234,6 +237,10 @@ def __repr__(self):
include_results = [ res._result ]
for include_result in include_results:
+ # if the task result was skipped or failed, continue
+                    if ('skipped' in include_result and include_result['skipped']) or 'failed' in include_result:
+ continue
+
original_task = iterator.get_original_task(res._host, res._task)
if original_task and original_task._role:
include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include'])
@@ -263,27 +270,31 @@ def __repr__(self):
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
- all_tasks = dict((host, []) for host in hosts_left)
+ all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
- new_tasks = self._load_included_file(included_file)
+ new_blocks = self._load_included_file(included_file)
except AnsibleError, e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
# FIXME: callback here?
print(e)
- noop_tasks = [noop_task for t in new_tasks]
- for host in hosts_left:
- if host in included_file._hosts:
- all_tasks[host].extend(new_tasks)
- else:
- all_tasks[host].extend(noop_tasks)
+ for new_block in new_blocks:
+ noop_block = Block(parent_block=task._block)
+ noop_block.block = [noop_task for t in new_block.block]
+ noop_block.always = [noop_task for t in new_block.always]
+ noop_block.rescue = [noop_task for t in new_block.rescue]
+ for host in hosts_left:
+ if host in included_file._hosts:
+ all_blocks[host].append(new_block)
+ else:
+ all_blocks[host].append(noop_block)
for host in hosts_left:
- iterator.add_tasks(host, all_tasks[host])
+ iterator.add_tasks(host, all_blocks[host])
debug("results queue empty")
except (IOError, EOFError), e:
diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py
index 09f5ef4a30f9de..f771452a9d3417 100644
--- a/v2/ansible/utils/cli.py
+++ b/v2/ansible/utils/cli.py
@@ -68,6 +68,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False,
default=None)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
+ parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
+ help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
if subset_opts:
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
diff --git a/v2/ansible/utils/color.py b/v2/ansible/utils/color.py
index ebcb4317f7013f..a87717073ebf67 100644
--- a/v2/ansible/utils/color.py
+++ b/v2/ansible/utils/color.py
@@ -73,3 +73,20 @@ def stringc(text, color):
# --- end "pretty"
+def colorize(lead, num, color):
+ """ Print 'lead' = 'num' in 'color' """
+ if num != 0 and ANSIBLE_COLOR and color is not None:
+ return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
+ else:
+ return "%s=%-4s" % (lead, str(num))
+
+def hostcolor(host, stats, color=True):
+ if ANSIBLE_COLOR and color:
+ if stats['failures'] != 0 or stats['unreachable'] != 0:
+ return "%-37s" % stringc(host, 'red')
+ elif stats['changed'] != 0:
+ return "%-37s" % stringc(host, 'yellow')
+ else:
+ return "%-37s" % stringc(host, 'green')
+ return "%-26s" % host
+
diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py
index 3976198703955a..758a62fceea7b5 100644
--- a/v2/ansible/utils/display.py
+++ b/v2/ansible/utils/display.py
@@ -112,3 +112,15 @@ def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self._warning(msg)
+ def banner(self, msg, color=None):
+ '''
+ Prints a header-looking line with stars taking up to 80 columns
+ of width (3 columns, minimum)
+ '''
+ msg = msg.strip()
+ star_len = (80 - len(msg))
+ if star_len < 0:
+ star_len = 3
+ stars = "*" * star_len
+ self.display("\n%s %s" % (msg, stars), color=color)
+
diff --git a/v2/ansible/vars/__init__.py b/v2/ansible/vars/__init__.py
index f9e7cba9cd008d..eb75d9c9929b8a 100644
--- a/v2/ansible/vars/__init__.py
+++ b/v2/ansible/vars/__init__.py
@@ -162,10 +162,9 @@ def get_vars(self, loader, play=None, host=None, task=None):
all_vars = self._combine_vars(all_vars, self._group_vars_files['all'])
for group in host.get_groups():
- group_name = group.get_name()
all_vars = self._combine_vars(all_vars, group.get_vars())
- if group_name in self._group_vars_files and group_name != 'all':
- all_vars = self._combine_vars(all_vars, self._group_vars_files[group_name])
+ if group.name in self._group_vars_files and group.name != 'all':
+ all_vars = self._combine_vars(all_vars, self._group_vars_files[group.name])
host_name = host.get_name()
if host_name in self._host_vars_files:
@@ -228,7 +227,7 @@ def _get_inventory_basename(self, path):
'''
(name, ext) = os.path.splitext(os.path.basename(path))
- if ext not in ('yml', 'yaml'):
+ if ext not in ('.yml', '.yaml'):
return os.path.basename(path)
else:
return name
@@ -239,11 +238,11 @@ def _load_inventory_file(self, path, loader):
basename of the file without the extension
'''
- if os.path.isdir(path):
+ if loader.is_directory(path):
data = dict()
try:
- names = os.listdir(path)
+ names = loader.list_directory(path)
except os.error, err:
raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))
@@ -270,7 +269,7 @@ def add_host_vars_file(self, path, loader):
the extension, for matching against a given inventory host name
'''
- if os.path.exists(path):
+ if loader.path_exists(path):
(name, data) = self._load_inventory_file(path, loader)
self._host_vars_files[name] = data
@@ -281,7 +280,7 @@ def add_group_vars_file(self, path, loader):
the extension, for matching against a given inventory host name
'''
- if os.path.exists(path):
+ if loader.path_exists(path):
(name, data) = self._load_inventory_file(path, loader)
self._group_vars_files[name] = data
diff --git a/v2/samples/include.yml b/v2/samples/include.yml
index 2ffdc3dd76561d..3a2e88f8985976 100644
--- a/v2/samples/include.yml
+++ b/v2/samples/include.yml
@@ -1,4 +1,4 @@
- debug: msg="this is the include, a=={{a}}"
-- debug: msg="this is the second debug in the include"
-- debug: msg="this is the third debug in the include, and a is still {{a}}"
+#- debug: msg="this is the second debug in the include"
+#- debug: msg="this is the third debug in the include, and a is still {{a}}"
diff --git a/v2/samples/localhost_include.yml b/v2/samples/localhost_include.yml
new file mode 100644
index 00000000000000..eca8b5716caa5a
--- /dev/null
+++ b/v2/samples/localhost_include.yml
@@ -0,0 +1,3 @@
+- debug: msg="this is the localhost include"
+- include: common_include.yml
+
diff --git a/v2/samples/test_blocks_of_blocks.yml b/v2/samples/test_blocks_of_blocks.yml
index 8092a9ad8b3fcd..7933cb61833bf5 100644
--- a/v2/samples/test_blocks_of_blocks.yml
+++ b/v2/samples/test_blocks_of_blocks.yml
@@ -6,3 +6,8 @@
- block:
- block:
- debug: msg="are we there yet?"
+ always:
+ - debug: msg="a random always block"
+ - fail:
+ rescue:
+ - debug: msg="rescuing from the fail"
diff --git a/v2/samples/test_include.yml b/v2/samples/test_include.yml
index c81e5ecd5a951d..60befd9911d50d 100644
--- a/v2/samples/test_include.yml
+++ b/v2/samples/test_include.yml
@@ -19,7 +19,7 @@
always:
- include: include.yml a=always
- handlers:
+ #handlers:
#- name: foo
# include: include.yml a="this is a handler"
diff --git a/v2/test/mock/loader.py b/v2/test/mock/loader.py
index b79dfa509db405..cf9d7ea72d0fe0 100644
--- a/v2/test/mock/loader.py
+++ b/v2/test/mock/loader.py
@@ -47,6 +47,9 @@ def is_file(self, path):
def is_directory(self, path):
return path in self._known_directories
+ def list_directory(self, path):
+ return [x for x in self._known_directories]
+
def _add_known_directory(self, directory):
if directory not in self._known_directories:
self._known_directories.append(directory)
diff --git a/v2/test/playbook/test_block.py b/v2/test/playbook/test_block.py
index 9c1d06cbcb8c1a..348681527bb7b4 100644
--- a/v2/test/playbook/test_block.py
+++ b/v2/test/playbook/test_block.py
@@ -75,9 +75,3 @@ def test_load_implicit_block(self):
self.assertEqual(len(b.block), 1)
assert isinstance(b.block[0], Task)
- def test_block_compile(self):
- ds = [dict(action='foo')]
- b = Block.load(ds)
- tasks = b.compile()
- self.assertEqual(len(tasks), 1)
- self.assertIsInstance(tasks[0], Task)
diff --git a/v2/test/playbook/test_playbook.py b/v2/test/playbook/test_playbook.py
index f3ba6785f3f59a..1e72421818be6e 100644
--- a/v2/test/playbook/test_playbook.py
+++ b/v2/test/playbook/test_playbook.py
@@ -24,6 +24,7 @@
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook import Playbook
+from ansible.vars import VariableManager
from test.mock.loader import DictDataLoader
@@ -36,7 +37,8 @@ def tearDown(self):
pass
def test_empty_playbook(self):
- p = Playbook()
+ fake_loader = DictDataLoader({})
+ p = Playbook(loader=fake_loader)
def test_basic_playbook(self):
fake_loader = DictDataLoader({
@@ -61,6 +63,7 @@ def test_bad_playbook_files(self):
""",
})
- self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", fake_loader)
- self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", fake_loader)
+ vm = VariableManager()
+ self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
+ self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
diff --git a/v2/test/playbook/test_task_include.py b/v2/test/playbook/test_task_include.py
deleted file mode 100644
index 55f7461f050a2a..00000000000000
--- a/v2/test/playbook/test_task_include.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.compat.tests import unittest
-from ansible.errors import AnsibleParserError
-from ansible.parsing.yaml.objects import AnsibleMapping
-from ansible.playbook.task_include import TaskInclude
-
-from test.mock.loader import DictDataLoader
-
-class TestTaskInclude(unittest.TestCase):
-
- def setUp(self):
- self._fake_loader = DictDataLoader({
- "foo.yml": """
- - shell: echo "hello world"
- """
- })
-
- pass
-
- def tearDown(self):
- pass
-
- def test_empty_task_include(self):
- ti = TaskInclude()
-
- def test_basic_task_include(self):
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml'), loader=self._fake_loader)
- tasks = ti.compile()
-
- def test_task_include_with_loop(self):
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml', with_items=['a', 'b', 'c']), loader=self._fake_loader)
-
- def test_task_include_with_conditional(self):
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml', when="1 == 1"), loader=self._fake_loader)
-
- def test_task_include_with_tags(self):
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags="foo"), loader=self._fake_loader)
- ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags=["foo", "bar"]), loader=self._fake_loader)
-
- def test_task_include_errors(self):
- self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include=''), loader=self._fake_loader)
- self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml', vars="1"), loader=self._fake_loader)
- self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml a=1', vars=dict(b=2)), loader=self._fake_loader)
-
diff --git a/v2/test/vars/test_variable_manager.py b/v2/test/vars/test_variable_manager.py
index 63a80a7a1c5f59..f8d815eb6f78a7 100644
--- a/v2/test/vars/test_variable_manager.py
+++ b/v2/test/vars/test_variable_manager.py
@@ -35,8 +35,10 @@ def tearDown(self):
pass
def test_basic_manager(self):
+ fake_loader = DictDataLoader({})
+
v = VariableManager()
- self.assertEqual(v.get_vars(), dict())
+ self.assertEqual(v.get_vars(loader=fake_loader), dict())
self.assertEqual(
v._merge_dicts(
@@ -52,23 +54,26 @@ def test_basic_manager(self):
)
- def test_manager_extra_vars(self):
+ def test_variable_manager_extra_vars(self):
+ fake_loader = DictDataLoader({})
+
extra_vars = dict(a=1, b=2, c=3)
v = VariableManager()
v.set_extra_vars(extra_vars)
- self.assertEqual(v.get_vars(), extra_vars)
- self.assertIsNot(v.extra_vars, extra_vars)
+ for (key, val) in extra_vars.iteritems():
+ self.assertEqual(v.get_vars(loader=fake_loader).get(key), val)
+ self.assertIsNot(v.extra_vars.get(key), val)
- def test_manager_host_vars_file(self):
+ def test_variable_manager_host_vars_file(self):
fake_loader = DictDataLoader({
"host_vars/hostname1.yml": """
foo: bar
"""
})
- v = VariableManager(loader=fake_loader)
- v.add_host_vars_file("host_vars/hostname1.yml")
+ v = VariableManager()
+ v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader)
self.assertIn("hostname1", v._host_vars_files)
self.assertEqual(v._host_vars_files["hostname1"], dict(foo="bar"))
@@ -77,37 +82,43 @@ def test_manager_host_vars_file(self):
mock_host.get_vars.return_value = dict()
mock_host.get_groups.return_value = ()
- self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar"))
+ self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar")
- def test_manager_group_vars_file(self):
+ def test_variable_manager_group_vars_file(self):
fake_loader = DictDataLoader({
"group_vars/somegroup.yml": """
foo: bar
"""
})
- v = VariableManager(loader=fake_loader)
- v.add_group_vars_file("group_vars/somegroup.yml")
+ v = VariableManager()
+ v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader)
self.assertIn("somegroup", v._group_vars_files)
self.assertEqual(v._group_vars_files["somegroup"], dict(foo="bar"))
+ mock_group = MagicMock()
+        mock_group.name = "somegroup"
+ mock_group.get_ancestors.return_value = ()
+
mock_host = MagicMock()
mock_host.get_name.return_value = "hostname1"
mock_host.get_vars.return_value = dict()
- mock_host.get_groups.return_value = ["somegroup"]
+        mock_host.get_groups.return_value = (mock_group,)
+
+ self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar")
- self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar"))
+ def test_variable_manager_play_vars(self):
+ fake_loader = DictDataLoader({})
- def test_manager_play_vars(self):
mock_play = MagicMock()
mock_play.get_vars.return_value = dict(foo="bar")
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = []
v = VariableManager()
- self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar"))
+ self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar")
- def test_manager_play_vars_files(self):
+ def test_variable_manager_play_vars_files(self):
fake_loader = DictDataLoader({
"/path/to/somefile.yml": """
foo: bar
@@ -119,13 +130,15 @@ def test_manager_play_vars_files(self):
mock_play.get_roles.return_value = []
mock_play.get_vars_files.return_value = ['/path/to/somefile.yml']
- v = VariableManager(loader=fake_loader)
- self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar"))
+ v = VariableManager()
+ self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar")
+
+ def test_variable_manager_task_vars(self):
+ fake_loader = DictDataLoader({})
- def test_manager_task_vars(self):
mock_task = MagicMock()
mock_task.get_vars.return_value = dict(foo="bar")
v = VariableManager()
- self.assertEqual(v.get_vars(task=mock_task), dict(foo="bar"))
+ self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task).get("foo"), "bar")
From 34aba2dd9a18d8e2cea5c8cdb7eb70b5f9fc0bbd Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Thu, 2 Apr 2015 11:26:42 -0500
Subject: [PATCH 0224/3617] Fixing dupe option for -u in v2
---
v2/ansible/utils/cli.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py
index 20998cb43fa504..6500234c74125e 100644
--- a/v2/ansible/utils/cli.py
+++ b/v2/ansible/utils/cli.py
@@ -70,8 +70,6 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False,
default=None)
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
- help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
if subset_opts:
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
From 811a906332eed12e9d3d976032341a6912b56247 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Thu, 2 Apr 2015 11:54:45 -0500
Subject: [PATCH 0225/3617] Fixing the synchronize action plugin for v2
---
v2/ansible/executor/task_executor.py | 22 +++++++++
v2/ansible/plugins/action/synchronize.py | 57 ++++++++++--------------
2 files changed, 46 insertions(+), 33 deletions(-)
diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py
index 6d19349ba4dbd0..256d26f8dcf843 100644
--- a/v2/ansible/executor/task_executor.py
+++ b/v2/ansible/executor/task_executor.py
@@ -73,7 +73,29 @@ def run(self):
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
+
+ # loop through the item results, and remember the changed/failed
+ # result flags based on any item there.
+ changed = False
+ failed = False
+ for item in item_results:
+ if 'changed' in item:
+ changed = True
+ if 'failed' in item:
+ failed = True
+
+ # create the overall result item, and set the changed/failed
+ # flags there to reflect the overall result of the loop
res = dict(results=item_results)
+
+ if changed:
+ res['changed'] = True
+
+ if failed:
+ res['failed'] = True
+ res['msg'] = 'One or more items failed'
+ else:
+ res['msg'] = 'All items completed'
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
diff --git a/v2/ansible/plugins/action/synchronize.py b/v2/ansible/plugins/action/synchronize.py
index 298d6a19599399..81e335b0098414 100644
--- a/v2/ansible/plugins/action/synchronize.py
+++ b/v2/ansible/plugins/action/synchronize.py
@@ -23,20 +23,18 @@
class ActionModule(ActionBase):
- def _get_absolute_path(self, path, task_vars):
- if 'vars' in task_vars:
- if '_original_file' in task_vars['vars']:
- # roles
- original_path = path
- path = self._loader.path_dwim_relative(task_vars['_original_file'], 'files', path, self.runner.basedir)
- if original_path and original_path[-1] == '/' and path[-1] != '/':
- # make sure the dwim'd path ends in a trailing "/"
- # if the original path did
- path += '/'
+ def _get_absolute_path(self, path):
+ if self._task._role is not None:
+ original_path = path
+ path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)
+ if original_path and original_path[-1] == '/' and path[-1] != '/':
+ # make sure the dwim'd path ends in a trailing "/"
+ # if the original path did
+ path += '/'
return path
- def _process_origin(self, host, path, user, task_vars):
+ def _process_origin(self, host, path, user):
if not host in ['127.0.0.1', 'localhost']:
if user:
@@ -46,10 +44,10 @@ def _process_origin(self, host, path, user, task_vars):
else:
if not ':' in path:
if not path.startswith('/'):
- path = self._get_absolute_path(path=path, task_vars=task_vars)
+ path = self._get_absolute_path(path=path)
return path
- def _process_remote(self, host, path, user, task_vars):
+ def _process_remote(self, host, task, path, user):
transport = self._connection_info.connection
return_data = None
if not host in ['127.0.0.1', 'localhost'] or transport != "local":
@@ -62,7 +60,7 @@ def _process_remote(self, host, path, user, task_vars):
if not ':' in return_data:
if not return_data.startswith('/'):
- return_data = self._get_absolute_path(path=return_data, task_vars=task_vars)
+ return_data = self._get_absolute_path(path=return_data)
return return_data
@@ -76,7 +74,7 @@ def run(self, tmp=None, task_vars=dict()):
# IF original transport is not local, override transport and disable sudo.
if original_transport != 'local':
task_vars['ansible_connection'] = 'local'
- self.transport_overridden = True
+ transport_overridden = True
self.runner.sudo = False
src = self._task.args.get('src', None)
@@ -90,8 +88,6 @@ def run(self, tmp=None, task_vars=dict()):
dest_host = task_vars.get('ansible_ssh_host', task_vars.get('inventory_hostname'))
# allow ansible_ssh_host to be templated
- # FIXME: does this still need to be templated?
- #dest_host = template.template(self.runner.basedir, dest_host, task_vars, fail_on_undefined=True)
dest_is_local = dest_host in ['127.0.0.1', 'localhost']
# CHECK FOR NON-DEFAULT SSH PORT
@@ -113,13 +109,13 @@ def run(self, tmp=None, task_vars=dict()):
# FIXME: not sure if this is in connection info yet or not...
#if conn.delegate != conn.host:
# if 'hostvars' in task_vars:
- # if conn.delegate in task_vars['hostvars'] and self.original_transport != 'local':
+ # if conn.delegate in task_vars['hostvars'] and original_transport != 'local':
# # use a delegate host instead of localhost
# use_delegate = True
# COMPARE DELEGATE, HOST AND TRANSPORT
process_args = False
- if not dest_host is src_host and self.original_transport != 'local':
+ if not dest_host is src_host and original_transport != 'local':
# interpret and task_vars remote host info into src or dest
process_args = True
@@ -127,7 +123,7 @@ def run(self, tmp=None, task_vars=dict()):
if process_args or use_delegate:
user = None
- if boolean(options.get('set_remote_user', 'yes')):
+ if boolean(task_vars.get('set_remote_user', 'yes')):
if use_delegate:
user = task_vars['hostvars'][conn.delegate].get('ansible_ssh_user')
@@ -146,31 +142,26 @@ def run(self, tmp=None, task_vars=dict()):
# use the mode to define src and dest's url
if self._task.args.get('mode', 'push') == 'pull':
# src is a remote path: @, dest is a local path
- src = self._process_remote(src_host, src, user, task_vars)
- dest = self._process_origin(dest_host, dest, user, task_vars)
+ src = self._process_remote(src_host, src, user)
+ dest = self._process_origin(dest_host, dest, user)
else:
# src is a local path, dest is a remote path: @
- src = self._process_origin(src_host, src, user, task_vars)
- dest = self._process_remote(dest_host, dest, user, task_vars)
+ src = self._process_origin(src_host, src, user)
+ dest = self._process_remote(dest_host, dest, user)
# Allow custom rsync path argument.
rsync_path = self._task.args.get('rsync_path', None)
# If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
- if not rsync_path and self.transport_overridden and self._connection_info.sudo and not dest_is_local:
- self._task.args['rsync_path'] = 'sudo rsync'
+ if not rsync_path and transport_overridden and self._connection_info.become and self._connection_info.become_method == 'sudo' and not dest_is_local:
+ rsync_path = 'sudo rsync'
# make sure rsync path is quoted.
if rsync_path:
- rsync_path = '"%s"' % rsync_path
-
- # FIXME: noop stuff still needs to be figured out
- #module_args = ""
- #if self.runner.noop_on_check(task_vars):
- # module_args = "CHECKMODE=True"
+ self._task.args['rsync_path'] = '"%s"' % rsync_path
# run the module and store the result
- result = self.runner._execute_module('synchronize', module_args=, complex_args=options, task_vars=task_vars)
+ result = self._execute_module('synchronize')
return result
From bfae708bbf70a7e9bf1eda5c5983368fed5c9420 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Wed, 1 Apr 2015 16:25:37 -0700
Subject: [PATCH 0226/3617] Port v2 to the PyYAML C extension
---
v2/ansible/parsing/__init__.py | 22 ++++++++++++---
v2/ansible/parsing/yaml/constructor.py | 36 ++++++++++++------------
v2/ansible/parsing/yaml/loader.py | 38 ++++++++++++++++++--------
3 files changed, 61 insertions(+), 35 deletions(-)
diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py
index 31a97af5089179..bce5b2b667824a 100644
--- a/v2/ansible/parsing/__init__.py
+++ b/v2/ansible/parsing/__init__.py
@@ -29,7 +29,7 @@
from ansible.parsing.vault import VaultLib
from ansible.parsing.splitter import unquote
from ansible.parsing.yaml.loader import AnsibleLoader
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode
from ansible.utils.path import unfrackpath
class DataLoader():
@@ -70,13 +70,27 @@ def load(self, data, file_name='', show_content=True):
# we first try to load this data as JSON
return json.loads(data)
except:
+ # if loading JSON failed for any reason, we go ahead
+ # and try to parse it as YAML instead
+
+ if isinstance(data, AnsibleUnicode):
+ # The PyYAML's libyaml bindings use PyUnicode_CheckExact so
+ # they are unable to cope with our subclass.
+ # Unwrap and re-wrap the unicode so we can keep track of line
+ # numbers
+ new_data = unicode(data)
+ else:
+ new_data = data
try:
- # if loading JSON failed for any reason, we go ahead
- # and try to parse it as YAML instead
- return self._safe_load(data, file_name=file_name)
+ new_data = self._safe_load(new_data, file_name=file_name)
except YAMLError as yaml_exc:
self._handle_error(yaml_exc, file_name, show_content)
+ if isinstance(data, AnsibleUnicode):
+ new_data = AnsibleUnicode(new_data)
+ new_data.ansible_pos = data.ansible_pos
+ return new_data
+
def load_from_file(self, file_name):
''' Loads data from a file, which can contain either JSON or YAML. '''
diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py
index 0043b8a2f044d5..aed2553c05b9af 100644
--- a/v2/ansible/parsing/yaml/constructor.py
+++ b/v2/ansible/parsing/yaml/constructor.py
@@ -20,7 +20,6 @@
__metaclass__ = type
from yaml.constructor import Constructor
-from ansible.utils.unicode import to_unicode
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleUnicode
class AnsibleConstructor(Constructor):
@@ -33,20 +32,11 @@ def construct_yaml_map(self, node):
yield data
value = self.construct_mapping(node)
data.update(value)
- data.ansible_pos = value.ansible_pos
+ data.ansible_pos = self._node_position_info(node)
def construct_mapping(self, node, deep=False):
ret = AnsibleMapping(super(Constructor, self).construct_mapping(node, deep))
-
- # in some cases, we may have pre-read the data and then
- # passed it to the load() call for YAML, in which case we
- # want to override the default datasource (which would be
- # '') to the actual filename we read in
- if self._ansible_file_name:
- data_source = self._ansible_file_name
- else:
- data_source = node.__datasource__
- ret.ansible_pos = (data_source, node.__line__, node.__column__)
+ ret.ansible_pos = self._node_position_info(node)
return ret
@@ -54,17 +44,25 @@ def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
value = self.construct_scalar(node)
- value = to_unicode(value)
- ret = AnsibleUnicode(self.construct_scalar(node))
+ ret = AnsibleUnicode(value)
- if self._ansible_file_name:
- data_source = self._ansible_file_name
- else:
- data_source = node.__datasource__
- ret.ansible_pos = (data_source, node.__line__, node.__column__)
+ ret.ansible_pos = self._node_position_info(node)
return ret
+ def _node_position_info(self, node):
+ # the line number where the previous token has ended (plus empty lines)
+ column = node.start_mark.column + 1
+ line = node.start_mark.line + 1
+
+ # in some cases, we may have pre-read the data and then
+ # passed it to the load() call for YAML, in which case we
+ # want to override the default datasource (which would be
+ # '') to the actual filename we read in
+ datasource = self._ansible_file_name or node.start_mark.name
+
+ return (datasource, line, column)
+
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:map',
AnsibleConstructor.construct_yaml_map)
diff --git a/v2/ansible/parsing/yaml/loader.py b/v2/ansible/parsing/yaml/loader.py
index 0d1300781901b7..4e0049ed2a8f3e 100644
--- a/v2/ansible/parsing/yaml/loader.py
+++ b/v2/ansible/parsing/yaml/loader.py
@@ -19,20 +19,34 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from yaml.reader import Reader
-from yaml.scanner import Scanner
-from yaml.parser import Parser
+try:
+ from _yaml import CParser, CEmitter
+ HAVE_PYYAML_C = True
+except ImportError:
+ HAVE_PYYAML_C = False
+
from yaml.resolver import Resolver
-from ansible.parsing.yaml.composer import AnsibleComposer
from ansible.parsing.yaml.constructor import AnsibleConstructor
-class AnsibleLoader(Reader, Scanner, Parser, AnsibleComposer, AnsibleConstructor, Resolver):
- def __init__(self, stream, file_name=None):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- AnsibleComposer.__init__(self)
- AnsibleConstructor.__init__(self, file_name=file_name)
- Resolver.__init__(self)
+if HAVE_PYYAML_C:
+ class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
+ def __init__(self, stream, file_name=None):
+ CParser.__init__(self, stream)
+ AnsibleConstructor.__init__(self, file_name=file_name)
+ Resolver.__init__(self)
+else:
+ from yaml.reader import Reader
+ from yaml.scanner import Scanner
+ from yaml.parser import Parser
+
+ from ansible.parsing.yaml.composer import AnsibleComposer
+ class AnsibleLoader(Reader, Scanner, Parser, AnsibleComposer, AnsibleConstructor, Resolver):
+ def __init__(self, stream, file_name=None):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ AnsibleComposer.__init__(self)
+ AnsibleConstructor.__init__(self, file_name=file_name)
+ Resolver.__init__(self)
From ac6b7045dbc45b7d6f42bf46a2df3a6c9a8c1aaf Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 2 Apr 2015 11:09:44 -0700
Subject: [PATCH 0227/3617] A little py3 compat, side effect of making this
work under profile
---
v2/bin/ansible-playbook | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index f1b590958b343b..8e80966ed7fe19 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+from __future__ import print_function
import os
import stat
@@ -19,7 +20,8 @@ from ansible.utils.vault import read_vault_file
from ansible.vars import VariableManager
# Implement an ansible.utils.warning() function later
-warning = getattr(__builtins__, 'print')
+def warning(*args, **kwargs):
+ print(*args, **kwargs)
#---------------------------------------------------------------------------------------------------
@@ -136,10 +138,10 @@ if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
except AnsibleError, e:
#display("ERROR: %s" % e, color='red', stderr=True)
- print e
+ print(e)
sys.exit(1)
except KeyboardInterrupt, ke:
#display("ERROR: interrupted", color='red', stderr=True)
- print "keyboard interrupt"
+ print("keyboard interrupt")
sys.exit(1)
From d277c6b82187a8cdbb23fec4467a00069681c646 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 2 Apr 2015 11:38:37 -0700
Subject: [PATCH 0228/3617] Few more py3 cleanups
---
v2/bin/ansible-playbook | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index 8e80966ed7fe19..d9771249794fd0 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -1,5 +1,6 @@
#!/usr/bin/env python
-from __future__ import print_function
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
import stat
@@ -136,11 +137,11 @@ if __name__ == "__main__":
#display(" ", log_only=True)
try:
sys.exit(main(sys.argv[1:]))
- except AnsibleError, e:
+ except AnsibleError as e:
#display("ERROR: %s" % e, color='red', stderr=True)
print(e)
sys.exit(1)
- except KeyboardInterrupt, ke:
+ except KeyboardInterrupt:
#display("ERROR: interrupted", color='red', stderr=True)
print("keyboard interrupt")
sys.exit(1)
From 369bf0d214095fd02614702ecf25ebc0cb712f98 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 2 Apr 2015 12:35:50 -0700
Subject: [PATCH 0229/3617] No longer need AnsibleComposer
---
v2/ansible/parsing/yaml/composer.py | 38 -----------------------------
v2/ansible/parsing/yaml/loader.py | 7 +++---
2 files changed, 3 insertions(+), 42 deletions(-)
delete mode 100644 v2/ansible/parsing/yaml/composer.py
diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py
deleted file mode 100644
index 6bdee92fc38180..00000000000000
--- a/v2/ansible/parsing/yaml/composer.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# (c) 2012-2014, Michael DeHaan
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from yaml.composer import Composer
-from yaml.nodes import MappingNode, ScalarNode
-
-class AnsibleComposer(Composer):
- def __init__(self):
- super(Composer, self).__init__()
-
- def compose_node(self, parent, index):
- # the line number where the previous token has ended (plus empty lines)
- node = Composer.compose_node(self, parent, index)
- if isinstance(node, (ScalarNode, MappingNode)):
- node.__datasource__ = self.name
- node.__line__ = self.line
- node.__column__ = node.start_mark.column + 1
- node.__line__ = node.start_mark.line + 1
-
- return node
diff --git a/v2/ansible/parsing/yaml/loader.py b/v2/ansible/parsing/yaml/loader.py
index 4e0049ed2a8f3e..e8547ff0d141fe 100644
--- a/v2/ansible/parsing/yaml/loader.py
+++ b/v2/ansible/parsing/yaml/loader.py
@@ -36,17 +36,16 @@ def __init__(self, stream, file_name=None):
AnsibleConstructor.__init__(self, file_name=file_name)
Resolver.__init__(self)
else:
+ from yaml.composer import Composer
from yaml.reader import Reader
from yaml.scanner import Scanner
from yaml.parser import Parser
- from ansible.parsing.yaml.composer import AnsibleComposer
-
- class AnsibleLoader(Reader, Scanner, Parser, AnsibleComposer, AnsibleConstructor, Resolver):
+ class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
def __init__(self, stream, file_name=None):
Reader.__init__(self, stream)
Scanner.__init__(self)
Parser.__init__(self)
- AnsibleComposer.__init__(self)
+ Composer.__init__(self)
AnsibleConstructor.__init__(self, file_name=file_name)
Resolver.__init__(self)
From 2cddb093f5b245474514c2137684d67a37fde1e7 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 2 Apr 2015 12:37:02 -0700
Subject: [PATCH 0230/3617] Add AnsibleList for keeping track of line numbers
in lists parsed from yaml
---
v2/ansible/parsing/yaml/constructor.py | 11 ++++++++++-
v2/ansible/parsing/yaml/objects.py | 4 ++++
v2/test/parsing/yaml/test_loader.py | 18 +++++++++++++-----
3 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py
index aed2553c05b9af..97f9c71ef8bd44 100644
--- a/v2/ansible/parsing/yaml/constructor.py
+++ b/v2/ansible/parsing/yaml/constructor.py
@@ -20,7 +20,7 @@
__metaclass__ = type
from yaml.constructor import Constructor
-from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleUnicode
+from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
class AnsibleConstructor(Constructor):
def __init__(self, file_name=None):
@@ -50,6 +50,12 @@ def construct_yaml_str(self, node):
return ret
+ def construct_yaml_seq(self, node):
+ data = AnsibleSequence()
+ yield data
+ data.extend(self.construct_sequence(node))
+ data.ansible_pos = self._node_position_info(node)
+
def _node_position_info(self, node):
# the line number where the previous token has ended (plus empty lines)
column = node.start_mark.column + 1
@@ -79,3 +85,6 @@ def _node_position_info(self, node):
u'tag:yaml.org,2002:python/unicode',
AnsibleConstructor.construct_yaml_str)
+AnsibleConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ AnsibleConstructor.construct_yaml_seq)
diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py
index 15850dd4f8749d..fe37eaab94a8df 100644
--- a/v2/ansible/parsing/yaml/objects.py
+++ b/v2/ansible/parsing/yaml/objects.py
@@ -50,3 +50,7 @@ class AnsibleMapping(AnsibleBaseYAMLObject, dict):
class AnsibleUnicode(AnsibleBaseYAMLObject, unicode):
''' sub class for unicode objects '''
pass
+
+class AnsibleSequence(AnsibleBaseYAMLObject, list):
+ ''' sub class for lists '''
+ pass
diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py
index f9144fb2925400..4c56962610099d 100644
--- a/v2/test/parsing/yaml/test_loader.py
+++ b/v2/test/parsing/yaml/test_loader.py
@@ -95,7 +95,11 @@ def test_parse_list(self):
self.assertEqual(data, [u'a', u'b'])
self.assertEqual(len(data), 2)
self.assertIsInstance(data[0], unicode)
- # No line/column info saved yet
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
+
+ self.assertEqual(data[0].ansible_pos, ('myfile.yml', 2, 19))
+ self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19))
class TestAnsibleLoaderPlay(unittest.TestCase):
@@ -184,12 +188,17 @@ def check_vars(self):
self.assertEqual(self.data[0][u'vars'][u'string'].ansible_pos, (self.play_filename, 5, 29))
self.assertEqual(self.data[0][u'vars'][u'utf8_string'].ansible_pos, (self.play_filename, 6, 34))
+
self.assertEqual(self.data[0][u'vars'][u'dictionary'].ansible_pos, (self.play_filename, 8, 23))
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster'].ansible_pos, (self.play_filename, 8, 32))
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed'].ansible_pos, (self.play_filename, 9, 28))
- # Lists don't yet have line/col information
- #self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 10, 21))
+ self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 11, 23))
+ self.assertEqual(self.data[0][u'vars'][u'list'][0].ansible_pos, (self.play_filename, 11, 25))
+ self.assertEqual(self.data[0][u'vars'][u'list'][1].ansible_pos, (self.play_filename, 12, 25))
+ # Numbers don't have line/col info yet
+ #self.assertEqual(self.data[0][u'vars'][u'list'][2].ansible_pos, (self.play_filename, 13, 25))
+ #self.assertEqual(self.data[0][u'vars'][u'list'][3].ansible_pos, (self.play_filename, 14, 25))
def check_tasks(self):
#
@@ -224,7 +233,6 @@ def test_line_numbers(self):
self.check_vars()
- # Lists don't yet have line/col info
- #self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 17, 28))
+ self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 16, 21))
self.check_tasks()
From 5808b68d35e19762b34cc8aad1557fa2f482381b Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 2 Apr 2015 12:41:30 -0700
Subject: [PATCH 0231/3617] Update module pointers
---
lib/ansible/modules/core | 2 +-
lib/ansible/modules/extras | 2 +-
v2/ansible/modules/extras | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 613961c592ed23..04c34cfa02185a 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 613961c592ed23ded2d7e3771ad45b01de5a95f3
+Subproject commit 04c34cfa02185a8d74165f5bdc96371ec6df37a8
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
index eb04e45311683d..21fce8ac730346 160000
--- a/lib/ansible/modules/extras
+++ b/lib/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit eb04e45311683dba1d54c8e5db293a2d3877eb68
+Subproject commit 21fce8ac730346b4e77427e3582553f2dc93c675
diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras
index 46e316a20a92b5..21fce8ac730346 160000
--- a/v2/ansible/modules/extras
+++ b/v2/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit 46e316a20a92b5a54b982eddb301eb3d57da397e
+Subproject commit 21fce8ac730346b4e77427e3582553f2dc93c675
From fa076591c97ea922fef16495d9e9be46b39a7ad8 Mon Sep 17 00:00:00 2001
From: Matt Martz
Date: Thu, 2 Apr 2015 15:30:37 -0500
Subject: [PATCH 0232/3617] Don't recommend installing ansible via homebrew
---
docsite/rst/intro_installation.rst | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst
index bad6ea068eff07..4a4504388a56fe 100644
--- a/docsite/rst/intro_installation.rst
+++ b/docsite/rst/intro_installation.rst
@@ -242,17 +242,14 @@ You may also wish to install from ports, run:
$ sudo make -C /usr/ports/sysutils/ansible install
-.. _from_brew:
+.. _on_macos:
-Latest Releases Via Homebrew (Mac OSX)
+Latest Releases on Mac OSX
++++++++++++++++++++++++++++++++++++++
-To install on a Mac, make sure you have Homebrew, then run:
+The preferred way to install ansible on a Mac is via pip.
-.. code-block:: bash
-
- $ brew update
- $ brew install ansible
+The instructions can be found in `Latest Releases Via Pip`_ section.
.. _from_pkgutil:
From 469a1250b6a487fbe9f1df35a9cf02a3292518cd Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Thu, 2 Apr 2015 16:21:45 -0500
Subject: [PATCH 0233/3617] Moving new patch action plugin over to v2
---
v2/ansible/plugins/action/patch.py | 66 ++++++++++++++++++++++++++++++
1 file changed, 66 insertions(+)
create mode 100644 v2/ansible/plugins/action/patch.py
diff --git a/v2/ansible/plugins/action/patch.py b/v2/ansible/plugins/action/patch.py
new file mode 100644
index 00000000000000..717cc359f4e806
--- /dev/null
+++ b/v2/ansible/plugins/action/patch.py
@@ -0,0 +1,66 @@
+# (c) 2015, Brian Coca
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.plugins.action import ActionBase
+from ansible.utils.boolean import boolean
+
+class ActionModule(ActionBase):
+
+ def run(self, tmp=None, task_vars=dict()):
+
+ src = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ remote_src = boolean(self._task.args.get('remote_src', 'no'))
+
+ if src is None:
+ return dict(failed=True, msg="src is required")
+ elif remote_src:
+ # everyting is remote, so we just execute the module
+ # without changing any of the module arguments
+ return self._execute_module()
+
+ if self._task._role is not None:
+ src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
+ else:
+ src = self._loader.path_dwim(src)
+
+ # create the remote tmp dir if needed, and put the source file there
+ if tmp is None or "-tmp-" not in tmp:
+ tmp = self._make_tmp_path()
+
+ tmp_src = self._shell.join_path(tmp, os.path.basename(src))
+ self._connection.put_file(src, tmp_src)
+
+ if self._connection_info.become and self._connection_info.become_user != 'root':
+ # FIXME: noop stuff here
+ #if not self.runner.noop_on_check(inject):
+ # self._remote_chmod('a+r', tmp_src, tmp)
+ self._remote_chmod('a+r', tmp_src, tmp)
+
+ new_module_args = self._task.args.copy()
+ new_module_args.update(
+ dict(
+ src=tmp_src,
+ )
+ )
+
+ return self._execute_module('patch', module_args=new_module_args)
From 92e400eb6d8063711e090722b9a2e3bd0bd39c43 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 2 Apr 2015 21:08:17 -0400
Subject: [PATCH 0234/3617] fixed minor issues with openstack docs not being
valid yaml
---
lib/ansible/utils/module_docs_fragments/openstack.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py
index 519ad785b9b9dc..f989b3dcb80f8e 100644
--- a/lib/ansible/utils/module_docs_fragments/openstack.py
+++ b/lib/ansible/utils/module_docs_fragments/openstack.py
@@ -24,7 +24,7 @@ class ModuleDocFragment(object):
cloud:
description:
- Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin)
- required: false
+ required: false
auth:
description:
- Dictionary containing auth information as needed by the cloud's auth
@@ -87,12 +87,11 @@ class ModuleDocFragment(object):
required: false
endpoint_type:
description:
- - Endpoint URL type to fetch from the service catalog.
+ - Endpoint URL type to fetch from the service catalog.
choices: [public, internal, admin]
required: false
default: public
-requirements:
- - shade
+requirements: [shade]
notes:
- The standard OpenStack environment variables, such as C(OS_USERNAME)
may be user instead of providing explicit values.
From 7a81167b0697ad261c5b98f5b31c2c5842a96ad8 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Thu, 2 Apr 2015 23:59:48 -0400
Subject: [PATCH 0235/3617] brought v2 find plugins up to date with v1, also
added exception handling for when there is a permissions issue
---
v2/ansible/plugins/__init__.py | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py
index bf074b78978ca2..7da575162ad3a7 100644
--- a/v2/ansible/plugins/__init__.py
+++ b/v2/ansible/plugins/__init__.py
@@ -26,6 +26,7 @@
import glob
import imp
from ansible import constants as C
+from ansible.utils import warnings
from ansible import errors
MODULE_CACHE = {}
@@ -160,17 +161,14 @@ def add_directory(self, directory, with_subdir=False):
self._extra_dirs.append(directory)
self._paths = None
- def find_plugin(self, name, suffixes=None, transport=''):
+ def find_plugin(self, name, suffixes=None):
''' Find a plugin named name '''
if not suffixes:
if self.class_name:
suffixes = ['.py']
else:
- if transport == 'winrm':
- suffixes = ['.ps1', '']
- else:
- suffixes = ['.py', '']
+ suffixes = ['.py', '']
potential_names = frozenset('%s%s' % (name, s) for s in suffixes)
for full_name in potential_names:
@@ -180,18 +178,21 @@ def find_plugin(self, name, suffixes=None, transport=''):
found = None
for path in [p for p in self._get_paths() if p not in self._searched_paths]:
if os.path.isdir(path):
- for potential_file in os.listdir(path):
+ try:
+ full_paths = (os.path.join(path, f) for f in os.listdir(path))
+ except OSError,e:
+ warnings("Error accessing plugin paths: %s" % str(e))
+ for full_path in (f for f in full_paths if os.path.isfile(f)):
for suffix in suffixes:
- if potential_file.endswith(suffix):
- full_path = os.path.join(path, potential_file)
+ if full_path.endswith(suffix):
full_name = os.path.basename(full_path)
break
else: # Yes, this is a for-else: http://bit.ly/1ElPkyg
continue
-
+
if full_name not in self._plugin_path_cache:
self._plugin_path_cache[full_name] = full_path
-
+
self._searched_paths.add(path)
for full_name in potential_names:
if full_name in self._plugin_path_cache:
From 25f071b64c11a2142723fa698adba46e297fcbe7 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 3 Apr 2015 00:01:32 -0400
Subject: [PATCH 0236/3617] fixed called to find plugin, transport is not
needed as suffixes are passed
---
v2/ansible/plugins/action/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py
index e56003021588bf..2d258dd5250a9b 100644
--- a/v2/ansible/plugins/action/__init__.py
+++ b/v2/ansible/plugins/action/__init__.py
@@ -83,7 +83,7 @@ def _configure_module(self, module_name, module_args):
# Search module path(s) for named module.
module_suffixes = getattr(self._connection, 'default_suffixes', None)
- module_path = self._module_loader.find_plugin(module_name, module_suffixes, transport=self._connection.get_transport())
+ module_path = self._module_loader.find_plugin(module_name, module_suffixes)
if module_path is None:
module_path2 = self._module_loader.find_plugin('ping', module_suffixes)
if module_path2 is not None:
From 0f8bc038ec57ab93dddb4a748b38f4c054acc6e3 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 3 Apr 2015 00:25:09 -0400
Subject: [PATCH 0237/3617] changed to use Display as utils.warning doesn't
exist in v2
---
v2/ansible/plugins/__init__.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py
index 7da575162ad3a7..a55059f1b7b7bc 100644
--- a/v2/ansible/plugins/__init__.py
+++ b/v2/ansible/plugins/__init__.py
@@ -26,7 +26,7 @@
import glob
import imp
from ansible import constants as C
-from ansible.utils import warnings
+from ansible.utils.display import Display
from ansible import errors
MODULE_CACHE = {}
@@ -181,7 +181,8 @@ def find_plugin(self, name, suffixes=None):
try:
full_paths = (os.path.join(path, f) for f in os.listdir(path))
except OSError,e:
- warnings("Error accessing plugin paths: %s" % str(e))
+ d = Display()
+ d.warning("Error accessing plugin paths: %s" % str(e))
for full_path in (f for f in full_paths if os.path.isfile(f)):
for suffix in suffixes:
if full_path.endswith(suffix):
From 2ade17e2f5b9ac48f3e4330617a64adbd04adca4 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 3 Apr 2015 04:50:44 -0400
Subject: [PATCH 0238/3617] v2: changed empty inventory error to a warning
that only localhost is available
---
v2/bin/ansible | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/v2/bin/ansible b/v2/bin/ansible
index 8eb5c97a6f5568..2b2df3df8f2d84 100755
--- a/v2/bin/ansible
+++ b/v2/bin/ansible
@@ -29,6 +29,7 @@ from ansible.inventory import Inventory
from ansible.parsing import DataLoader
from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
+from ansible.utils.display import Display
from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords
from ansible.utils.vault import read_vault_file
from ansible.vars import VariableManager
@@ -98,7 +99,8 @@ class Cli(object):
hosts = inventory.list_hosts(pattern)
if len(hosts) == 0:
- raise AnsibleError("provided hosts list is empty")
+ d = Display()
+ d.warning("provided hosts list is empty, only localhost is available")
if options.listhosts:
for host in hosts:
From 20b4492704450c11036476b8ab651fe57e97b11c Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 3 Apr 2015 04:51:16 -0400
Subject: [PATCH 0239/3617] started implementing 'list options'
---
v2/bin/ansible-playbook | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index d9771249794fd0..3a3793affc6795 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -58,13 +58,16 @@ def main(args):
validate_conflicts(parser,options)
+ # Note: slightly wrong, this is written so that implicit localhost
# Manage passwords
sshpass = None
becomepass = None
vault_pass = None
- normalize_become_options(options)
- (sshpass, becomepass, vault_pass) = ask_passwords(options)
+ # don't deal with privilege escalation when we don't need to
+ if not options.listhosts and not options.listtasks and not options.listtags:
+ normalize_become_options(options)
+ (sshpass, becomepass, vault_pass) = ask_passwords(options)
if options.vault_password_file:
# read vault_pass from a file
@@ -109,7 +112,6 @@ def main(args):
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory)
variable_manager.set_inventory(inventory)
- # Note: slightly wrong, this is written so that implicit localhost
# (which is not returned in list_hosts()) is taken into account for
# warning if inventory is empty. But it can't be taken into account for
# checking if limit doesn't match any hosts. Instead we don't worry about
@@ -129,7 +131,18 @@ def main(args):
# create the playbook executor, which manages running the plays
# via a task queue manager
pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=options)
- return pbex.run()
+
+ if options.listhosts:
+ print('TODO: implement')
+ sys.exit(0)
+ elif options.listtasks:
+ print('TODO: implement')
+ sys.exit(0)
+ elif options.listtags:
+ print('TODO: implement')
+ sys.exit(0)
+ else:
+ return pbex.run()
if __name__ == "__main__":
#display(" ", log_only=True)
From e719bf765d49ac7ac14ae056bfe0605756651259 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 3 Apr 2015 09:20:19 -0400
Subject: [PATCH 0240/3617] switched to use cross platform os.sep, added diff
output to copy
---
v2/ansible/plugins/action/copy.py | 12 +++++-------
v2/ansible/plugins/action/fetch.py | 2 +-
v2/ansible/plugins/action/template.py | 2 +-
3 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py
index 89c2fde7b3f1e9..ece8b5b11b0973 100644
--- a/v2/ansible/plugins/action/copy.py
+++ b/v2/ansible/plugins/action/copy.py
@@ -31,7 +31,7 @@
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum
from ansible.utils.unicode import to_bytes
-
+from ansible.parsing.vault import VaultLib
class ActionModule(ActionBase):
@@ -55,7 +55,7 @@ def run(self, tmp=None, task_vars=dict()):
# Check if the source ends with a "/"
source_trailing_slash = False
if source:
- source_trailing_slash = source.endswith("/")
+ source_trailing_slash = source.endswith(os.sep)
# Define content_tempfile in case we set it after finding content populated.
content_tempfile = None
@@ -145,6 +145,7 @@ def run(self, tmp=None, task_vars=dict()):
dest = self._remote_expand_user(dest, tmp)
for source_full, source_rel in source_files:
+
# Generate a hash of the local file.
local_checksum = checksum(source_full)
@@ -284,11 +285,8 @@ def run(self, tmp=None, task_vars=dict()):
else:
result = dict(dest=dest, src=source, changed=changed)
- # FIXME: move diffs into the result?
- #if len(diffs) == 1:
- # return ReturnData(conn=conn, result=result, diff=diffs[0])
- #else:
- # return ReturnData(conn=conn, result=result)
+ if len(diffs) == 1:
+ result['diff']=diffs[0]
return result
diff --git a/v2/ansible/plugins/action/fetch.py b/v2/ansible/plugins/action/fetch.py
index e63fd88ea5c3c4..7b549f5ecbce48 100644
--- a/v2/ansible/plugins/action/fetch.py
+++ b/v2/ansible/plugins/action/fetch.py
@@ -82,7 +82,7 @@ def run(self, tmp=None, task_vars=dict()):
dest = os.path.expanduser(dest)
if flat:
- if dest.endswith("/"):
+ if dest.endswith(os.sep):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source_local)
diff --git a/v2/ansible/plugins/action/template.py b/v2/ansible/plugins/action/template.py
index 1f7a6955a3220b..76b2e78a737d62 100644
--- a/v2/ansible/plugins/action/template.py
+++ b/v2/ansible/plugins/action/template.py
@@ -91,7 +91,7 @@ def run(self, tmp=None, task_vars=dict()):
dest = self._remote_expand_user(dest, tmp)
directory_prepended = False
- if dest.endswith("/"): # CCTODO: Fix path for Windows hosts.
+ if dest.endswith(os.sep):
directory_prepended = True
base = os.path.basename(source)
dest = os.path.join(dest, base)
From d5eb4df23ee7fd8086eae988a85c42204832777d Mon Sep 17 00:00:00 2001
From: Matt Martz
Date: Fri, 3 Apr 2015 09:42:20 -0500
Subject: [PATCH 0241/3617] Add ability to specify using ssh_args in
synchronize
---
lib/ansible/runner/action_plugins/synchronize.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py
index f8e57ae314e395..fb82194b00a1b2 100644
--- a/lib/ansible/runner/action_plugins/synchronize.py
+++ b/lib/ansible/runner/action_plugins/synchronize.py
@@ -19,6 +19,7 @@
import os.path
from ansible import utils
+from ansible import constants
from ansible.runner.return_data import ReturnData
import ansible.utils.template as template
@@ -104,9 +105,11 @@ def run(self, conn, tmp, module_name, module_args,
src = options.get('src', None)
dest = options.get('dest', None)
+ use_ssh_args = options.pop('use_ssh_args', None)
src = template.template(self.runner.basedir, src, inject)
dest = template.template(self.runner.basedir, dest, inject)
+ use_ssh_args = template.template(self.runner.basedir, use_ssh_args, inject)
try:
options['local_rsync_path'] = inject['ansible_rsync_path']
@@ -187,6 +190,8 @@ def run(self, conn, tmp, module_name, module_args,
options['dest'] = dest
if 'mode' in options:
del options['mode']
+ if use_ssh_args:
+ options['ssh_args'] = constants.ANSIBLE_SSH_ARGS
# Allow custom rsync path argument.
rsync_path = options.get('rsync_path', None)
From ada86dafaf5b4ee7f5d5b6cb203f982bcb1f9d19 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 3 Apr 2015 13:02:42 -0400
Subject: [PATCH 0242/3617] added listhosts draft; fixed assert from list to
new yaml ansible object; taskqueue is now None when just listing
---
v2/ansible/executor/playbook_executor.py | 95 +++++++++++++++---------
v2/ansible/playbook/helpers.py | 8 +-
v2/bin/ansible-playbook | 7 +-
3 files changed, 68 insertions(+), 42 deletions(-)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 324e6b01af9dfb..64f3f676210711 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -43,7 +43,10 @@ def __init__(self, playbooks, inventory, variable_manager, loader, options):
self._loader = loader
self._options = options
- self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, options=options)
+ if options.listhosts or options.listtasks or options.listtags:
+ self._tqm = None
+ else:
+ self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, options=options)
def run(self):
@@ -58,7 +61,7 @@ def run(self):
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
-
+
# FIXME: playbook entries are just plays, so we should rename them
for play in pb.get_entries():
self._inventory.remove_restriction()
@@ -83,43 +86,40 @@ def run(self):
break
if result != 0:
- # FIXME: do something here, to signify the playbook execution failed
- self._cleanup()
- return result
- except:
+ raise AnsibleError("Play failed!: %d" % result)
+ finally:
self._cleanup()
- raise
- self._cleanup()
-
- # FIXME: this stat summary stuff should be cleaned up and moved
- # to a new method, if it even belongs here...
- self._tqm._display.banner("PLAY RECAP")
-
- hosts = sorted(self._tqm._stats.processed.keys())
- for h in hosts:
- t = self._tqm._stats.summarize(h)
-
- self._tqm._display.display("%s : %s %s %s %s" % (
- hostcolor(h, t),
- colorize('ok', t['ok'], 'green'),
- colorize('changed', t['changed'], 'yellow'),
- colorize('unreachable', t['unreachable'], 'red'),
- colorize('failed', t['failures'], 'red')),
- screen_only=True
- )
-
- self._tqm._display.display("%s : %s %s %s %s" % (
- hostcolor(h, t, False),
- colorize('ok', t['ok'], None),
- colorize('changed', t['changed'], None),
- colorize('unreachable', t['unreachable'], None),
- colorize('failed', t['failures'], None)),
- log_only=True
- )
-
- self._tqm._display.display("", screen_only=True)
- # END STATS STUFF
+ if result == 0:
+ #TODO: move to callback
+ # FIXME: this stat summary stuff should be cleaned up and moved
+ # to a new method, if it even belongs here...
+ self._tqm._display.banner("PLAY RECAP")
+
+ hosts = sorted(self._tqm._stats.processed.keys())
+ for h in hosts:
+ t = self._tqm._stats.summarize(h)
+
+ self._tqm._display.display("%s : %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize('ok', t['ok'], 'green'),
+ colorize('changed', t['changed'], 'yellow'),
+ colorize('unreachable', t['unreachable'], 'red'),
+ colorize('failed', t['failures'], 'red')),
+ screen_only=True
+ )
+
+ self._tqm._display.display("%s : %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize('ok', t['ok'], None),
+ colorize('changed', t['changed'], None),
+ colorize('unreachable', t['unreachable'], None),
+ colorize('failed', t['failures'], None)),
+ log_only=True
+ )
+
+ self._tqm._display.display("", screen_only=True)
+ # END STATS STUFF
return result
@@ -160,3 +160,24 @@ def _get_serialized_batches(self, play):
serialized_batches.append(play_hosts)
return serialized_batches
+
+ def listhosts(self):
+
+ playlist = []
+ try:
+ for playbook_path in self._playbooks:
+ pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
+ for play in pb.get_entries():
+
+ # Use templated copies in case hosts: depends on variables
+ all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
+ new_play = play.copy()
+ new_play.post_validate(all_vars, fail_on_undefined=False)
+
+ playlist.append(set(self._inventory.get_hosts(new_play.hosts)))
+ except AnsibleError:
+ raise
+ except Exception, e:
+ raise AnsibleParserError("Failed to process plays: %s" % str(e))
+
+ return playlist
diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py
index cc262b4fb51b94..dd346c636f03c0 100644
--- a/v2/ansible/playbook/helpers.py
+++ b/v2/ansible/playbook/helpers.py
@@ -21,7 +21,7 @@
from types import NoneType
from ansible.errors import AnsibleParserError
-from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence
def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
@@ -34,7 +34,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
- assert type(ds) in (list, NoneType)
+ assert ds is None or isinstance(ds, AnsibleSequence), 'block has bad type: %s' % type(ds)
block_list = []
if ds:
@@ -64,7 +64,7 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler
from ansible.playbook.handler import Handler
from ansible.playbook.task import Task
- assert type(ds) == list
+ assert isinstance(ds, list), 'task has bad type: %s' % type(ds)
task_list = []
for task in ds:
@@ -101,7 +101,7 @@ def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader
# we import here to prevent a circular dependency with imports
from ansible.playbook.role.include import RoleInclude
- assert isinstance(ds, list)
+ assert isinstance(ds, list), 'roles has bad type: %s' % type(ds)
roles = []
for role_def in ds:
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index 3a3793affc6795..57380590c472a5 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -133,7 +133,12 @@ def main(args):
pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=options)
if options.listhosts:
- print('TODO: implement')
+ i = 1
+ for play in pbex.listhosts():
+ print("\nplay #%d" % i)
+ for host in sorted(play):
+ print(" %s" % host)
+ i = i + 1
sys.exit(0)
elif options.listtasks:
print('TODO: implement')
From 41d9bfde07853a6b2113ea1ec2fe154a189ce693 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Fri, 3 Apr 2015 12:17:01 -0500
Subject: [PATCH 0243/3617] Moving the Display() instantiation outside of v2
classes
---
v2/ansible/executor/playbook_executor.py | 13 +++++++------
v2/ansible/executor/task_queue_manager.py | 6 ++----
v2/ansible/playbook/helpers.py | 2 +-
v2/bin/ansible | 4 +++-
v2/bin/ansible-playbook | 4 +++-
5 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 64f3f676210711..97232cefe8f1fb 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -36,17 +36,18 @@ class PlaybookExecutor:
basis for bin/ansible-playbook operation.
'''
- def __init__(self, playbooks, inventory, variable_manager, loader, options):
+ def __init__(self, playbooks, inventory, variable_manager, loader, display, options):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
+ self._display = display
self._options = options
if options.listhosts or options.listtasks or options.listtags:
self._tqm = None
else:
- self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, options=options)
+ self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, display=display, options=options)
def run(self):
@@ -94,13 +95,13 @@ def run(self):
#TODO: move to callback
# FIXME: this stat summary stuff should be cleaned up and moved
# to a new method, if it even belongs here...
- self._tqm._display.banner("PLAY RECAP")
+ self._display.banner("PLAY RECAP")
hosts = sorted(self._tqm._stats.processed.keys())
for h in hosts:
t = self._tqm._stats.summarize(h)
- self._tqm._display.display("%s : %s %s %s %s" % (
+ self._display.display("%s : %s %s %s %s" % (
hostcolor(h, t),
colorize('ok', t['ok'], 'green'),
colorize('changed', t['changed'], 'yellow'),
@@ -109,7 +110,7 @@ def run(self):
screen_only=True
)
- self._tqm._display.display("%s : %s %s %s %s" % (
+ self._display.display("%s : %s %s %s %s" % (
hostcolor(h, t, False),
colorize('ok', t['ok'], None),
colorize('changed', t['changed'], None),
@@ -118,7 +119,7 @@ def run(self):
log_only=True
)
- self._tqm._display.display("", screen_only=True)
+ self._display.display("", screen_only=True)
# END STATS STUFF
return result
diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py
index 0693e9dc56ccaa..28904676eb28d4 100644
--- a/v2/ansible/executor/task_queue_manager.py
+++ b/v2/ansible/executor/task_queue_manager.py
@@ -33,7 +33,6 @@
from ansible.plugins import callback_loader, strategy_loader
from ansible.utils.debug import debug
-from ansible.utils.display import Display
__all__ = ['TaskQueueManager']
@@ -49,16 +48,15 @@ class TaskQueueManager:
which dispatches the Play's tasks to hosts.
'''
- def __init__(self, inventory, callback, variable_manager, loader, options):
+ def __init__(self, inventory, callback, variable_manager, loader, display, options):
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
+ self._display = display
self._options = options
self._stats = AggregateStats()
- self._display = Display()
-
# a special flag to help us exit cleanly
self._terminated = False
diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py
index dd346c636f03c0..7242322b88faf2 100644
--- a/v2/ansible/playbook/helpers.py
+++ b/v2/ansible/playbook/helpers.py
@@ -34,7 +34,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use
# we import here to prevent a circular dependency with imports
from ansible.playbook.block import Block
- assert ds is None or isinstance(ds, AnsibleSequence), 'block has bad type: %s' % type(ds)
+ assert ds is None or isinstance(ds, list), 'block has bad type: %s' % type(ds)
block_list = []
if ds:
diff --git a/v2/bin/ansible b/v2/bin/ansible
index 2b2df3df8f2d84..79d5f0a28b34ee 100755
--- a/v2/bin/ansible
+++ b/v2/bin/ansible
@@ -31,6 +31,7 @@ from ansible.parsing.splitter import parse_kv
from ansible.playbook.play import Play
from ansible.utils.display import Display
from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords
+from ansible.utils.display import Display
from ansible.utils.vault import read_vault_file
from ansible.vars import VariableManager
@@ -131,7 +132,8 @@ class Cli(object):
# now create a task queue manager to execute the play
try:
- tqm = TaskQueueManager(inventory=inventory, callback='minimal', variable_manager=variable_manager, loader=loader, options=options)
+ display = Display()
+ tqm = TaskQueueManager(inventory=inventory, callback='minimal', variable_manager=variable_manager, loader=loader, display=display, options=options)
result = tqm.run(play)
tqm.cleanup()
except AnsibleError:
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index 57380590c472a5..c1ee70d059c701 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -15,6 +15,7 @@ from ansible.parsing.splitter import parse_kv
from ansible.playbook import Playbook
from ansible.playbook.task import Task
from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords
+from ansible.utils.display import Display
from ansible.utils.unicode import to_unicode
from ansible.utils.vars import combine_vars
from ansible.utils.vault import read_vault_file
@@ -130,7 +131,8 @@ def main(args):
# create the playbook executor, which manages running the plays
# via a task queue manager
- pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=options)
+ display = Display()
+ pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options)
if options.listhosts:
i = 1
From a811c8841e2e0da5de6b6df056e6c84b6166a432 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 3 Apr 2015 13:41:39 -0400
Subject: [PATCH 0244/3617] now listhosts shows the same info as v1
---
v2/ansible/executor/playbook_executor.py | 16 ++++++++++++++--
v2/bin/ansible-playbook | 8 +++-----
2 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 97232cefe8f1fb..bab6ea4e05d4b5 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -162,10 +162,11 @@ def _get_serialized_batches(self, play):
return serialized_batches
- def listhosts(self):
+ def list_hosts_per_play(self):
playlist = []
try:
+ i = 1
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
for play in pb.get_entries():
@@ -175,10 +176,21 @@ def listhosts(self):
new_play = play.copy()
new_play.post_validate(all_vars, fail_on_undefined=False)
- playlist.append(set(self._inventory.get_hosts(new_play.hosts)))
+ pname = play.get_name().strip()
+ if pname == 'PLAY: ':
+ pname = 'play #%d' % i
+
+ playlist.append( {
+ 'name': pname,
+ 'pattern': play.hosts,
+ 'hosts': set(self._inventory.get_hosts(new_play.hosts)),
+ } )
+ i = i + 1
+
except AnsibleError:
raise
except Exception, e:
+ #TODO: log exception
raise AnsibleParserError("Failed to process plays: %s" % str(e))
return playlist
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index c1ee70d059c701..4dc6d6bad94a4f 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -135,12 +135,10 @@ def main(args):
pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options)
if options.listhosts:
- i = 1
- for play in pbex.listhosts():
- print("\nplay #%d" % i)
- for host in sorted(play):
+ for p in pbex.list_hosts_per_play():
+ print("\n %s (%s): host count=%d" % (p['name'], p['pattern'], len(p['hosts'])))
+ for host in p['hosts']:
print(" %s" % host)
- i = i + 1
sys.exit(0)
elif options.listtasks:
print('TODO: implement')
From 3c6fdebfe38d3b3d6c4a33e251fd6de3333f50ba Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Fri, 3 Apr 2015 13:49:00 -0400
Subject: [PATCH 0245/3617] made listhosts play output name more consistent
internally
---
v2/ansible/executor/playbook_executor.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index bab6ea4e05d4b5..24b9f8c17baf40 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -178,7 +178,7 @@ def list_hosts_per_play(self):
pname = play.get_name().strip()
if pname == 'PLAY: ':
- pname = 'play #%d' % i
+ pname = 'PLAY: #%d' % i
playlist.append( {
'name': pname,
From 22608939eb918504faf25850f71d568756256847 Mon Sep 17 00:00:00 2001
From: Bill Nottingham
Date: Fri, 3 Apr 2015 14:23:04 -0400
Subject: [PATCH 0246/3617] Update intro_windows.rst
Refer to PowerShell consistently.
---
docsite/rst/intro_windows.rst | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst
index b5e6be8234046e..544c6fba754b81 100644
--- a/docsite/rst/intro_windows.rst
+++ b/docsite/rst/intro_windows.rst
@@ -11,7 +11,7 @@ Windows: How Does It Work
As you may have already read, Ansible manages Linux/Unix machines using SSH by default.
Starting in version 1.7, Ansible also contains support for managing Windows machines. This uses
-native powershell remoting, rather than SSH.
+native PowerShell remoting, rather than SSH.
Ansible will still be run from a Linux control machine, and uses the "winrm" Python module to talk to remote hosts.
@@ -67,7 +67,7 @@ communication channel that leverages Windows remoting::
ansible windows [-i inventory] -m win_ping --ask-vault-pass
If you haven't done anything to prep your systems yet, this won't work yet. This is covered in a later
-section about how to enable powershell remoting - and if necessary - how to upgrade powershell to
+section about how to enable PowerShell remoting - and if necessary - how to upgrade PowerShell to
a version that is 3 or higher.
You'll run this command again later though, to make sure everything is working.
@@ -77,21 +77,21 @@ You'll run this command again later though, to make sure everything is working.
Windows System Prep
```````````````````
-In order for Ansible to manage your windows machines, you will have to enable Powershell remoting configured.
+In order for Ansible to manage your windows machines, you will need to have PowerShell remoting configured.
-To automate setup of WinRM, you can run `this powershell script `_ on the remote machine.
+To automate setup of WinRM, you can run `this PowerShell script `_ on the remote machine.
Admins may wish to modify this setup slightly, for instance to increase the timeframe of
the certificate.
.. _getting_to_powershell_three_or_higher:
-Getting to Powershell 3.0 or higher
+Getting to PowerShell 3.0 or higher
```````````````````````````````````
-Powershell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script.
+PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script.
-Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a powershell console as an administrator. You will now be running Powershell 3 and can try connectivity again using the win_ping technique referenced above.
+Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above.
.. _what_windows_modules_are_available:
@@ -105,7 +105,7 @@ Browse this index to see what is available.
In many cases, it may not be necessary to even write or use an Ansible module.
-In particular, the "script" module can be used to run arbitrary powershell scripts, allowing Windows administrators familiar with powershell a very native way to do things, as in the following playbook::
+In particular, the "script" module can be used to run arbitrary PowerShell scripts, allowing Windows administrators familiar with PowerShell a very native way to do things, as in the following playbook::
- hosts: windows
tasks:
@@ -121,10 +121,10 @@ Developers: Supported modules and how it works
Developing ansible modules are covered in a `later section of the documentation `_, with a focus on Linux/Unix.
What if you want to write Windows modules for ansible though?
-For Windows, ansible modules are implemented in Powershell. Skim those Linux/Unix module development chapters before proceeding.
+For Windows, ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding.
Windows modules live in a "windows/" subfolder in the Ansible "library/" subtree. For example, if a module is named
-"library/windows/win_ping", there will be embedded documentation in the "win_ping" file, and the actual powershell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense.
+"library/windows/win_ping", there will be embedded documentation in the "win_ping" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense.
Modules (ps1 files) should start as follows::
@@ -169,7 +169,7 @@ Windows Playbook Examples
Look to the list of windows modules for most of what is possible, though also some modules like "raw" and "script" also work on Windows, as do "fetch" and "slurp".
-Here is an example of pushing and running a powershell script::
+Here is an example of pushing and running a PowerShell script::
- name: test script module
hosts: windows
@@ -223,7 +223,7 @@ form of new modules, tweaks to existing modules, documentation, or something els
:doc:`playbooks`
Learning ansible's configuration management language
`List of Windows Modules `_
- Windows specific module list, all implemented in powershell
+ Windows specific module list, all implemented in PowerShell
`Mailing List `_
Questions? Help? Ideas? Stop by the list on Google Groups
`irc.freenode.net `_
From 7e3b3b6ebe79b56ed2f56347bf7842cb2a9c52d9 Mon Sep 17 00:00:00 2001
From: Bill Nottingham
Date: Fri, 3 Apr 2015 14:26:45 -0400
Subject: [PATCH 0247/3617] Update intro_windows.rst
Add a bit about what Windows versions PS3 is actually available for.
---
docsite/rst/intro_windows.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst
index 544c6fba754b81..d96478b0a267f0 100644
--- a/docsite/rst/intro_windows.rst
+++ b/docsite/rst/intro_windows.rst
@@ -89,7 +89,7 @@ the certificate.
Getting to PowerShell 3.0 or higher
```````````````````````````````````
-PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script.
+PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script. Note that PowerShell 3.0 is only supported on Windows 7 SP1, Windows Server 2008 SP1, and later releases of Windows.
Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above.
From 349ecf6efe54e9144285d1f4170ef0d8ef241ff2 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Fri, 3 Apr 2015 11:35:01 -0700
Subject: [PATCH 0248/3617] Add a vault test to data_loader test and some
additional yaml tests to parsing/yaml/test_loader
---
v2/test/parsing/test_data_loader.py | 22 ++++++++++++++-
v2/test/parsing/yaml/test_loader.py | 43 +++++++++++++++++++++++++++++
2 files changed, 64 insertions(+), 1 deletion(-)
diff --git a/v2/test/parsing/test_data_loader.py b/v2/test/parsing/test_data_loader.py
index 370046dbf34655..75ceb662f7327c 100644
--- a/v2/test/parsing/test_data_loader.py
+++ b/v2/test/parsing/test_data_loader.py
@@ -22,7 +22,7 @@
from yaml.scanner import ScannerError
from ansible.compat.tests import unittest
-from ansible.compat.tests.mock import patch
+from ansible.compat.tests.mock import patch, mock_open
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
@@ -62,3 +62,23 @@ def test_parse_fail_from_file(self, mock_def):
""", True)
self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt')
+class TestDataLoaderWithVault(unittest.TestCase):
+
+ def setUp(self):
+ self._loader = DataLoader(vault_password='ansible')
+
+ def tearDown(self):
+ pass
+
+ @patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True)
+ def test_parse_from_vault_1_1_file(self):
+ vaulted_data = """$ANSIBLE_VAULT;1.1;AES256
+33343734386261666161626433386662623039356366656637303939306563376130623138626165
+6436333766346533353463636566313332623130383662340a393835656134633665333861393331
+37666233346464636263636530626332623035633135363732623332313534306438393366323966
+3135306561356164310a343937653834643433343734653137383339323330626437313562306630
+3035
+"""
+ with patch('__builtin__.open', mock_open(read_data=vaulted_data)):
+ output = self._loader.load_from_file('dummy_vault.txt')
+ self.assertEqual(output, dict(foo='bar'))
diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py
index 4c56962610099d..9a4746b99dfeab 100644
--- a/v2/test/parsing/yaml/test_loader.py
+++ b/v2/test/parsing/yaml/test_loader.py
@@ -101,6 +101,49 @@ def test_parse_list(self):
self.assertEqual(data[0].ansible_pos, ('myfile.yml', 2, 19))
self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19))
+ def test_parse_short_dict(self):
+ stream = StringIO("""{"foo": "bar"}""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, dict(foo=u'bar'))
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
+ self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 9))
+
+ stream = StringIO("""foo: bar""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, dict(foo=u'bar'))
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
+ self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 6))
+
+ def test_error_conditions(self):
+ stream = StringIO("""{""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ self.assertRaises(loader.get_single_data)
+
+ def test_front_matter(self):
+ stream = StringIO("""---\nfoo: bar""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, dict(foo=u'bar'))
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 1))
+ self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 2, 6))
+
+ # Initial indent (See: #6348)
+ stream = StringIO(""" - foo: bar\n baz: qux""")
+ loader = AnsibleLoader(stream, 'myfile.yml')
+ data = loader.get_single_data()
+ self.assertEqual(data, [{u'foo': u'bar', u'baz': u'qux'}])
+
+ self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 2))
+ self.assertEqual(data[0].ansible_pos, ('myfile.yml', 1, 4))
+ self.assertEqual(data[0][u'foo'].ansible_pos, ('myfile.yml', 1, 9))
+ self.assertEqual(data[0][u'baz'].ansible_pos, ('myfile.yml', 2, 9))
+
+
class TestAnsibleLoaderPlay(unittest.TestCase):
def setUp(self):
From 2eb2a41d059f5c025055ba5795825fc8f422ea96 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 4 Apr 2015 10:24:03 -0400
Subject: [PATCH 0249/3617] renamed get_entries to get_plays
---
v2/ansible/playbook/__init__.py | 2 +-
v2/test/playbook/test_playbook.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py
index 1c033559075404..40e6638f23921e 100644
--- a/v2/ansible/playbook/__init__.py
+++ b/v2/ansible/playbook/__init__.py
@@ -81,5 +81,5 @@ def _load_playbook_data(self, file_name, variable_manager):
def get_loader(self):
return self._loader
- def get_entries(self):
+ def get_plays(self):
return self._entries[:]
diff --git a/v2/test/playbook/test_playbook.py b/v2/test/playbook/test_playbook.py
index 1e72421818be6e..dfb52dc7b12726 100644
--- a/v2/test/playbook/test_playbook.py
+++ b/v2/test/playbook/test_playbook.py
@@ -47,7 +47,7 @@ def test_basic_playbook(self):
""",
})
p = Playbook.load("test_file.yml", loader=fake_loader)
- entries = p.get_entries()
+ plays = p.get_plays()
def test_bad_playbook_files(self):
fake_loader = DictDataLoader({
From e6e69c089414835d448bbffffd21c4775f2b23f0 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 4 Apr 2015 10:25:55 -0400
Subject: [PATCH 0250/3617] finished implementing list-hosts, started adding
list-tasks/list-tags but getting just task names and have to adjust for
having blocks.
---
v2/ansible/executor/playbook_executor.py | 170 ++++++++++++-----------
v2/ansible/playbook/play.py | 10 ++
v2/bin/ansible-playbook | 36 +++--
3 files changed, 123 insertions(+), 93 deletions(-)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 24b9f8c17baf40..865b06f1088dcc 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -59,12 +59,18 @@ def run(self):
signal.signal(signal.SIGINT, self._cleanup)
result = 0
+ entrylist = []
+ entry = {}
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
- # FIXME: playbook entries are just plays, so we should rename them
- for play in pb.get_entries():
+ if self._tqm is None: # we are doing a listing
+ entry = {'playbook': playbook_path}
+ entry['plays'] = []
+
+ i = 1
+ for play in pb.get_plays():
self._inventory.remove_restriction()
# Create a temporary copy of the play here, so we can run post_validate
@@ -73,54 +79,91 @@ def run(self):
new_play = play.copy()
new_play.post_validate(all_vars, fail_on_undefined=False)
- for batch in self._get_serialized_batches(new_play):
- if len(batch) == 0:
- self._tqm.send_callback('v2_playbook_on_play_start', new_play)
- self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
- result = 0
- break
- # restrict the inventory to the hosts in the serialized batch
- self._inventory.restrict_to_hosts(batch)
- # and run it...
- result = self._tqm.run(play=play)
+ if self._tqm is None:
+ # we are just doing a listing
+
+ pname = new_play.get_name().strip()
+ if pname == 'PLAY: ':
+ pname = 'PLAY: #%d' % i
+ p = { 'name': pname }
+
+ if self._options.listhosts:
+ p['pattern']=play.hosts
+ p['hosts']=set(self._inventory.get_hosts(new_play.hosts))
+
+ #TODO: play tasks are really blocks, need to figure out how to get task objects from them
+ elif self._options.listtasks:
+ p['tasks'] = []
+ for task in play.get_tasks():
+ p['tasks'].append(task)
+ #p['tasks'].append({'name': task.get_name().strip(), 'tags': task.tags})
+
+ elif self._options.listtags:
+ p['tags'] = set(new_play.tags)
+ for task in play.get_tasks():
+ p['tags'].update(task)
+ #p['tags'].update(task.tags)
+ entry['plays'].append(p)
+
+ else:
+ # we are actually running plays
+ for batch in self._get_serialized_batches(new_play):
+ if len(batch) == 0:
+ self._tqm.send_callback('v2_playbook_on_play_start', new_play)
+ self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
+ result = 0
+ break
+ # restrict the inventory to the hosts in the serialized batch
+ self._inventory.restrict_to_hosts(batch)
+ # and run it...
+ result = self._tqm.run(play=play)
+ if result != 0:
+ break
+
if result != 0:
- break
+ raise AnsibleError("Play failed!: %d" % result)
+
+ i = i + 1 # per play
+
+ if entry:
+ entrylist.append(entry) # per playbook
+
+ if entrylist:
+ return entrylist
- if result != 0:
- raise AnsibleError("Play failed!: %d" % result)
finally:
- self._cleanup()
-
- if result == 0:
- #TODO: move to callback
- # FIXME: this stat summary stuff should be cleaned up and moved
- # to a new method, if it even belongs here...
- self._display.banner("PLAY RECAP")
-
- hosts = sorted(self._tqm._stats.processed.keys())
- for h in hosts:
- t = self._tqm._stats.summarize(h)
-
- self._display.display("%s : %s %s %s %s" % (
- hostcolor(h, t),
- colorize('ok', t['ok'], 'green'),
- colorize('changed', t['changed'], 'yellow'),
- colorize('unreachable', t['unreachable'], 'red'),
- colorize('failed', t['failures'], 'red')),
- screen_only=True
- )
-
- self._display.display("%s : %s %s %s %s" % (
- hostcolor(h, t, False),
- colorize('ok', t['ok'], None),
- colorize('changed', t['changed'], None),
- colorize('unreachable', t['unreachable'], None),
- colorize('failed', t['failures'], None)),
- log_only=True
- )
-
- self._display.display("", screen_only=True)
- # END STATS STUFF
+ if self._tqm is not None:
+ self._cleanup()
+
+ #TODO: move to callback
+ # FIXME: this stat summary stuff should be cleaned up and moved
+ # to a new method, if it even belongs here...
+ self._display.banner("PLAY RECAP")
+
+ hosts = sorted(self._tqm._stats.processed.keys())
+ for h in hosts:
+ t = self._tqm._stats.summarize(h)
+
+ self._display.display("%s : %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize('ok', t['ok'], 'green'),
+ colorize('changed', t['changed'], 'yellow'),
+ colorize('unreachable', t['unreachable'], 'red'),
+ colorize('failed', t['failures'], 'red')),
+ screen_only=True
+ )
+
+ self._display.display("%s : %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize('ok', t['ok'], None),
+ colorize('changed', t['changed'], None),
+ colorize('unreachable', t['unreachable'], None),
+ colorize('failed', t['failures'], None)),
+ log_only=True
+ )
+
+ self._display.display("", screen_only=True)
+ # END STATS STUFF
return result
@@ -161,36 +204,3 @@ def _get_serialized_batches(self, play):
serialized_batches.append(play_hosts)
return serialized_batches
-
- def list_hosts_per_play(self):
-
- playlist = []
- try:
- i = 1
- for playbook_path in self._playbooks:
- pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
- for play in pb.get_entries():
-
- # Use templated copies in case hosts: depends on variables
- all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
- new_play = play.copy()
- new_play.post_validate(all_vars, fail_on_undefined=False)
-
- pname = play.get_name().strip()
- if pname == 'PLAY: ':
- pname = 'PLAY: #%d' % i
-
- playlist.append( {
- 'name': pname,
- 'pattern': play.hosts,
- 'hosts': set(self._inventory.get_hosts(new_play.hosts)),
- } )
- i = i + 1
-
- except AnsibleError:
- raise
- except Exception, e:
- #TODO: log exception
- raise AnsibleParserError("Failed to process plays: %s" % str(e))
-
- return playlist
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
index a472d070899b3e..34c4d3e5608fde 100644
--- a/v2/ansible/playbook/play.py
+++ b/v2/ansible/playbook/play.py
@@ -27,6 +27,7 @@
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
+from ansible.playbook.block import Block
from ansible.utils.vars import combine_vars
@@ -233,6 +234,15 @@ def get_handlers(self):
def get_roles(self):
return self.roles[:]
+ def get_tasks(self):
+ tasklist = []
+ for task in self.pre_tasks + self.tasks + self.post_tasks:
+ if isinstance(task, Block):
+ tasklist.append(task.block + task.rescue + task.always)
+ else:
+ tasklist.append(task)
+ return tasklist
+
def serialize(self):
data = super(Play, self).serialize()
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index 4dc6d6bad94a4f..e2cca104844be2 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -134,20 +134,30 @@ def main(args):
display = Display()
pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options)
- if options.listhosts:
- for p in pbex.list_hosts_per_play():
- print("\n %s (%s): host count=%d" % (p['name'], p['pattern'], len(p['hosts'])))
- for host in p['hosts']:
- print(" %s" % host)
- sys.exit(0)
- elif options.listtasks:
- print('TODO: implement')
- sys.exit(0)
- elif options.listtags:
- print('TODO: implement')
- sys.exit(0)
+ results = pbex.run()
+
+ if isinstance(results, list):
+ for p in results:
+
+ print('')
+ print('playbook: %s' % p['playbook'])
+ print('')
+
+ for play in p['plays']:
+ if options.listhosts:
+ print("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts'])))
+ for host in play['hosts']:
+ print(" %s" % host)
+ if options.listtasks: #TODO: do we want to display block info?
+ print("\n %s: task count=%d" % (play['name'], len(play['tasks'])))
+ for task in play['tasks']:
+ print(" %s" % task)
+ if options.listtags:
+ print("\n %s: tags count=%d" % (play['name'], len(play['tags'])))
+ for tag in play['tags']:
+ print(" %s" % tag)
else:
- return pbex.run()
+ return results
if __name__ == "__main__":
#display(" ", log_only=True)
From af97e732a07cb5fc24f314894dbfe9f7b47e5c90 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 4 Apr 2015 15:14:40 -0400
Subject: [PATCH 0251/3617] updated ansible-playbook to use display, fixed
issues breaking display class
---
v2/ansible/executor/playbook_executor.py | 2 +-
v2/ansible/playbook/play.py | 2 +-
v2/ansible/utils/display.py | 7 ++--
v2/bin/ansible-playbook | 48 ++++++++++--------------
4 files changed, 25 insertions(+), 34 deletions(-)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 865b06f1088dcc..94bdbf01e1f1ff 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -16,7 +16,7 @@
# along with Ansible. If not, see .
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
+from __future__ import (absolute_import, division)
__metaclass__ = type
import signal
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
index 34c4d3e5608fde..eeabfce062a4d6 100644
--- a/v2/ansible/playbook/play.py
+++ b/v2/ansible/playbook/play.py
@@ -16,7 +16,7 @@
# along with Ansible. If not, see .
# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
+from __future__ import (absolute_import, division)
__metaclass__ = type
from ansible.errors import AnsibleError, AnsibleParserError
diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py
index 758a62fceea7b5..dd44d61dd30eca 100644
--- a/v2/ansible/utils/display.py
+++ b/v2/ansible/utils/display.py
@@ -18,6 +18,7 @@
# FIXME: copied mostly from old code, needs py3 improvements
import textwrap
+import sys
from ansible import constants as C
from ansible.errors import *
@@ -97,15 +98,15 @@ def deprecated(self, msg, version, removed=False):
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in deprecations:
- self._display(new_msg, color='purple', stderr=True)
+ self.display(new_msg, color='purple', stderr=True)
self._deprecations[new_msg] = 1
def warning(self, msg):
new_msg = "\n[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, 79)
new_msg = "\n".join(wrapped) + "\n"
- if new_msg not in warns:
- self._display(new_msg, color='bright purple', stderr=True)
+ if new_msg not in self._warns:
+ self.display(new_msg, color='bright purple', stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index e2cca104844be2..49748129e125a2 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -21,13 +21,9 @@ from ansible.utils.vars import combine_vars
from ansible.utils.vault import read_vault_file
from ansible.vars import VariableManager
-# Implement an ansible.utils.warning() function later
-def warning(*args, **kwargs):
- print(*args, **kwargs)
-
#---------------------------------------------------------------------------------------------------
-def main(args):
+def main(display, args):
''' run ansible-playbook operations '''
# create parser for CLI options
@@ -122,16 +118,14 @@ def main(args):
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
- warning("provided hosts list is empty, only localhost is available")
+ display.warning("provided hosts list is empty, only localhost is available")
no_hosts = True
inventory.subset(options.subset)
if len(inventory.list_hosts()) == 0 and no_hosts is False:
# Invalid limit
raise errors.AnsibleError("Specified --limit does not match any hosts")
- # create the playbook executor, which manages running the plays
- # via a task queue manager
- display = Display()
+ # create the playbook executor, which manages running the plays via a task queue manager
pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options)
results = pbex.run()
@@ -139,38 +133,34 @@ def main(args):
if isinstance(results, list):
for p in results:
- print('')
- print('playbook: %s' % p['playbook'])
- print('')
-
+ display.display('\nplaybook: %s\n' % p['playbook'])
for play in p['plays']:
if options.listhosts:
- print("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts'])))
+ display.display("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts'])))
for host in play['hosts']:
- print(" %s" % host)
+ display.display(" %s" % host)
if options.listtasks: #TODO: do we want to display block info?
- print("\n %s: task count=%d" % (play['name'], len(play['tasks'])))
+ display.display("\n %s" % (play['name']))
for task in play['tasks']:
- print(" %s" % task)
- if options.listtags:
- print("\n %s: tags count=%d" % (play['name'], len(play['tags'])))
+ display.display(" %s" % task)
+ if options.listtags: #TODO: fix once we figure out block handling above
+ display.display("\n %s: tags count=%d" % (play['name'], len(play['tags'])))
for tag in play['tags']:
- print(" %s" % tag)
+ display.display(" %s" % tag)
+ return 0
else:
return results
if __name__ == "__main__":
- #display(" ", log_only=True)
- #display(" ".join(sys.argv), log_only=True)
- #display(" ", log_only=True)
+
+ display = Display()
+ display.display(" ".join(sys.argv), log_only=True)
+
try:
- sys.exit(main(sys.argv[1:]))
+ sys.exit(main(display, sys.argv[1:]))
except AnsibleError as e:
- #display("ERROR: %s" % e, color='red', stderr=True)
- print(e)
+ display.display("[ERROR]: %s" % e, color='red', stderr=True)
sys.exit(1)
except KeyboardInterrupt:
- #display("ERROR: interrupted", color='red', stderr=True)
- print("keyboard interrupt")
+ display.display("[ERROR]: interrupted", color='red', stderr=True)
sys.exit(1)
-
From 5531b843602d04c95c2d5aed7bf5bb1580f93889 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 4 Apr 2015 15:21:42 -0400
Subject: [PATCH 0252/3617] moved ad-hoc to use display
---
v2/bin/ansible | 33 ++++++++++++++++-----------------
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/v2/bin/ansible b/v2/bin/ansible
index 79d5f0a28b34ee..415a12af2c2359 100755
--- a/v2/bin/ansible
+++ b/v2/bin/ansible
@@ -40,8 +40,12 @@ from ansible.vars import VariableManager
class Cli(object):
''' code behind bin/ansible '''
- def __init__(self):
- pass
+ def __init__(self, display=None):
+
+ if display is None:
+ self.display = Display()
+ else:
+ self.display = display
def parse(self):
''' create an options parser for bin/ansible '''
@@ -105,7 +109,7 @@ class Cli(object):
if options.listhosts:
for host in hosts:
- print(' %s' % host.name)
+ self.display(' %s' % host.name)
sys.exit(0)
if ((options.module_name == 'command' or options.module_name == 'shell') and not options.module_args):
@@ -157,22 +161,17 @@ class Cli(object):
########################################################
if __name__ == '__main__':
- #callbacks.display("", log_only=True)
- #callbacks.display(" ".join(sys.argv), log_only=True)
- #callbacks.display("", log_only=True)
+
+ display = Display()
+ #display.display(" ".join(sys.argv), log_only=True)
try:
- cli = Cli()
+ cli = Cli(display=display)
(options, args) = cli.parse()
- result = cli.run(options, args)
-
- except AnsibleError, e:
- print(e)
+ sys.exit(cli.run(options, args))
+ except AnsibleError as e:
+ display.display("[ERROR]: %s" % e, color='red', stderr=True)
sys.exit(1)
-
- except Exception, e:
- # Generic handler for errors
- print("ERROR: %s" % str(e))
+ except KeyboardInterrupt:
+ display.display("[ERROR]: interrupted", color='red', stderr=True)
sys.exit(1)
-
- sys.exit(result)
From b1e6aaa7903c01b5839af9e7aad4ae1ca0fbc681 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 4 Apr 2015 15:54:54 -0400
Subject: [PATCH 0253/3617] implemented verbosity, added 5th level and now can
 see how many plays per playbook if -vvvvv
---
v2/ansible/executor/playbook_executor.py | 1 +
v2/ansible/utils/display.py | 14 +++++++-------
v2/bin/ansible | 5 +++--
v2/bin/ansible-playbook | 3 ++-
4 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 94bdbf01e1f1ff..ad9570963aad80 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -127,6 +127,7 @@ def run(self):
if entry:
entrylist.append(entry) # per playbook
+ self._display.vvvvv('%d plays in %s' % (i, playbook_path))
if entrylist:
return entrylist
diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py
index dd44d61dd30eca..62dbeabca51f69 100644
--- a/v2/ansible/utils/display.py
+++ b/v2/ansible/utils/display.py
@@ -26,11 +26,9 @@
class Display:
- def __init__(self, conn_info=None):
- if conn_info:
- self._verbosity = conn_info.verbosity
- else:
- self._verbosity = 0
+ def __init__(self, verbosity=0):
+
+ self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
@@ -70,10 +68,13 @@ def vvv(self, msg, host=None):
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
+ def vvvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=4)
+
def verbose(self, msg, host=None, caplevel=2):
# FIXME: this needs to be implemented
#msg = utils.sanitize_output(msg)
- if self._verbosity > caplevel:
+ if self.verbosity > caplevel:
if host is None:
self.display(msg, color='blue')
else:
@@ -124,4 +125,3 @@ def banner(self, msg, color=None):
star_len = 3
stars = "*" * star_len
self.display("\n%s %s" % (msg, stars), color=color)
-
diff --git a/v2/bin/ansible b/v2/bin/ansible
index 415a12af2c2359..7d2f01bc5c5e7c 100755
--- a/v2/bin/ansible
+++ b/v2/bin/ansible
@@ -72,6 +72,7 @@ class Cli(object):
parser.print_help()
sys.exit(1)
+ display.verbosity = options.verbosity
validate_conflicts(parser,options)
return (options, args)
@@ -109,7 +110,7 @@ class Cli(object):
if options.listhosts:
for host in hosts:
- self.display(' %s' % host.name)
+ self.display.display(' %s' % host.name)
sys.exit(0)
if ((options.module_name == 'command' or options.module_name == 'shell') and not options.module_args):
@@ -163,7 +164,7 @@ class Cli(object):
if __name__ == '__main__':
display = Display()
- #display.display(" ".join(sys.argv), log_only=True)
+ #display.display(" ".join(sys.argv))
try:
cli = Cli(display=display)
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index 49748129e125a2..79c2eed785df52 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -53,6 +53,7 @@ def main(display, args):
parser.print_help(file=sys.stderr)
return 1
+ display.verbosity = options.verbosity
validate_conflicts(parser,options)
# Note: slightly wrong, this is written so that implicit localhost
@@ -154,7 +155,7 @@ def main(display, args):
if __name__ == "__main__":
display = Display()
- display.display(" ".join(sys.argv), log_only=True)
+ #display.display(" ".join(sys.argv), log_only=True)
try:
sys.exit(main(display, sys.argv[1:]))
From 4bc79a746ad6f1f9841b6f637d45f69155babf69 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Sat, 4 Apr 2015 16:26:05 -0400
Subject: [PATCH 0254/3617] more fine tuning on verbosity
---
v2/ansible/executor/playbook_executor.py | 6 ++++--
v2/ansible/plugins/connections/__init__.py | 2 +-
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index ad9570963aad80..9f02cddddb6b10 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -70,7 +70,10 @@ def run(self):
entry['plays'] = []
i = 1
- for play in pb.get_plays():
+ plays = pb.get_plays()
+ self._display.vv('%d plays in %s' % (len(plays), playbook_path))
+
+ for play in plays:
self._inventory.remove_restriction()
# Create a temporary copy of the play here, so we can run post_validate
@@ -127,7 +130,6 @@ def run(self):
if entry:
entrylist.append(entry) # per playbook
- self._display.vvvvv('%d plays in %s' % (i, playbook_path))
if entrylist:
return entrylist
diff --git a/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py
index 11015d7431338d..74ff693a331944 100644
--- a/v2/ansible/plugins/connections/__init__.py
+++ b/v2/ansible/plugins/connections/__init__.py
@@ -39,7 +39,7 @@ class ConnectionBase:
def __init__(self, connection_info, *args, **kwargs):
self._connection_info = connection_info
- self._display = Display(connection_info)
+ self._display = Display(verbosity=connection_info.verbosity)
def _become_method_supported(self, become_method):
From e82ba723e2a8c1dd1b7b4eb218ed15cc3235f0bc Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Sun, 5 Apr 2015 01:05:17 -0500
Subject: [PATCH 0255/3617] Fixing multiple v2 bugs
---
v2/ansible/executor/play_iterator.py | 11 +-
v2/ansible/executor/playbook_executor.py | 3 +-
v2/ansible/executor/task_queue_manager.py | 20 ++--
v2/ansible/plugins/action/assemble.py | 2 +-
v2/ansible/plugins/strategies/__init__.py | 1 -
v2/ansible/plugins/strategies/free.py | 135 ++++++++++++++--------
v2/samples/test_free.yml | 10 ++
v2/samples/test_pb.yml | 44 ++-----
8 files changed, 123 insertions(+), 103 deletions(-)
create mode 100644 v2/samples/test_free.yml
diff --git a/v2/ansible/executor/play_iterator.py b/v2/ansible/executor/play_iterator.py
index d6fe3750955943..38bebb21132c9c 100644
--- a/v2/ansible/executor/play_iterator.py
+++ b/v2/ansible/executor/play_iterator.py
@@ -88,18 +88,11 @@ class PlayIterator:
FAILED_ALWAYS = 8
def __init__(self, inventory, play):
- # FIXME: should we save the post_validated play from below here instead?
self._play = play
- # post validate the play, as we need some fields to be finalized now
- # so that we can use them to setup the iterator properly
- all_vars = inventory._variable_manager.get_vars(loader=inventory._loader, play=play)
- new_play = play.copy()
- new_play.post_validate(all_vars, fail_on_undefined=False)
-
- self._blocks = new_play.compile()
+ self._blocks = self._play.compile()
self._host_states = {}
- for host in inventory.get_hosts(new_play.hosts):
+ for host in inventory.get_hosts(self._play.hosts):
self._host_states[host.name] = HostState(blocks=self._blocks)
def get_host_state(self, host):
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 9f02cddddb6b10..6504fddfc8217a 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -124,7 +124,7 @@ def run(self):
break
if result != 0:
- raise AnsibleError("Play failed!: %d" % result)
+ break
i = i + 1 # per play
@@ -138,7 +138,6 @@ def run(self):
if self._tqm is not None:
self._cleanup()
- #TODO: move to callback
# FIXME: this stat summary stuff should be cleaned up and moved
# to a new method, if it even belongs here...
self._display.banner("PLAY RECAP")
diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py
index 28904676eb28d4..d0354786da9b3b 100644
--- a/v2/ansible/executor/task_queue_manager.py
+++ b/v2/ansible/executor/task_queue_manager.py
@@ -123,7 +123,8 @@ def _initialize_notified_handlers(self, handlers):
# FIXME: there is a block compile helper for this...
handler_list = []
for handler_block in handlers:
- handler_list.extend(handler_block.compile())
+ for handler in handler_block.block:
+ handler_list.append(handler)
# then initalize it with the handler names from the handler list
for handler in handler_list:
@@ -138,23 +139,28 @@ def run(self, play):
are done with the current task).
'''
- connection_info = ConnectionInformation(play, self._options)
+ all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
+
+ new_play = play.copy()
+ new_play.post_validate(all_vars, fail_on_undefined=False)
+
+ connection_info = ConnectionInformation(new_play, self._options)
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_connection_info'):
callback_plugin.set_connection_info(connection_info)
- self.send_callback('v2_playbook_on_play_start', play)
+ self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
- self._initialize_notified_handlers(play.handlers)
+ self._initialize_notified_handlers(new_play.handlers)
# load the specified strategy (or the default linear one)
- strategy = strategy_loader.get(play.strategy, self)
+ strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
- raise AnsibleError("Invalid play strategy specified: %s" % play.strategy, obj=play._ds)
+ raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
- iterator = PlayIterator(inventory=self._inventory, play=play)
+ iterator = PlayIterator(inventory=self._inventory, play=new_play)
# and run the play using the strategy
return strategy.run(iterator, connection_info)
diff --git a/v2/ansible/plugins/action/assemble.py b/v2/ansible/plugins/action/assemble.py
index b1bdc06c6d3873..638d4b92bb5568 100644
--- a/v2/ansible/plugins/action/assemble.py
+++ b/v2/ansible/plugins/action/assemble.py
@@ -90,7 +90,7 @@ def run(self, tmp=None, task_vars=dict()):
src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
else:
# the source is local, so expand it here
- src = os.path.expanduser(src)
+ src = self._loader.path_dwim(os.path.expanduser(src))
_re = None
if regexp is not None:
diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py
index 59c0b9b84eef6a..afbc373f4f3332 100644
--- a/v2/ansible/plugins/strategies/__init__.py
+++ b/v2/ansible/plugins/strategies/__init__.py
@@ -390,7 +390,6 @@ def run_handlers(self, iterator, connection_info):
# of handlers based on the notified list
for handler_block in iterator._play.handlers:
- debug("handlers are: %s" % handlers)
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
diff --git a/v2/ansible/plugins/strategies/free.py b/v2/ansible/plugins/strategies/free.py
index 6aab495fec3ed1..4fd8a132018ca3 100644
--- a/v2/ansible/plugins/strategies/free.py
+++ b/v2/ansible/plugins/strategies/free.py
@@ -22,6 +22,7 @@
import time
from ansible.plugins.strategies import StrategyBase
+from ansible.utils.debug import debug
class StrategyModule(StrategyBase):
@@ -42,66 +43,106 @@ def run(self, iterator, connection_info):
# the last host to be given a task
last_host = 0
+ result = True
+
work_to_do = True
while work_to_do and not self._tqm._terminated:
- hosts_left = self.get_hosts_remaining()
+ hosts_left = self.get_hosts_remaining(iterator._play)
if len(hosts_left) == 0:
- self._callback.playbook_on_no_hosts_remaining()
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ result = False
break
- # using .qsize() is a best estimate anyway, due to the
- # multiprocessing/threading concerns (per the python docs)
- if 1: #if self._job_queue.qsize() < len(hosts_left):
-
- work_to_do = False # assume we have no more work to do
- starting_host = last_host # save current position so we know when we've
- # looped back around and need to break
-
- # try and find an unblocked host with a task to run
- while True:
- host = hosts_left[last_host]
- host_name = host.get_name()
-
- # peek at the next task for the host, to see if there's
- # anything to do do for this host
- if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and iterator.get_next_task_for_host(host, peek=True):
-
- # FIXME: check task tags, etc. here as we do in linear
- # FIXME: handle meta tasks here, which will require a tweak
- # to run_handlers so that only the handlers on this host
- # are flushed and not all
-
- # set the flag so the outer loop knows we've still found
- # some work which needs to be done
- work_to_do = True
-
- # check to see if this host is blocked (still executing a previous task)
- if not host_name in self._blocked_hosts:
- # pop the task, mark the host blocked, and queue it
- self._blocked_hosts[host_name] = True
- task = iterator.get_next_task_for_host(host)
- #self._callback.playbook_on_task_start(task.get_name(), False)
- self._queue_task(iterator._play, host, task, connection_info)
-
- # move on to the next host and make sure we
- # haven't gone past the end of our hosts list
- last_host += 1
- if last_host > len(hosts_left) - 1:
- last_host = 0
-
- # if we've looped around back to the start, break out
- if last_host == starting_host:
- break
+ work_to_do = False # assume we have no more work to do
+ starting_host = last_host # save current position so we know when we've
+ # looped back around and need to break
+
+ # try and find an unblocked host with a task to run
+ host_results = []
+ while True:
+ host = hosts_left[last_host]
+ debug("next free host: %s" % host)
+ host_name = host.get_name()
+
+ # peek at the next task for the host, to see if there's
+ # anything to do do for this host
+ (state, task) = iterator.get_next_task_for_host(host, peek=True)
+ debug("free host state: %s" % state)
+ debug("free host task: %s" % task)
+ if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:
+
+ # set the flag so the outer loop knows we've still found
+ # some work which needs to be done
+ work_to_do = True
+
+ debug("this host has work to do")
+
+ # check to see if this host is blocked (still executing a previous task)
+ if not host_name in self._blocked_hosts:
+ # pop the task, mark the host blocked, and queue it
+ self._blocked_hosts[host_name] = True
+ (state, task) = iterator.get_next_task_for_host(host)
+
+ debug("getting variables")
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
+ debug("done getting variables")
+
+ # check to see if this task should be skipped, due to it being a member of a
+ # role which has already run (and whether that role allows duplicate execution)
+ if task._role and task._role.has_run():
+ # If there is no metadata, the default behavior is to not allow duplicates,
+ # if there is metadata, check to see if the allow_duplicates flag was set to true
+ if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
+ debug("'%s' skipped because role has already run" % task)
+ continue
+
+ if not task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, task_vars) and task.action != 'setup':
+ debug("'%s' failed tag evaluation" % task)
+ continue
+
+ if task.action == 'meta':
+ # meta tasks store their args in the _raw_params field of args,
+ # since they do not use k=v pairs, so get that
+ meta_action = task.args.get('_raw_params')
+ if meta_action == 'noop':
+ # FIXME: issue a callback for the noop here?
+ continue
+ elif meta_action == 'flush_handlers':
+ # FIXME: in the 'free' mode, flushing handlers should result in
+ # only those handlers notified for the host doing the flush
+ self.run_handlers(iterator, connection_info)
+ else:
+ raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
+
+ self._blocked_hosts[host_name] = False
+ else:
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+ self._queue_task(host, task, task_vars, connection_info)
+
+ # move on to the next host and make sure we
+ # haven't gone past the end of our hosts list
+ last_host += 1
+ if last_host > len(hosts_left) - 1:
+ last_host = 0
+
+ # if we've looped around back to the start, break out
+ if last_host == starting_host:
+ break
+
+ results = self._process_pending_results(iterator)
+ host_results.extend(results)
# pause briefly so we don't spin lock
time.sleep(0.05)
try:
- self._wait_for_pending_results()
- except:
+ results = self._wait_on_pending_results(iterator)
+ host_results.extend(results)
+ except Exception, e:
# FIXME: ctrl+c can cause some failures here, so catch them
# with the appropriate error type
+ print("wtf: %s" % e)
pass
# run the base class run() method, which executes the cleanup function
diff --git a/v2/samples/test_free.yml b/v2/samples/test_free.yml
new file mode 100644
index 00000000000000..d5f8bcaac944c3
--- /dev/null
+++ b/v2/samples/test_free.yml
@@ -0,0 +1,10 @@
+- hosts: all
+ strategy: free
+ gather_facts: no
+ tasks:
+ - debug: msg="all hosts should print this"
+ - pause: seconds=5
+ when: inventory_hostname == 'l2'
+ - pause: seconds=10
+ when: inventory_hostname == 'l3'
+ - debug: msg="and we're done"
diff --git a/v2/samples/test_pb.yml b/v2/samples/test_pb.yml
index 3912d4566b293b..ab5b7ab2954f87 100644
--- a/v2/samples/test_pb.yml
+++ b/v2/samples/test_pb.yml
@@ -1,12 +1,7 @@
# will use linear strategy by default
-- hosts:
- - "{{hosts|default('all')}}"
- #- ubuntu1404
- #- awxlocal
- connection: ssh
+- hosts: "{{hosts|default('all')}}"
#gather_facts: false
- #strategy: free
- #serial: 3
+ strategy: "{{strategy|default('linear')}}"
vars:
play_var: foo
test_dict:
@@ -15,14 +10,9 @@
vars_files:
- testing/vars.yml
tasks:
- - block:
- - debug: var=ansible_nodename
- when: ansible_nodename == "ubuntu1404"
- block:
- debug: msg="in block for {{inventory_hostname}} ({{ansible_nodename}}), group_var is {{group_var}}, host var is {{host_var}}"
notify: foo
- - debug: msg="test dictionary is {{test_dict}}"
- when: asdf is defined
- command: hostname
register: hostname_result
- debug: msg="registered result is {{hostname_result.stdout}}"
@@ -31,26 +21,18 @@
sudo_user: testing
- assemble: src=./testing/ dest=/tmp/output.txt remote_src=no
- copy: content="hello world\n" dest=/tmp/copy_content.out mode=600
- - command: /bin/false
- retries: "{{num_retries|default(5)}}"
- delay: 1
- - debug: msg="you shouldn't see me"
+ #- command: /bin/false
+ # retries: "{{num_retries|default(5)}}"
+ # delay: 1
+ #- debug: msg="you shouldn't see me"
rescue:
- debug: msg="this is the rescue"
- command: /bin/false
- debug: msg="you should not see this rescue message"
always:
- debug: msg="this is the always block, it should always be seen"
- - command: /bin/false
- - debug: msg="you should not see this always message"
-
- #- debug: msg="linear task 01"
- #- debug: msg="linear task 02"
- #- debug: msg="linear task 03"
- # with_items:
- # - a
- # - b
- # - c
+ #- command: /bin/false
+ #- debug: msg="you should not see this always message"
handlers:
- name: foo
@@ -58,13 +40,3 @@
- name: bar
debug: msg="this is the bar handler, you should not see this"
-#- hosts: all
-# connection: local
-# strategy: free
-# tasks:
-# - ping:
-# - command: /bin/false
-# - debug: msg="free task 01"
-# - debug: msg="free task 02"
-# - debug: msg="free task 03"
-
From bb3f50361e4c616e57550b15ed609738a7d00ae8 Mon Sep 17 00:00:00 2001
From: Mohamed Hazem
Date: Sun, 5 Apr 2015 20:47:22 +0300
Subject: [PATCH 0256/3617] Replaced --start-at with --start-at-task
---
docsite/rst/playbooks_startnstep.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docsite/rst/playbooks_startnstep.rst b/docsite/rst/playbooks_startnstep.rst
index 1067c3e121452b..106fd2d5de4109 100644
--- a/docsite/rst/playbooks_startnstep.rst
+++ b/docsite/rst/playbooks_startnstep.rst
@@ -8,9 +8,9 @@ This shows a few alternative ways to run playbooks. These modes are very useful
Start-at-task
`````````````
-If you want to start executing your playbook at a particular task, you can do so with the ``--start-at`` option::
+If you want to start executing your playbook at a particular task, you can do so with the ``--start-at-task`` option::
- ansible-playbook playbook.yml --start-at="install packages"
+ ansible-playbook playbook.yml --start-at-task="install packages"
The above will start executing your playbook at a task named "install packages".
From e79c9202602f123375dbbdeaef205ec10b74f597 Mon Sep 17 00:00:00 2001
From: Joost Molenaar
Date: Tue, 19 Aug 2014 12:04:27 +0200
Subject: [PATCH 0257/3617] Add support for Arch to module_utils.basic.py
Fixes ansible/ansible#8653
---
lib/ansible/module_utils/basic.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index aaaf85e5e057e5..eeb64d972485ed 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -181,7 +181,8 @@ def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
- distribution = platform.linux_distribution()[0].capitalize()
+ supported_dists = platform._supported_dists + ('arch',)
+ distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
From c7f33627950d352cdca46f71c93b0783981b8c89 Mon Sep 17 00:00:00 2001
From: Johannes 'fish' Ziemke
Date: Mon, 6 Apr 2015 14:43:39 +0200
Subject: [PATCH 0258/3617] Replace - in ec2 inventory as well
Dash (-) is not a valid ansible group name character, so it needs to be replaced as well.
---
plugins/inventory/ec2.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py
index e93df1053d1e53..76871b0266dba0 100755
--- a/plugins/inventory/ec2.py
+++ b/plugins/inventory/ec2.py
@@ -787,7 +787,7 @@ def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be
used as Ansible groups '''
- return re.sub("[^A-Za-z0-9\-]", "_", word)
+ return re.sub("[^A-Za-z0-9\_]", "_", word)
def json_format_dict(self, data, pretty=False):
From 5150d83d01166b498af050c6806b83c94ed5e906 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Mon, 6 Apr 2015 12:15:07 -0500
Subject: [PATCH 0259/3617] Fixing the version in lib/
---
lib/ansible/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py
index 27e79a41cadf52..200ecb79e361d6 100644
--- a/lib/ansible/__init__.py
+++ b/lib/ansible/__init__.py
@@ -14,5 +14,5 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-__version__ = '1.9'
+__version__ = '2.0'
__author__ = 'Michael DeHaan'
From 2244178c6da5faa5a235b1dfcf292521e8f6823c Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Mon, 6 Apr 2015 14:09:49 -0500
Subject: [PATCH 0260/3617] Updating debian packaging changelog for devel 2.0
version
---
packaging/debian/changelog | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/packaging/debian/changelog b/packaging/debian/changelog
index 843ca7f6f5ef50..aa03e724d07e3f 100644
--- a/packaging/debian/changelog
+++ b/packaging/debian/changelog
@@ -1,8 +1,14 @@
-ansible (1.9) unstable; urgency=low
+ansible (2.0.0) unstable; urgency=low
- * 1.9 release (PENDING)
+ * 2.0.0 (in progress)
- -- Ansible, Inc. Wed, 21 Oct 2015 04:29:00 -0500
+ -- Ansible, Inc. Fri, 01 Jan 2016 00:00:00 -0500
+
+ansible (1.9.0.1) unstable; urgency=low
+
+ * 1.9 release
+
+ -- Ansible, Inc. Wed, 25 Mar 2015 17:00:00 -0500
ansible (1.8.4) unstable; urgency=low
From 43775daa4bbcf6c02cfefa491250b1619701f1bf Mon Sep 17 00:00:00 2001
From: Jason DeTiberus
Date: Mon, 6 Apr 2015 16:47:52 -0400
Subject: [PATCH 0261/3617] Fix indentation
---
lib/ansible/module_utils/facts.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 40be989241f6d2..628d1dd267833c 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -2452,7 +2452,7 @@ def get_virtual_facts(self):
self.facts['virtualization_role'] = 'guest'
return
- if sys_vendor == 'oVirt':
+ if sys_vendor == 'oVirt':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
From f6c116a81fc19ed1470901b153a72b411b0e8cef Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Mon, 6 Apr 2015 18:30:38 -0500
Subject: [PATCH 0262/3617] Updating version to contain the full major/release
---
lib/ansible/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py
index 200ecb79e361d6..ba5ca83b7231d1 100644
--- a/lib/ansible/__init__.py
+++ b/lib/ansible/__init__.py
@@ -14,5 +14,5 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-__version__ = '2.0'
+__version__ = '2.0.0'
__author__ = 'Michael DeHaan'
From d732c94ac23be49e71df1410027b3f39f9d86b68 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 6 Apr 2015 22:31:55 -0400
Subject: [PATCH 0263/3617] a bunch of updates to connection info and related,
to pass down passwords also now options populate required fields in required
order allowing play to override added capture of debug in action plugins when
stdout is not json
---
v2/ansible/executor/connection_info.py | 77 +++++++++++++++--------
v2/ansible/executor/playbook_executor.py | 5 +-
v2/ansible/executor/task_queue_manager.py | 5 +-
v2/ansible/playbook/play.py | 2 +-
v2/ansible/plugins/action/__init__.py | 6 +-
v2/ansible/plugins/connections/local.py | 3 +
v2/ansible/plugins/connections/ssh.py | 4 +-
v2/bin/ansible | 3 +-
v2/bin/ansible-playbook | 2 +-
9 files changed, 72 insertions(+), 35 deletions(-)
diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py
index 165cd1245fb44c..19c8b130c72f66 100644
--- a/v2/ansible/executor/connection_info.py
+++ b/v2/ansible/executor/connection_info.py
@@ -38,34 +38,40 @@ class ConnectionInformation:
connection/authentication information.
'''
- def __init__(self, play=None, options=None):
- # FIXME: implement the new methodology here for supporting
- # various different auth escalation methods (becomes, etc.)
+ def __init__(self, play=None, options=None, passwords=None):
- self.connection = C.DEFAULT_TRANSPORT
+ if passwords is None:
+ passwords = {}
+
+ # connection
+ self.connection = None
self.remote_addr = None
- self.remote_user = 'root'
- self.password = ''
- self.port = 22
+ self.remote_user = None
+ self.password = passwords.get('conn_pass','')
+ self.port = None
self.private_key_file = None
- self.verbosity = 0
- self.only_tags = set()
- self.skip_tags = set()
# privilege escalation
- self.become = False
- self.become_method = C.DEFAULT_BECOME_METHOD
- self.become_user = ''
- self.become_pass = ''
+ self.become = None
+ self.become_method = None
+ self.become_user = None
+ self.become_pass = passwords.get('become_pass','')
+ # general flags (should we move out?)
+ self.verbosity = 0
+ self.only_tags = set()
+ self.skip_tags = set()
self.no_log = False
self.check_mode = False
+ #TODO: just pull options setup to above?
+ # set options before play to allow play to override them
+ if options:
+ self.set_options(options)
+
if play:
self.set_play(play)
- if options:
- self.set_options(options)
def __repr__(self):
value = "CONNECTION INFO:\n"
@@ -84,12 +90,18 @@ def set_play(self, play):
if play.connection:
self.connection = play.connection
- self.remote_user = play.remote_user
- self.password = ''
- self.port = int(play.port) if play.port else 22
- self.become = play.become
- self.become_method = play.become_method
- self.become_user = play.become_user
+ if play.remote_user:
+ self.remote_user = play.remote_user
+
+ if play.port:
+ self.port = int(play.port)
+
+ if play.become is not None:
+ self.become = play.become
+ if play.become_method:
+ self.become_method = play.become_method
+ if play.become_user:
+ self.become_user = play.become_user
self.become_pass = play.become_pass
# non connection related
@@ -103,15 +115,30 @@ def set_options(self, options):
higher precedence than those set on the play or host.
'''
- # FIXME: set other values from options here?
-
- self.verbosity = options.verbosity
if options.connection:
self.connection = options.connection
+ self.remote_user = options.remote_user
+ #if 'port' in options and options.port is not None:
+ # self.port = options.port
+ self.private_key_file = None
+
+ # privilege escalation
+ self.become = options.become
+ self.become_method = options.become_method
+ self.become_user = options.become_user
+ self.become_pass = ''
+
+ # general flags (should we move out?)
+ if options.verbosity:
+ self.verbosity = options.verbosity
+ #if options.no_log:
+ # self.no_log = boolean(options.no_log)
if options.check:
self.check_mode = boolean(options.check)
+
+
# get the tag info from options, converting a comma-separated list
# of values into a proper list if need be. We check to see if the
# options have the attribute, as it is not always added via the CLI
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 6504fddfc8217a..40c0798b0034b4 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -36,18 +36,19 @@ class PlaybookExecutor:
basis for bin/ansible-playbook operation.
'''
- def __init__(self, playbooks, inventory, variable_manager, loader, display, options):
+ def __init__(self, playbooks, inventory, variable_manager, loader, display, options, conn_pass, become_pass):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._display = display
self._options = options
+ self.passwords = {'conn_pass': conn_pass, 'become_pass': become_pass}
if options.listhosts or options.listtasks or options.listtags:
self._tqm = None
else:
- self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, display=display, options=options)
+ self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords)
def run(self):
diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py
index d0354786da9b3b..026726b3d8e02d 100644
--- a/v2/ansible/executor/task_queue_manager.py
+++ b/v2/ansible/executor/task_queue_manager.py
@@ -48,7 +48,7 @@ class TaskQueueManager:
which dispatches the Play's tasks to hosts.
'''
- def __init__(self, inventory, callback, variable_manager, loader, display, options):
+ def __init__(self, inventory, callback, variable_manager, loader, display, options, passwords):
self._inventory = inventory
self._variable_manager = variable_manager
@@ -56,6 +56,7 @@ def __init__(self, inventory, callback, variable_manager, loader, display, optio
self._display = display
self._options = options
self._stats = AggregateStats()
+ self.passwords = passwords
# a special flag to help us exit cleanly
self._terminated = False
@@ -144,7 +145,7 @@ def run(self, play):
new_play = play.copy()
new_play.post_validate(all_vars, fail_on_undefined=False)
- connection_info = ConnectionInformation(new_play, self._options)
+ connection_info = ConnectionInformation(new_play, self._options, self.passwords)
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_connection_info'):
callback_plugin.set_connection_info(connection_info)
diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py
index eeabfce062a4d6..33fd5efd9fa417 100644
--- a/v2/ansible/playbook/play.py
+++ b/v2/ansible/playbook/play.py
@@ -61,7 +61,7 @@ class Play(Base, Taggable, Become):
_hosts = FieldAttribute(isa='list', default=[], required=True)
_name = FieldAttribute(isa='string', default='')
_port = FieldAttribute(isa='int', default=22)
- _remote_user = FieldAttribute(isa='string', default='root')
+ _remote_user = FieldAttribute(isa='string')
# Variable Attributes
_vars = FieldAttribute(isa='dict', default=dict())
diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py
index 2d258dd5250a9b..2f56c4df582eb6 100644
--- a/v2/ansible/plugins/action/__init__.py
+++ b/v2/ansible/plugins/action/__init__.py
@@ -415,7 +415,11 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_
# FIXME: in error situations, the stdout may not contain valid data, so we
# should check for bad rc codes better to catch this here
if 'stdout' in res and res['stdout'].strip():
- data = json.loads(self._filter_leading_non_json_lines(res['stdout']))
+ try:
+ data = json.loads(self._filter_leading_non_json_lines(res['stdout']))
+ except ValueError:
+ # not valid json, lets try to capture error
+ data = {'traceback': res['stdout']}
if 'parsed' in data and data['parsed'] == False:
data['msg'] += res['stderr']
# pre-split stdout into lines, if stdout is in the data and there
diff --git a/v2/ansible/plugins/connections/local.py b/v2/ansible/plugins/connections/local.py
index c847ee79d5d0ef..31d0b296e4aee1 100644
--- a/v2/ansible/plugins/connections/local.py
+++ b/v2/ansible/plugins/connections/local.py
@@ -37,6 +37,9 @@ def get_transport(self):
def connect(self, port=None):
''' connect to the local host; nothing to do here '''
+
+ self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._connection_info.remote_addr)
+
return self
def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py
index e233a704f987a4..e59311ead96df6 100644
--- a/v2/ansible/plugins/connections/ssh.py
+++ b/v2/ansible/plugins/connections/ssh.py
@@ -57,7 +57,7 @@ def get_transport(self):
def connect(self):
''' connect to the remote host '''
- self._display.vvv("ESTABLISH CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._connection_info.remote_addr)
+ self._display.vvv("ESTABLISH SSH CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._connection_info.remote_addr)
self._common_args = []
extra_args = C.ANSIBLE_SSH_ARGS
@@ -99,7 +99,7 @@ def connect(self):
self._common_args += ["-o", "KbdInteractiveAuthentication=no",
"-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
"-o", "PasswordAuthentication=no"]
- if self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]:
+ if self._connection_info.remote_user is not None and self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]:
self._common_args += ["-o", "User="+self._connection_info.remote_user]
# FIXME: figure out where this goes
#self._common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
diff --git a/v2/bin/ansible b/v2/bin/ansible
index 7d2f01bc5c5e7c..9b3ccd38be673b 100755
--- a/v2/bin/ansible
+++ b/v2/bin/ansible
@@ -93,6 +93,7 @@ class Cli(object):
normalize_become_options(options)
(sshpass, becomepass, vault_pass) = ask_passwords(options)
+ passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
if options.vault_password_file:
# read vault_pass from a file
@@ -138,7 +139,7 @@ class Cli(object):
# now create a task queue manager to execute the play
try:
display = Display()
- tqm = TaskQueueManager(inventory=inventory, callback='minimal', variable_manager=variable_manager, loader=loader, display=display, options=options)
+ tqm = TaskQueueManager(inventory=inventory, callback='minimal', variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords)
result = tqm.run(play)
tqm.cleanup()
except AnsibleError:
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index 79c2eed785df52..000a0b74c7a84c 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -127,7 +127,7 @@ def main(display, args):
raise errors.AnsibleError("Specified --limit does not match any hosts")
# create the playbook executor, which manages running the plays via a task queue manager
- pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options)
+ pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, conn_pass=sshpass, become_pass=becomepass)
results = pbex.run()
From 7076298dc1eb03fbf6bea1fe5f58fcdc2a6b54e0 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Mon, 6 Apr 2015 22:27:14 -0500
Subject: [PATCH 0264/3617] Adding FIXME note to playbook executor code
regarding password params
---
v2/ansible/executor/playbook_executor.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 40c0798b0034b4..20aad364766ba5 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -36,6 +36,8 @@ class PlaybookExecutor:
basis for bin/ansible-playbook operation.
'''
+ # FIXME: passwords should not be passed in piecemeal like this,
+ # if they're just going to be stuck in a dict later.
def __init__(self, playbooks, inventory, variable_manager, loader, display, options, conn_pass, become_pass):
self._playbooks = playbooks
self._inventory = inventory
From faadb6830899138de2dfcfca3973a898c5ace3a2 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 6 Apr 2015 23:37:32 -0400
Subject: [PATCH 0265/3617] backup_local now only tries to back up existing
files, returns '' otherwise
---
lib/ansible/module_utils/basic.py | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index aaaf85e5e057e5..54a1a9cfff7f88 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -1297,14 +1297,18 @@ def sha256(self, filename):
def backup_local(self, fn):
'''make a date-marked backup of the specified file, return True or False on success or failure'''
- # backups named basename-YYYY-MM-DD@HH:MM:SS~
- ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
- backupdest = '%s.%s' % (fn, ext)
- try:
- shutil.copy2(fn, backupdest)
- except (shutil.Error, IOError), e:
- self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
+ backupdest = ''
+ if os.path.exists(fn):
+ # backups named basename-YYYY-MM-DD@HH:MM:SS~
+ ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
+ backupdest = '%s.%s' % (fn, ext)
+
+ try:
+ shutil.copy2(fn, backupdest)
+ except (shutil.Error, IOError), e:
+ self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
+
return backupdest
def cleanup(self, tmpfile):
From 9409cc74432e4841b469481ffb250ee4459ef2cc Mon Sep 17 00:00:00 2001
From: Kimmo Koskinen
Date: Tue, 7 Apr 2015 14:26:42 +0300
Subject: [PATCH 0266/3617] Use codecs module while reading & writing json
cache file
---
lib/ansible/cache/jsonfile.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py
index b7d72c8d2e865d..93ee69903beff6 100644
--- a/lib/ansible/cache/jsonfile.py
+++ b/lib/ansible/cache/jsonfile.py
@@ -18,6 +18,7 @@
import os
import time
import errno
+import codecs
try:
import simplejson as json
@@ -57,7 +58,7 @@ def get(self, key):
cachefile = "%s/%s" % (self._cache_dir, key)
try:
- f = open( cachefile, 'r')
+ f = codecs.open(cachefile, 'r', encoding='utf-8')
except (OSError,IOError), e:
utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
else:
@@ -73,7 +74,7 @@ def set(self, key, value):
cachefile = "%s/%s" % (self._cache_dir, key)
try:
- f = open(cachefile, 'w')
+ f = codecs.open(cachefile, 'w', encoding='utf-8')
except (OSError,IOError), e:
utils.warning("error while trying to read %s : %s" % (cachefile, str(e)))
else:
From b8a9d87f30c86b7737b3cf63c4de67fd8547ce0e Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 7 Apr 2015 08:22:56 -0500
Subject: [PATCH 0267/3617] Fixing the VERSION file to match the expected
"version release" format
---
VERSION | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/VERSION b/VERSION
index cd5ac039d67e0b..a4b5d82d9e5211 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.0
+2.0.0 0.0.pre
From 1cf911d5244bc15640823bfa59acd08c421d7940 Mon Sep 17 00:00:00 2001
From: James Cammarata
Date: Tue, 7 Apr 2015 09:54:19 -0500
Subject: [PATCH 0268/3617] Back-porting Makefile changes for version/release
---
Makefile | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/Makefile b/Makefile
index 81e24efab367d5..636986028e8628 100644
--- a/Makefile
+++ b/Makefile
@@ -34,7 +34,8 @@ PYTHON=python
SITELIB = $(shell $(PYTHON) -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")
# VERSION file provides one place to update the software version
-VERSION := $(shell cat VERSION)
+VERSION := $(shell cat VERSION | cut -f1 -d' ')
+RELEASE := $(shell cat VERSION | cut -f2 -d' ')
# Get the branch information from git
ifneq ($(shell which git),)
@@ -53,7 +54,7 @@ DEBUILD_OPTS = --source-option="-I"
DPUT_BIN ?= dput
DPUT_OPTS ?=
ifeq ($(OFFICIAL),yes)
- DEB_RELEASE = 1ppa
+ DEB_RELEASE = $(RELEASE)ppa
# Sign OFFICIAL builds using 'DEBSIGN_KEYID'
# DEBSIGN_KEYID is required when signing
ifneq ($(DEBSIGN_KEYID),)
@@ -74,7 +75,7 @@ DEB_DIST ?= unstable
RPMSPECDIR= packaging/rpm
RPMSPEC = $(RPMSPECDIR)/ansible.spec
RPMDIST = $(shell rpm --eval '%{?dist}')
-RPMRELEASE = 1
+RPMRELEASE = $(RELEASE)
ifneq ($(OFFICIAL),yes)
RPMRELEASE = 0.git$(DATE)
endif
From 72457e4326b51cd6066dbdeea75755de0d1a4caf Mon Sep 17 00:00:00 2001
From: John Galt
Date: Tue, 7 Apr 2015 12:19:37 -0400
Subject: [PATCH 0269/3617] Fixed typo
---
plugins/inventory/ec2.ini | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini
index 523a80ed8334bf..1866f0bf3d6c27 100644
--- a/plugins/inventory/ec2.ini
+++ b/plugins/inventory/ec2.ini
@@ -33,7 +33,7 @@ destination_variable = public_dns_name
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
-# be run from with EC2. The key of an EC2 tag may optionally be used; however
+# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
vpc_destination_variable = ip_address
From 665babdaab7fc5949cf319f66854711b1bc01a60 Mon Sep 17 00:00:00 2001
From: Mengdi Gao
Date: Wed, 8 Apr 2015 14:19:45 +0800
Subject: [PATCH 0270/3617] Remove redundant whitespace.
---
docsite/rst/playbooks_intro.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst
index 4751467b016857..4e10528b8c65cd 100644
--- a/docsite/rst/playbooks_intro.rst
+++ b/docsite/rst/playbooks_intro.rst
@@ -334,7 +334,7 @@ Here's an example handlers section::
handlers:
- name: restart memcached
- service: name=memcached state=restarted
+ service: name=memcached state=restarted
- name: restart apache
service: name=apache state=restarted
From 3c9890a35893f63ff7ba61ba1795d3fa1fbaa8f6 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 8 Apr 2015 03:16:13 -0400
Subject: [PATCH 0271/3617] now in v2 everything passes a single passwords hash
---
v2/ansible/executor/playbook_executor.py | 6 ++----
v2/bin/ansible-playbook | 3 ++-
2 files changed, 4 insertions(+), 5 deletions(-)
diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py
index 20aad364766ba5..8af19ed378fa3d 100644
--- a/v2/ansible/executor/playbook_executor.py
+++ b/v2/ansible/executor/playbook_executor.py
@@ -36,16 +36,14 @@ class PlaybookExecutor:
basis for bin/ansible-playbook operation.
'''
- # FIXME: passwords should not be passed in piecemeal like this,
- # if they're just going to be stuck in a dict later.
- def __init__(self, playbooks, inventory, variable_manager, loader, display, options, conn_pass, become_pass):
+ def __init__(self, playbooks, inventory, variable_manager, loader, display, options, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._display = display
self._options = options
- self.passwords = {'conn_pass': conn_pass, 'become_pass': become_pass}
+ self.passwords = passwords
if options.listhosts or options.listtasks or options.listtags:
self._tqm = None
diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook
index 000a0b74c7a84c..d663e2e0a3fd4a 100755
--- a/v2/bin/ansible-playbook
+++ b/v2/bin/ansible-playbook
@@ -66,6 +66,7 @@ def main(display, args):
if not options.listhosts and not options.listtasks and not options.listtags:
normalize_become_options(options)
(sshpass, becomepass, vault_pass) = ask_passwords(options)
+ passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
if options.vault_password_file:
# read vault_pass from a file
@@ -127,7 +128,7 @@ def main(display, args):
raise errors.AnsibleError("Specified --limit does not match any hosts")
# create the playbook executor, which manages running the plays via a task queue manager
- pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, conn_pass=sshpass, become_pass=becomepass)
+ pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords)
results = pbex.run()
From e122236f55d8666a0ad5f9df7833597a1105beec Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 8 Apr 2015 03:18:13 -0400
Subject: [PATCH 0272/3617] updated submodule refs
---
lib/ansible/modules/core | 2 +-
lib/ansible/modules/extras | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
index 04c34cfa02185a..5f58240d176a74 160000
--- a/lib/ansible/modules/core
+++ b/lib/ansible/modules/core
@@ -1 +1 @@
-Subproject commit 04c34cfa02185a8d74165f5bdc96371ec6df37a8
+Subproject commit 5f58240d176a74b8eb0da0b45cf60e498d11ab34
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
index 21fce8ac730346..4048de9c1e2333 160000
--- a/lib/ansible/modules/extras
+++ b/lib/ansible/modules/extras
@@ -1 +1 @@
-Subproject commit 21fce8ac730346b4e77427e3582553f2dc93c675
+Subproject commit 4048de9c1e2333aa7880b61f34af8cbdce5cbcec
From 1c796543c9d9e46c0beefb9b3f6d22d4d97f875b Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Wed, 8 Apr 2015 03:30:21 -0400
Subject: [PATCH 0273/3617] fix for when calling bootinfo throws permission
errors (AIX) fixes
https://github.com/ansible/ansible-modules-core/issues/1108
---
lib/ansible/module_utils/facts.py | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 628d1dd267833c..21bbc93d4d102a 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -172,9 +172,12 @@ def get_platform_facts(self):
if self.facts['system'] == 'Linux':
self.get_distribution_facts()
elif self.facts['system'] == 'AIX':
- rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
- data = out.split('\n')
- self.facts['architecture'] = data[0]
+ try:
+ rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
+ data = out.split('\n')
+ self.facts['architecture'] = data[0]
+ except:
+            self.facts['architecture'] = 'Not Available'
elif self.facts['system'] == 'OpenBSD':
self.facts['architecture'] = platform.uname()[5]
From 3ae4ee9c52171d58068d90a6c11ad48ad86a8769 Mon Sep 17 00:00:00 2001
From: Niall Donegan
Date: Wed, 8 Apr 2015 14:24:21 +0100
Subject: [PATCH 0274/3617] Updated outdated link to module directory.
Core modules link updated and Extras link added.
---
docsite/rst/common_return_values.rst | 4 +++-
docsite/rst/developing_modules.rst | 4 +++-
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/docsite/rst/common_return_values.rst b/docsite/rst/common_return_values.rst
index ff2b92b4af0a92..fe147c2dee027a 100644
--- a/docsite/rst/common_return_values.rst
+++ b/docsite/rst/common_return_values.rst
@@ -40,8 +40,10 @@ a stdout in the results it will append a stdout_lines which is just a list or th
:doc:`modules`
Learn about available modules
- `GitHub modules directory `_
+ `GitHub Core modules directory `_
Browse source of core modules
+   `GitHub Extras modules directory `_
+ Browse source of extras modules.
`Mailing List `_
Development mailing list
`irc.freenode.net `_
diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst
index 82edea9de894bd..3b563ee755f42f 100644
--- a/docsite/rst/developing_modules.rst
+++ b/docsite/rst/developing_modules.rst
@@ -474,8 +474,10 @@ This example allows the stat module to be called with fileinfo, making the follo
Learn about developing plugins
:doc:`developing_api`
Learn about the Python API for playbook and task execution
- `GitHub modules directory `_
+ `GitHub Core modules directory `_
Browse source of core modules
+   `GitHub Extras modules directory `_
+ Browse source of extras modules.
`Mailing List `_
Development mailing list
`irc.freenode.net `_
From a3b35ed1a6e46f2f63f08476400d94026d92e2b8 Mon Sep 17 00:00:00 2001
From: Erinn Looney-Triggs
Date: Wed, 8 Apr 2015 20:33:38 -0600
Subject: [PATCH 0275/3617] Small change for FreeIPA < 4.0 compatibility.
---
plugins/inventory/freeipa.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/plugins/inventory/freeipa.py b/plugins/inventory/freeipa.py
index caf336239ccd8b..05a8dba356ad6f 100755
--- a/plugins/inventory/freeipa.py
+++ b/plugins/inventory/freeipa.py
@@ -13,7 +13,11 @@ def initialize():
api.bootstrap(context='cli')
api.finalize()
- api.Backend.xmlclient.connect()
+ try:
+ api.Backend.rpcclient.connect()
+ except AttributeError:
+ #FreeIPA < 4.0 compatibility
+ api.Backend.xmlclient.connect()
return api
From bbc05a2cf5d0c72c51f62d28b4565f6da2796c1d Mon Sep 17 00:00:00 2001
From: James Laska
Date: Thu, 9 Apr 2015 09:30:24 -0400
Subject: [PATCH 0276/3617] Improve generation of debian changelog
---
Makefile | 3 ++-
packaging/debian/changelog | 7 ++++---
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/Makefile b/Makefile
index 636986028e8628..e01e1a9713c6aa 100644
--- a/Makefile
+++ b/Makefile
@@ -53,6 +53,7 @@ DEBUILD_BIN ?= debuild
DEBUILD_OPTS = --source-option="-I"
DPUT_BIN ?= dput
DPUT_OPTS ?=
+DEB_DATE := $(shell date +"%a, %d %b %Y %T %z")
ifeq ($(OFFICIAL),yes)
DEB_RELEASE = $(RELEASE)ppa
# Sign OFFICIAL builds using 'DEBSIGN_KEYID'
@@ -217,7 +218,7 @@ debian: sdist
mkdir -p deb-build/$${DIST} ; \
tar -C deb-build/$${DIST} -xvf dist/$(NAME)-$(VERSION).tar.gz ; \
cp -a packaging/debian deb-build/$${DIST}/$(NAME)-$(VERSION)/ ; \
- sed -ie "s#^$(NAME) (\([^)]*\)) \([^;]*\);#ansible (\1-$(DEB_RELEASE)~$${DIST}) $${DIST};#" deb-build/$${DIST}/$(NAME)-$(VERSION)/debian/changelog ; \
+ sed -ie "s|%VERSION%|$(VERSION)|g;s|%RELEASE%|$(DEB_RELEASE)|;s|%DIST%|$${DIST}|g;s|%DATE%|$(DEB_DATE)|g" deb-build/$${DIST}/$(NAME)-$(VERSION)/debian/changelog ; \
done
deb: debian
diff --git a/packaging/debian/changelog b/packaging/debian/changelog
index aa03e724d07e3f..84bf7e770336c4 100644
--- a/packaging/debian/changelog
+++ b/packaging/debian/changelog
@@ -1,8 +1,9 @@
-ansible (2.0.0) unstable; urgency=low
+ansible (%VERSION%-%RELEASE%~%DIST%) %DIST%; urgency=low
- * 2.0.0 (in progress)
+ * %VERSION% release
- -- Ansible, Inc. Fri, 01 Jan 2016 00:00:00 -0500
+ -- Ansible, Inc. %DATE%
+>>>>>>> Stashed changes
ansible (1.9.0.1) unstable; urgency=low
From 7f034a74d1c71907b407f00c9150850b35dba0d2 Mon Sep 17 00:00:00 2001
From: Chris Church
Date: Thu, 9 Apr 2015 13:29:38 -0400
Subject: [PATCH 0277/3617] Add -ExecutionPolicy Unrestricted back, was removed
by #9602.
---
lib/ansible/runner/shell_plugins/powershell.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/runner/shell_plugins/powershell.py b/lib/ansible/runner/shell_plugins/powershell.py
index 50b759ae63389e..850b380eddb997 100644
--- a/lib/ansible/runner/shell_plugins/powershell.py
+++ b/lib/ansible/runner/shell_plugins/powershell.py
@@ -57,7 +57,7 @@ def _build_file_cmd(cmd_parts, quote_args=True):
'''Build command line to run a file, given list of file name plus args.'''
if quote_args:
cmd_parts = ['"%s"' % x for x in cmd_parts]
- return ' '.join(['&'] + cmd_parts)
+ return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + cmd_parts)
class ShellModule(object):
From 5675982b0f64cbc3bf01eff63951d1302132c6d2 Mon Sep 17 00:00:00 2001
From: Chris Church
Date: Thu, 9 Apr 2015 13:36:58 -0400
Subject: [PATCH 0278/3617] Only try kerberos auth when username contains `@`
and pass realm to pywinrm. Alternative to #10644, fixes #10577.
---
lib/ansible/runner/connection_plugins/winrm.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/runner/connection_plugins/winrm.py
index 7a2d6d3318ddbb..eb02d743072b0d 100644
--- a/lib/ansible/runner/connection_plugins/winrm.py
+++ b/lib/ansible/runner/connection_plugins/winrm.py
@@ -90,13 +90,18 @@ def _winrm_connect(self):
return _winrm_cache[cache_key]
exc = None
for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']:
- if transport == 'kerberos' and not HAVE_KERBEROS:
+ if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self.user):
continue
+ if transport == 'kerberos':
+ realm = self.user.split('@', 1)[1].strip() or None
+ else:
+ realm = None
endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', ''))
vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint),
host=self.host)
protocol = Protocol(endpoint, transport=transport,
- username=self.user, password=self.password)
+ username=self.user, password=self.password,
+ realm=realm)
try:
protocol.send_message('')
_winrm_cache[cache_key] = protocol
From 7ba2950c5ae9c51226276c6da7acac9b99757f87 Mon Sep 17 00:00:00 2001
From: Chris Church
Date: Thu, 9 Apr 2015 13:45:21 -0400
Subject: [PATCH 0279/3617] Remove winrm connection cache (only useful when
running against one host). Also fixes #10391.
---
lib/ansible/runner/connection_plugins/winrm.py | 12 ------------
1 file changed, 12 deletions(-)
diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/runner/connection_plugins/winrm.py
index eb02d743072b0d..b41a74c8e1f994 100644
--- a/lib/ansible/runner/connection_plugins/winrm.py
+++ b/lib/ansible/runner/connection_plugins/winrm.py
@@ -18,8 +18,6 @@
from __future__ import absolute_import
import base64
-import hashlib
-import imp
import os
import re
import shlex
@@ -44,10 +42,6 @@
except ImportError:
pass
-_winrm_cache = {
- # 'user:pwhash@host:port':
-}
-
def vvvvv(msg, host=None):
verbose(msg, host=host, caplevel=4)
@@ -84,10 +78,6 @@ def _winrm_connect(self):
vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
(self.user, port, self.host), host=self.host)
netloc = '%s:%d' % (self.host, port)
- cache_key = '%s:%s@%s:%d' % (self.user, hashlib.md5(self.password).hexdigest(), self.host, port)
- if cache_key in _winrm_cache:
- vvvv('WINRM REUSE EXISTING CONNECTION: %s' % cache_key, host=self.host)
- return _winrm_cache[cache_key]
exc = None
for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']:
if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self.user):
@@ -104,7 +94,6 @@ def _winrm_connect(self):
realm=realm)
try:
protocol.send_message('')
- _winrm_cache[cache_key] = protocol
return protocol
except WinRMTransportError, exc:
err_msg = str(exc)
@@ -116,7 +105,6 @@ def _winrm_connect(self):
if code == 401:
raise errors.AnsibleError("the username/password specified for this server was incorrect")
elif code == 411:
- _winrm_cache[cache_key] = protocol
return protocol
vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host)
continue
From 944690118f824247ef2cb1a7db5c1f6a23f4254e Mon Sep 17 00:00:00 2001
From: Chris Church
Date: Thu, 9 Apr 2015 15:51:43 -0400
Subject: [PATCH 0280/3617] Update windows documentation to indicate how to
specify kerberos vs. basic auth.
---
docsite/rst/intro_windows.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst
index d96478b0a267f0..00cd8af404f038 100644
--- a/docsite/rst/intro_windows.rst
+++ b/docsite/rst/intro_windows.rst
@@ -57,7 +57,7 @@ In group_vars/windows.yml, define the following inventory variables::
Notice that the ssh_port is not actually for SSH, but this is a holdover variable name from how Ansible is mostly an SSH-oriented system. Again, Windows management will not happen over SSH.
-If you have installed the ``kerberos`` module, Ansible will first attempt Kerberos authentication. *This uses the principal you are authenticated to Kerberos with on the control machine and not the ``ansible_ssh_user`` specified above*. If that fails, either because you are not signed into Kerberos on the control machine or because the corresponding domain account on the remote host is not available, then Ansible will fall back to "plain" username/password authentication.
+If you have installed the ``kerberos`` module and ``ansible_ssh_user`` contains ``@`` (e.g. ``username@realm``), Ansible will first attempt Kerberos authentication. *This method uses the principal you are authenticated to Kerberos with on the control machine and not ``ansible_ssh_user``*. If that fails, either because you are not signed into Kerberos on the control machine or because the corresponding domain account on the remote host is not available, then Ansible will fall back to "plain" username/password authentication.
When using your playbook, don't forget to specify --ask-vault-pass to provide the password to unlock the file.
From 79f9fbd50efc23217ef28184a09d685b51c39aee Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Thu, 9 Apr 2015 10:40:04 -0700
Subject: [PATCH 0281/3617] Reverse the error messages from jsonfile get and
set
---
lib/ansible/cache/jsonfile.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py
index 93ee69903beff6..9c45dc22fd7912 100644
--- a/lib/ansible/cache/jsonfile.py
+++ b/lib/ansible/cache/jsonfile.py
@@ -60,7 +60,7 @@ def get(self, key):
try:
f = codecs.open(cachefile, 'r', encoding='utf-8')
except (OSError,IOError), e:
- utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
+ utils.warning("error while trying to read %s : %s" % (cachefile, str(e)))
else:
value = json.load(f)
self._cache[key] = value
@@ -76,7 +76,7 @@ def set(self, key, value):
try:
f = codecs.open(cachefile, 'w', encoding='utf-8')
except (OSError,IOError), e:
- utils.warning("error while trying to read %s : %s" % (cachefile, str(e)))
+ utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
else:
f.write(utils.jsonify(value))
finally:
From 2af6314f57676b88895ed88996cd71d6c33cb162 Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Fri, 10 Apr 2015 04:01:18 -0700
Subject: [PATCH 0282/3617] Comment to clarify why we add one to the line and
column recording
---
v2/ansible/parsing/yaml/constructor.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py
index 97f9c71ef8bd44..d1a2a01bc28649 100644
--- a/v2/ansible/parsing/yaml/constructor.py
+++ b/v2/ansible/parsing/yaml/constructor.py
@@ -58,6 +58,7 @@ def construct_yaml_seq(self, node):
def _node_position_info(self, node):
# the line number where the previous token has ended (plus empty lines)
+ # Add one so that the first line is line 1 rather than line 0
column = node.start_mark.column + 1
line = node.start_mark.line + 1
From 652cd6cd5e60879cac3e74088930de1fc603cdda Mon Sep 17 00:00:00 2001
From: Jesse Rusak
Date: Sat, 4 Apr 2015 16:37:14 -0400
Subject: [PATCH 0283/3617] Fix --force-handlers, and allow it in plays and
ansible.cfg
The --force-handlers command line argument was not correctly running
handlers on hosts which had tasks that later failed. This corrects that,
and also allows you to specify force_handlers in ansible.cfg or in a
play.
---
bin/ansible-playbook | 3 +-
docsite/rst/intro_configuration.rst | 14 ++++++++++
docsite/rst/playbooks_error_handling.rst | 20 +++++++++++++
lib/ansible/constants.py | 2 ++
lib/ansible/playbook/__init__.py | 17 +++++------
lib/ansible/playbook/play.py | 8 ++++--
test/integration/Makefile | 14 ++++++++++
.../test_force_handlers/handlers/main.yml | 2 ++
.../roles/test_force_handlers/tasks/main.yml | 26 +++++++++++++++++
test/integration/test_force_handlers.yml | 28 +++++++++++++++++++
test/units/TestPlayVarsFiles.py | 1 +
11 files changed, 123 insertions(+), 12 deletions(-)
create mode 100644 test/integration/roles/test_force_handlers/handlers/main.yml
create mode 100644 test/integration/roles/test_force_handlers/tasks/main.yml
create mode 100644 test/integration/test_force_handlers.yml
diff --git a/bin/ansible-playbook b/bin/ansible-playbook
index 118a0198e4293f..3d6e1f9f4029de 100755
--- a/bin/ansible-playbook
+++ b/bin/ansible-playbook
@@ -97,7 +97,8 @@ def main(args):
help="one-step-at-a-time: confirm each task before running")
parser.add_option('--start-at-task', dest='start_at',
help="start the playbook at the task matching this name")
- parser.add_option('--force-handlers', dest='force_handlers', action='store_true',
+ parser.add_option('--force-handlers', dest='force_handlers',
+ default=C.DEFAULT_FORCE_HANDLERS, action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache")
diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst
index 4cb1f3599486ec..a13f6c6ecd990d 100644
--- a/docsite/rst/intro_configuration.rst
+++ b/docsite/rst/intro_configuration.rst
@@ -252,6 +252,20 @@ This options forces color mode even when running without a TTY::
force_color = 1
+.. _force_handlers:
+
+force_handlers
+==============
+
+.. versionadded:: 1.9.1
+
+This option causes notified handlers to run on a host even if a failure occurs on that host::
+
+ force_handlers = True
+
+The default is False, meaning that handlers will not run if a failure has occurred on a host.
+This can also be set per play or on the command line. See :ref:`handlers_and_failure` for more details.
+
.. _forks:
forks
diff --git a/docsite/rst/playbooks_error_handling.rst b/docsite/rst/playbooks_error_handling.rst
index 98ffb2860f3f41..ac573d86ba6ae1 100644
--- a/docsite/rst/playbooks_error_handling.rst
+++ b/docsite/rst/playbooks_error_handling.rst
@@ -29,6 +29,26 @@ write a task that looks like this::
Note that the above system only governs the failure of the particular task, so if you have an undefined
variable used, it will still raise an error that users will need to address.
+.. _handlers_and_failure:
+
+Handlers and Failure
+````````````````````
+
+.. versionadded:: 1.9.1
+
+When a task fails on a host, handlers which were previously notified
+will *not* be run on that host. This can lead to cases where an unrelated failure
+can leave a host in an unexpected state. For example, a task could update
+a configuration file and notify a handler to restart some service. If a
+task later on in the same play fails, the service will not be restarted despite
+the configuration change.
+
+You can change this behavior with the ``--force-handlers`` command-line option,
+or by including ``force_handlers: True`` in a play, or ``force_handlers = True``
+in ansible.cfg. When handlers are forced, they will run when notified even
+if a task fails on that host. (Note that certain errors could still prevent
+the handler from running, such as a host becoming unreachable.)
+
.. _controlling_what_defines_failure:
Controlling What Defines Failure
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 71efefdbc383da..089de5b7c5bf15 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -173,6 +173,8 @@ def shell_expand_path(path):
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
+DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
+
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index d58657012c625f..93804d123c8726 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -375,17 +375,17 @@ def _async_poll(self, poller, async_seconds, async_poll_interval):
# *****************************************************
- def _trim_unavailable_hosts(self, hostlist=[]):
+ def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
''' returns a list of hosts that haven't failed and aren't dark '''
- return [ h for h in hostlist if (h not in self.stats.failures) and (h not in self.stats.dark)]
+ return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
# *****************************************************
- def _run_task_internal(self, task):
+ def _run_task_internal(self, task, include_failed=False):
''' run a particular module step in a playbook '''
- hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts))
+ hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
self.inventory.restrict_to(hosts)
runner = ansible.runner.Runner(
@@ -493,7 +493,8 @@ def _run_task(self, play, task, is_handler):
task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
# load up an appropriate ansible runner to run the task in parallel
- results = self._run_task_internal(task)
+ include_failed = is_handler and play.force_handlers
+ results = self._run_task_internal(task, include_failed=include_failed)
# if no hosts are matched, carry on
hosts_remaining = True
@@ -811,7 +812,7 @@ def _run_play(self, play):
# if no hosts remain, drop out
if not host_list:
- if self.force_handlers:
+ if play.force_handlers:
task_errors = True
break
else:
@@ -821,7 +822,7 @@ def _run_play(self, play):
# lift restrictions after each play finishes
self.inventory.lift_also_restriction()
- if task_errors and not self.force_handlers:
+ if task_errors and not play.force_handlers:
# if there were failed tasks and handler execution
# is not forced, quit the play with an error
return False
@@ -856,7 +857,7 @@ def run_handlers(self, play):
play.max_fail_pct = 0
if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
host_list = None
- if not host_list and not self.force_handlers:
+ if not host_list and not play.force_handlers:
self.callbacks.on_no_hosts_remaining()
return False
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 78f2f6d9ba8000..9fd8a86f4e4127 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -34,9 +34,10 @@ class Play(object):
_pb_common = [
'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
- 'become_method', 'become_user', 'environment', 'gather_facts', 'handlers', 'hosts',
- 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su', 'su_user', 'sudo',
- 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt', 'vault_password',
+ 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
+ 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
+ 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
+ 'vault_password',
]
__slots__ = _pb_common + [
@@ -153,6 +154,7 @@ def __init__(self, playbook, ds, basedir, vault_password=None):
self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
self.no_log = utils.boolean(ds.get('no_log', 'false'))
+ self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
# Fail out if user specifies conflicting privelege escalations
if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
diff --git a/test/integration/Makefile b/test/integration/Makefile
index ac526cf752ecbc..6e2acec341d131 100644
--- a/test/integration/Makefile
+++ b/test/integration/Makefile
@@ -56,6 +56,20 @@ test_group_by:
test_handlers:
ansible-playbook test_handlers.yml -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS)
+ # Not forcing, should only run on successful host
+ [ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ]
+ # Forcing from command line
+ [ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers --force-handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+ # Forcing from command line, should only run later tasks on unfailed hosts
+ [ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers --force-handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_TASK_. | sort | uniq | xargs)" = "CALLED_TASK_B CALLED_TASK_D CALLED_TASK_E" ]
+ # Forcing from command line, should call handlers even if all hosts fail
+ [ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers --force-handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v -e fail_all=yes $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+ # Forcing from ansible.cfg
+ [ "$$(ANSIBLE_FORCE_HANDLERS=true ansible-playbook --tags normal test_force_handlers.yml -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+ # Forcing true in play
+ [ "$$(ansible-playbook test_force_handlers.yml --tags force_true_in_play -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ]
+ # Forcing false in play, which overrides command line
+ [ "$$(ansible-playbook test_force_handlers.yml --force-handlers --tags force_false_in_play -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ]
test_hash:
ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}'
diff --git a/test/integration/roles/test_force_handlers/handlers/main.yml b/test/integration/roles/test_force_handlers/handlers/main.yml
new file mode 100644
index 00000000000000..2cfb1ef7109201
--- /dev/null
+++ b/test/integration/roles/test_force_handlers/handlers/main.yml
@@ -0,0 +1,2 @@
+- name: echoing handler
+ command: echo CALLED_HANDLER_{{ inventory_hostname }}
\ No newline at end of file
diff --git a/test/integration/roles/test_force_handlers/tasks/main.yml b/test/integration/roles/test_force_handlers/tasks/main.yml
new file mode 100644
index 00000000000000..a3948756d71704
--- /dev/null
+++ b/test/integration/roles/test_force_handlers/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+
+# We notify for A and B, and hosts B and C fail.
+# When forcing, we expect A and B to run handlers
+# When not forcing, we expect only B to run handlers
+
+- name: notify the handler for host A and B
+ shell: echo
+ notify:
+ - echoing handler
+ when: inventory_hostname == 'A' or inventory_hostname == 'B'
+
+- name: fail task for all
+ fail: msg="Fail All"
+ when: fail_all is defined and fail_all
+
+- name: fail task for A
+ fail: msg="Fail A"
+ when: inventory_hostname == 'A'
+
+- name: fail task for C
+ fail: msg="Fail C"
+ when: inventory_hostname == 'C'
+
+- name: echo after A and C have failed
+ command: echo CALLED_TASK_{{ inventory_hostname }}
\ No newline at end of file
diff --git a/test/integration/test_force_handlers.yml b/test/integration/test_force_handlers.yml
new file mode 100644
index 00000000000000..a700da08f0be28
--- /dev/null
+++ b/test/integration/test_force_handlers.yml
@@ -0,0 +1,28 @@
+---
+
+- name: test force handlers (default)
+ tags: normal
+ hosts: testgroup
+ gather_facts: False
+ connection: local
+ roles:
+ - { role: test_force_handlers }
+
+- name: test force handlers (set to true)
+ tags: force_true_in_play
+ hosts: testgroup
+ gather_facts: False
+ connection: local
+ force_handlers: True
+ roles:
+ - { role: test_force_handlers }
+
+
+- name: test force handlers (set to false)
+ tags: force_false_in_play
+ hosts: testgroup
+ gather_facts: False
+ connection: local
+ force_handlers: False
+ roles:
+ - { role: test_force_handlers }
diff --git a/test/units/TestPlayVarsFiles.py b/test/units/TestPlayVarsFiles.py
index 497c3112ede0d4..9d42b73e8b6ed1 100644
--- a/test/units/TestPlayVarsFiles.py
+++ b/test/units/TestPlayVarsFiles.py
@@ -47,6 +47,7 @@ def __init__(self):
self.transport = None
self.only_tags = None
self.skip_tags = None
+ self.force_handlers = None
self.VARS_CACHE = {}
self.SETUP_CACHE = {}
self.inventory = FakeInventory()
From 56f4bf44f53881162ec7a0f35526eaaa68fa9398 Mon Sep 17 00:00:00 2001
From: Chris Church
Date: Tue, 30 Sep 2014 11:52:05 -0400
Subject: [PATCH 0284/3617] Add integration tests for win_user module.
---
.../roles/test_win_user/defaults/main.yml | 5 +
.../test_win_user/files/lockout_user.ps1 | 17 +
.../roles/test_win_user/tasks/main.yml | 400 ++++++++++++++++++
test/integration/test_winrm.yml | 1 +
4 files changed, 423 insertions(+)
create mode 100644 test/integration/roles/test_win_user/defaults/main.yml
create mode 100644 test/integration/roles/test_win_user/files/lockout_user.ps1
create mode 100644 test/integration/roles/test_win_user/tasks/main.yml
diff --git a/test/integration/roles/test_win_user/defaults/main.yml b/test/integration/roles/test_win_user/defaults/main.yml
new file mode 100644
index 00000000000000..c6a18ed3a30797
--- /dev/null
+++ b/test/integration/roles/test_win_user/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+
+test_win_user_name: test_win_user
+test_win_user_password: "T35Tus3rP@ssW0rd"
+test_win_user_password2: "pa55wOrd4te5tU53R!"
diff --git a/test/integration/roles/test_win_user/files/lockout_user.ps1 b/test/integration/roles/test_win_user/files/lockout_user.ps1
new file mode 100644
index 00000000000000..e15f13f3bf2b61
--- /dev/null
+++ b/test/integration/roles/test_win_user/files/lockout_user.ps1
@@ -0,0 +1,17 @@
+trap
+{
+ Write-Error -ErrorRecord $_
+ exit 1;
+}
+
+$username = $args[0]
+[void][system.reflection.assembly]::LoadWithPartialName('System.DirectoryServices.AccountManagement')
+$pc = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext 'Machine', $env:COMPUTERNAME
+For ($i = 1; $i -le 10; $i++) {
+ try {
+ $pc.ValidateCredentials($username, 'b@DP@ssw0rd')
+ }
+ catch {
+ break
+ }
+}
diff --git a/test/integration/roles/test_win_user/tasks/main.yml b/test/integration/roles/test_win_user/tasks/main.yml
new file mode 100644
index 00000000000000..ebe8c5da3e8edf
--- /dev/null
+++ b/test/integration/roles/test_win_user/tasks/main.yml
@@ -0,0 +1,400 @@
+# test code for the win_user module
+# (c) 2014, Chris Church
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see &lt;http://www.gnu.org/licenses/&gt;.
+
+- name: remove existing test user if present
+ win_user: name="{{ test_win_user_name }}" state="absent"
+ register: win_user_remove_result
+
+- name: check user removal result
+ assert:
+ that:
+ - "win_user_remove_result.name"
+ - "win_user_remove_result.state == 'absent'"
+
+- name: try to remove test user again
+ win_user: name="{{ test_win_user_name }}" state="absent"
+ register: win_user_remove_result_again
+
+- name: check user removal result again
+ assert:
+ that:
+ - "not win_user_remove_result_again|changed"
+ - "win_user_remove_result_again.name"
+ - "win_user_remove_result_again.msg"
+ - "win_user_remove_result.state == 'absent'"
+
+- name: test missing user with query state
+ win_user: name="{{ test_win_user_name }}" state="query"
+ register: win_user_missing_query_result
+
+- name: check missing query result
+ assert:
+ that:
+ - "not win_user_missing_query_result|changed"
+ - "win_user_missing_query_result.name"
+ - "win_user_missing_query_result.msg"
+ - "win_user_missing_query_result.state == 'absent'"
+
+- name: test create user
+ win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password }}"
+ register: win_user_create_result
+
+- name: check user creation result
+ assert:
+ that:
+ - "win_user_create_result|changed"
+ - "win_user_create_result.name == '{{ test_win_user_name }}'"
+ - "win_user_create_result.fullname == '{{ test_win_user_name }}'"
+ - "win_user_create_result.path"
+ - "win_user_create_result.state == 'present'"
+
+- name: update user full name and description
+ win_user: name="{{ test_win_user_name }}" fullname="Test Ansible User" description="Test user account created by Ansible"
+ register: win_user_update_result
+
+- name: check full name and description update result
+ assert:
+ that:
+ - "win_user_update_result|changed"
+ - "win_user_update_result.fullname == 'Test Ansible User'"
+ - "win_user_update_result.description == 'Test user account created by Ansible'"
+
+- name: update user full name and description again with same values
+ win_user: name="{{ test_win_user_name }}" fullname="Test Ansible User" description="Test user account created by Ansible"
+ register: win_user_update_result_again
+
+- name: check full name and description result again
+ assert:
+ that:
+ - "not win_user_update_result_again|changed"
+ - "win_user_update_result_again.fullname == 'Test Ansible User'"
+ - "win_user_update_result_again.description == 'Test user account created by Ansible'"
+
+- name: test again with no options or changes
+ win_user: name="{{ test_win_user_name }}"
+ register: win_user_nochange_result
+
+- name: check no changes result
+ assert:
+ that:
+ - "not win_user_nochange_result|changed"
+
+- name: test again with query state
+ win_user: name="{{ test_win_user_name }}" state="query"
+ register: win_user_query_result
+
+- name: check query result
+ assert:
+ that:
+ - "not win_user_query_result|changed"
+ - "win_user_query_result.state == 'present'"
+ - "win_user_query_result.name == '{{ test_win_user_name }}'"
+ - "win_user_query_result.fullname == 'Test Ansible User'"
+ - "win_user_query_result.description == 'Test user account created by Ansible'"
+ - "win_user_query_result.path"
+ - "win_user_query_result.sid"
+ - "win_user_query_result.groups == []"
+
+- name: change user password
+ win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password2 }}"
+ register: win_user_password_result
+
+- name: check password change result
+ assert:
+ that:
+ - "win_user_password_result|changed"
+
+- name: change user password again to same value
+ win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password2 }}"
+ register: win_user_password_result_again
+
+- name: check password change result again
+ assert:
+ that:
+ - "not win_user_password_result_again|changed"
+
+- name: check update_password=on_create for existing user
+ win_user: name="{{ test_win_user_name }}" password="ThisP@ssW0rdShouldNotBeUsed" update_password=on_create
+ register: win_user_nopasschange_result
+
+- name: check password change with on_create flag result
+ assert:
+ that:
+ - "not win_user_nopasschange_result|changed"
+
+- name: set password expired flag
+ win_user: name="{{ test_win_user_name }}" password_expired=yes
+ register: win_user_password_expired_result
+
+- name: check password expired result
+ assert:
+ that:
+ - "win_user_password_expired_result|changed"
+ - "win_user_password_expired_result.password_expired"
+
+- name: clear password expired flag
+ win_user: name="{{ test_win_user_name }}" password_expired=no
+ register: win_user_clear_password_expired_result
+
+- name: check clear password expired result
+ assert:
+ that:
+ - "win_user_clear_password_expired_result|changed"
+ - "not win_user_clear_password_expired_result.password_expired"
+
+- name: set password never expires flag
+ win_user: name="{{ test_win_user_name }}" password_never_expires=yes
+ register: win_user_password_never_expires_result
+
+- name: check password never expires result
+ assert:
+ that:
+ - "win_user_password_never_expires_result|changed"
+ - "win_user_password_never_expires_result.password_never_expires"
+
+- name: clear password never expires flag
+ win_user: name="{{ test_win_user_name }}" password_never_expires=no
+ register: win_user_clear_password_never_expires_result
+
+- name: check clear password never expires result
+ assert:
+ that:
+ - "win_user_clear_password_never_expires_result|changed"
+ - "not win_user_clear_password_never_expires_result.password_never_expires"
+
+- name: set user cannot change password flag
+ win_user: name="{{ test_win_user_name }}" user_cannot_change_password=yes
+ register: win_user_cannot_change_password_result
+
+- name: check user cannot change password result
+ assert:
+ that:
+ - "win_user_cannot_change_password_result|changed"
+ - "win_user_cannot_change_password_result.user_cannot_change_password"
+
+- name: clear user cannot change password flag
+ win_user: name="{{ test_win_user_name }}" user_cannot_change_password=no
+ register: win_user_can_change_password_result
+
+- name: check clear user cannot change password result
+ assert:
+ that:
+ - "win_user_can_change_password_result|changed"
+ - "not win_user_can_change_password_result.user_cannot_change_password"
+
+- name: set account disabled flag
+ win_user: name="{{ test_win_user_name }}" account_disabled=true
+ register: win_user_account_disabled_result
+
+- name: check account disabled result
+ assert:
+ that:
+ - "win_user_account_disabled_result|changed"
+ - "win_user_account_disabled_result.account_disabled"
+
+- name: clear account disabled flag
+ win_user: name="{{ test_win_user_name }}" account_disabled=false
+ register: win_user_clear_account_disabled_result
+
+- name: check clear account disabled result
+ assert:
+ that:
+ - "win_user_clear_account_disabled_result|changed"
+ - "not win_user_clear_account_disabled_result.account_disabled"
+
+- name: attempt to set account locked flag
+ win_user: name="{{ test_win_user_name }}" account_locked=yes
+ register: win_user_set_account_locked_result
+ ignore_errors: true
+
+- name: verify that attempting to set account locked flag fails
+ assert:
+ that:
+ - "win_user_set_account_locked_result|failed"
+ - "not win_user_set_account_locked_result|changed"
+
+- name: attempt to lockout test account
+ script: lockout_user.ps1 "{{ test_win_user_name }}"
+
+- name: get user to check if account locked flag is set
+ win_user: name="{{ test_win_user_name }}" state="query"
+ register: win_user_account_locked_result
+
+- name: clear account locked flag if set
+ win_user: name="{{ test_win_user_name }}" account_locked=no
+ register: win_user_clear_account_locked_result
+ when: "win_user_account_locked_result.account_locked"
+
+- name: check clear account lockout result if account was locked
+ assert:
+ that:
+ - "win_user_clear_account_locked_result|changed"
+ - "not win_user_clear_account_locked_result.account_locked"
+ when: "win_user_account_locked_result.account_locked"
+
+- name: assign test user to a group
+ win_user: name="{{ test_win_user_name }}" groups="Users"
+ register: win_user_replace_groups_result
+
+- name: check assign user to group result
+ assert:
+ that:
+ - "win_user_replace_groups_result|changed"
+ - "win_user_replace_groups_result.groups|length == 1"
+ - "win_user_replace_groups_result.groups[0]['name'] == 'Users'"
+
+- name: assign test user to the same group
+ win_user:
+ name: "{{ test_win_user_name }}"
+ groups: ["Users"]
+ register: win_user_replace_groups_again_result
+
+- name: check assign user to group again result
+ assert:
+ that:
+ - "not win_user_replace_groups_again_result|changed"
+
+- name: add user to another group
+ win_user: name="{{ test_win_user_name }}" groups="Power Users" groups_action="add"
+ register: win_user_add_groups_result
+
+- name: check add user to another group result
+ assert:
+ that:
+ - "win_user_add_groups_result|changed"
+ - "win_user_add_groups_result.groups|length == 2"
+ - "win_user_add_groups_result.groups[0]['name'] in ('Users', 'Power Users')"
+ - "win_user_add_groups_result.groups[1]['name'] in ('Users', 'Power Users')"
+
+- name: add user to another group again
+ win_user:
+ name: "{{ test_win_user_name }}"
+ groups: "Power Users"
+ groups_action: add
+ register: win_user_add_groups_again_result
+
+- name: check add user to another group again result
+ assert:
+ that:
+ - "not win_user_add_groups_again_result|changed"
+
+- name: remove user from a group
+ win_user: name="{{ test_win_user_name }}" groups="Users" groups_action="remove"
+ register: win_user_remove_groups_result
+
+- name: check remove user from group result
+ assert:
+ that:
+ - "win_user_remove_groups_result|changed"
+ - "win_user_remove_groups_result.groups|length == 1"
+ - "win_user_remove_groups_result.groups[0]['name'] == 'Power Users'"
+
+- name: remove user from a group again
+ win_user:
+ name: "{{ test_win_user_name }}"
+ groups:
+ - "Users"
+ groups_action: remove
+ register: win_user_remove_groups_again_result
+
+- name: check remove user from group again result
+ assert:
+ that:
+ - "not win_user_remove_groups_again_result|changed"
+
+- name: reassign test user to multiple groups
+ win_user: name="{{ test_win_user_name }}" groups="Users, Guests" groups_action="replace"
+ register: win_user_reassign_groups_result
+
+- name: check reassign user groups result
+ assert:
+ that:
+ - "win_user_reassign_groups_result|changed"
+ - "win_user_reassign_groups_result.groups|length == 2"
+ - "win_user_reassign_groups_result.groups[0]['name'] in ('Users', 'Guests')"
+ - "win_user_reassign_groups_result.groups[1]['name'] in ('Users', 'Guests')"
+
+- name: reassign test user to multiple groups again
+ win_user:
+ name: "{{ test_win_user_name }}"
+ groups:
+ - "Users"
+ - "Guests"
+ groups_action: replace
+ register: win_user_reassign_groups_again_result
+
+- name: check reassign user groups again result
+ assert:
+ that:
+ - "not win_user_reassign_groups_again_result|changed"
+
+- name: remove user from all groups
+ win_user: name="{{ test_win_user_name }}" groups=""
+ register: win_user_remove_all_groups_result
+
+- name: check remove user from all groups result
+ assert:
+ that:
+ - "win_user_remove_all_groups_result|changed"
+ - "win_user_remove_all_groups_result.groups|length == 0"
+
+- name: remove user from all groups again
+ win_user:
+ name: "{{ test_win_user_name }}"
+ groups: []
+ register: win_user_remove_all_groups_again_result
+
+- name: check remove user from all groups again result
+ assert:
+ that:
+ - "not win_user_remove_all_groups_again_result|changed"
+
+- name: assign user to invalid group
+ win_user: name="{{ test_win_user_name }}" groups="Userz"
+ register: win_user_invalid_group_result
+ ignore_errors: true
+
+- name: check invalid group result
+ assert:
+ that:
+ - "win_user_invalid_group_result|failed"
+ - "win_user_invalid_group_result.msg"
+
+- name: remove test user when finished
+ win_user: name="{{ test_win_user_name }}" state="absent"
+ register: win_user_final_remove_result
+
+- name: check final user removal result
+ assert:
+ that:
+ - "win_user_final_remove_result|changed"
+ - "win_user_final_remove_result.name"
+ - "win_user_final_remove_result.msg"
+ - "win_user_final_remove_result.state == 'absent'"
+
+- name: test removed user with query state
+ win_user: name="{{ test_win_user_name }}" state="query"
+ register: win_user_removed_query_result
+
+- name: check removed query result
+ assert:
+ that:
+ - "not win_user_removed_query_result|changed"
+ - "win_user_removed_query_result.name"
+ - "win_user_removed_query_result.msg"
+ - "win_user_removed_query_result.state == 'absent'"
diff --git a/test/integration/test_winrm.yml b/test/integration/test_winrm.yml
index e2a282e061f247..69d3b652a6f727 100644
--- a/test/integration/test_winrm.yml
+++ b/test/integration/test_winrm.yml
@@ -30,6 +30,7 @@
- { role: test_win_msi, tags: test_win_msi }
- { role: test_win_service, tags: test_win_service }
- { role: test_win_feature, tags: test_win_feature }
+ - { role: test_win_user, tags: test_win_user }
- { role: test_win_file, tags: test_win_file }
- { role: test_win_copy, tags: test_win_copy }
- { role: test_win_template, tags: test_win_template }
From 42bd640d143740f3d2613320ec7df67377a5f5a0 Mon Sep 17 00:00:00 2001
From: Chris Church
Date: Mon, 24 Nov 2014 00:44:45 -0500
Subject: [PATCH 0285/3617] Update win_user tests to set a group on user
creation.
---
test/integration/roles/test_win_user/tasks/main.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/test/integration/roles/test_win_user/tasks/main.yml b/test/integration/roles/test_win_user/tasks/main.yml
index ebe8c5da3e8edf..0e22e332ae923a 100644
--- a/test/integration/roles/test_win_user/tasks/main.yml
+++ b/test/integration/roles/test_win_user/tasks/main.yml
@@ -51,7 +51,7 @@
- "win_user_missing_query_result.state == 'absent'"
- name: test create user
- win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password }}"
+ win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password }}" groups="Guests"
register: win_user_create_result
- name: check user creation result
@@ -64,7 +64,7 @@
- "win_user_create_result.state == 'present'"
- name: update user full name and description
- win_user: name="{{ test_win_user_name }}" fullname="Test Ansible User" description="Test user account created by Ansible"
+ win_user: name="{{ test_win_user_name }}" fullname="Test Ansible User" description="Test user account created by Ansible" groups=""
register: win_user_update_result
- name: check full name and description update result
From 0abcebf1e4763a7e3a1f81b1c8ea5a195de55064 Mon Sep 17 00:00:00 2001
From: Feanil Patel
Date: Sat, 14 Mar 2015 16:26:48 -0400
Subject: [PATCH 0286/3617] Don't convert numbers and booleans to strings.
Before this change if a variable was of type int or bool and the variable was referenced
by another variable, the type would change to string.
eg. defaults/main.yml
```
PORT: 4567
OTHER_CONFIG:
secret1: "so_secret"
secret2: "even_more_secret"
CONFIG:
hostname: "some_hostname"
port: "{{ PORT }}"
secrets: "{{ OTHER_CONFIG }}"
```
If you output `CONFIG` to json or yaml, the port would get represented in the output as a
string instead of as a number, but secrets would get represented as a dictionary. This is
a mis-match in behaviour where some "types" are retained and others are not. This change
should fix the issue.
Update template test to also test var retainment.
Make the template changes in v2.
Update to only short-circuit for booleans and numbers.
Added an entry to the changelog.
---
CHANGELOG.md | 5 +++-
lib/ansible/utils/template.py | 30 +++++++++++++++----
.../roles/test_template/files/foo.txt | 7 +++++
.../roles/test_template/templates/foo.j2 | 2 ++
.../roles/test_template/vars/main.yml | 13 ++++++++
v2/ansible/template/__init__.py | 21 +++++++++++++
6 files changed, 71 insertions(+), 7 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 06fe0504fc7ea4..69d7c3fd56aa4a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,10 @@ Ansible Changes By Release
## 2.0 "TBD" - ACTIVE DEVELOPMENT
Major Changes:
- big_ip modules now support turning off ssl certificate validation (use only for self signed)
+ - big_ip modules now support turning off ssl certificate validation (use only for self signed)
+
+ - template code now retains types for bools and Numbers instead of turning them into strings
+ - If you need the old behaviour, quote the value and it will get passed around as a string
New Modules:
cloudtrail
diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py
index 9426e254eb5826..5f712b2675ea9e 100644
--- a/lib/ansible/utils/template.py
+++ b/lib/ansible/utils/template.py
@@ -31,6 +31,7 @@
import pwd
import ast
import traceback
+from numbers import Number
from ansible.utils.string_functions import count_newlines_from_end
from ansible.utils import to_bytes, to_unicode
@@ -81,6 +82,11 @@ class Flags:
FILTER_PLUGINS = None
_LISTRE = re.compile(r"(\w+)\[(\d+)\]")
+
+# A regex for checking to see if a variable we're trying to
+# expand is just a single variable name.
+SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$")
+
JINJA2_OVERRIDE = '#jinja2:'
JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline']
@@ -109,7 +115,6 @@ def lookup(name, *args, **kwargs):
def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True):
''' templates a data structure by traversing it and substituting for other data structures '''
from ansible import utils
-
try:
if convert_bare and isinstance(varname, basestring):
first_part = varname.split(".")[0].split("[")[0]
@@ -123,10 +128,13 @@ def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_
except errors.AnsibleError, e:
raise errors.AnsibleError("Failed to template %s: %s" % (varname, str(e)))
- if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["):
- eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True)
- if eval_results[1] is None:
- varname = eval_results[0]
+ # template_from_string may return non strings for the case where the var is just
+ # a reference to a single variable, so we should re-check before we do further evals
+ if isinstance(varname, basestring):
+ if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["):
+ eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True)
+ if eval_results[1] is None:
+ varname = eval_results[0]
return varname
@@ -323,10 +331,20 @@ def my_finalize(thing):
def template_from_string(basedir, data, vars, fail_on_undefined=False):
''' run a string through the (Jinja2) templating engine '''
-
try:
if type(data) == str:
data = unicode(data, 'utf-8')
+
+ # Check to see if the string we are trying to render is just referencing a single
+ # var. In this case we don't want to accidentally change the type of the variable
+ # to a string by using the jinja template renderer. We just want to pass it.
+ only_one = SINGLE_VAR.match(data)
+ if only_one:
+ var_name = only_one.group(1)
+ if var_name in vars:
+ resolved_val = vars[var_name]
+ if isinstance(resolved_val, (bool, Number)):
+ return resolved_val
def my_finalize(thing):
return thing if thing is not None else ''
diff --git a/test/integration/roles/test_template/files/foo.txt b/test/integration/roles/test_template/files/foo.txt
index 3e96db9b3ec01e..edd704da048007 100644
--- a/test/integration/roles/test_template/files/foo.txt
+++ b/test/integration/roles/test_template/files/foo.txt
@@ -1 +1,8 @@
templated_var_loaded
+
+{
+ "bool": true,
+ "multi_part": "1Foo",
+ "number": 5,
+ "string_num": "5"
+}
diff --git a/test/integration/roles/test_template/templates/foo.j2 b/test/integration/roles/test_template/templates/foo.j2
index 55aab8f1ea1435..22187f913004c3 100644
--- a/test/integration/roles/test_template/templates/foo.j2
+++ b/test/integration/roles/test_template/templates/foo.j2
@@ -1 +1,3 @@
{{ templated_var }}
+
+{{ templated_dict | to_nice_json }}
diff --git a/test/integration/roles/test_template/vars/main.yml b/test/integration/roles/test_template/vars/main.yml
index 1e8f64ccf4458a..b79f95e6cf16f7 100644
--- a/test/integration/roles/test_template/vars/main.yml
+++ b/test/integration/roles/test_template/vars/main.yml
@@ -1 +1,14 @@
templated_var: templated_var_loaded
+
+number_var: 5
+string_num: "5"
+bool_var: true
+part_1: 1
+part_2: "Foo"
+
+templated_dict:
+ number: "{{ number_var }}"
+ string_num: "{{ string_num }}"
+ bool: "{{ bool_var }}"
+ multi_part: "{{ part_1 }}{{ part_2 }}"
+
diff --git a/v2/ansible/template/__init__.py b/v2/ansible/template/__init__.py
index 46bbc06a07dd96..0345a750081cdd 100644
--- a/v2/ansible/template/__init__.py
+++ b/v2/ansible/template/__init__.py
@@ -32,8 +32,17 @@
from ansible.template.vars import AnsibleJ2Vars
from ansible.utils.debug import debug
+from numbers import Number
+
__all__ = ['Templar']
+# A regex for checking to see if a variable we're trying to
+# expand is just a single variable name.
+SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$")
+
+# Primitive Types which we don't want Jinja to convert to strings.
+NON_TEMPLATED_TYPES = ( bool, Number )
+
JINJA2_OVERRIDE = '#jinja2:'
JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline']
@@ -125,6 +134,18 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=Fals
if isinstance(variable, basestring):
result = variable
if self._contains_vars(variable):
+
+ # Check to see if the string we are trying to render is just referencing a single
+ # var. In this case we don't want to accidentally change the type of the variable
+ # to a string by using the jinja template renderer. We just want to pass it.
+ only_one = SINGLE_VAR.match(variable)
+ if only_one:
+ var_name = only_one.group(1)
+ if var_name in self._available_vars:
+ resolved_val = self._available_vars[var_name]
+ if isinstance(resolved_val, NON_TEMPLATED_TYPES):
+ return resolved_val
+
result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines)
# if this looks like a dictionary or list, convert it to such using the safe_eval method
From e6b7b9206d16a9b446437e06957096ed242c0fc7 Mon Sep 17 00:00:00 2001
From: Andrew Murray
Date: Mon, 13 Apr 2015 23:45:09 +1000
Subject: [PATCH 0287/3617] Fixed changelog typos
---
CHANGELOG.md | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 69d7c3fd56aa4a..256b3bafe28155 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -45,7 +45,7 @@ Major changes:
For some use cases this can lead to dramatic improvements in startup time.
* Overhaul of the checksum system, now supports more systems and more cases more reliably and uniformly.
* Fix skipped tasks to not display their parameters if no_log is specified.
-* Many fixes to unicode support, standarized functions to make it easier to add to input/output boundries.
+* Many fixes to unicode support, standarized functions to make it easier to add to input/output boundaries.
* Added travis integration to github for basic tests, this should speed up ticket triage and merging.
* environment: directive now can also be applied to play and is inhertited by tasks, which can still override it.
* expanded facts and OS/distribution support for existing facts and improved performance with pypy.
@@ -162,7 +162,7 @@ Other Notable Changes:
## 1.8.3 "You Really Got Me" - Feb 17, 2015
-* Fixing a security bug related to the default permissions set on a tempoary file created when using "ansible-vault view ".
+* Fixing a security bug related to the default permissions set on a temporary file created when using "ansible-vault view ".
* Many bug fixes, for both core code and core modules.
## 1.8.2 "You Really Got Me" - Dec 04, 2014
@@ -450,7 +450,7 @@ Other notable changes:
## 1.5.4 "Love Walks In" - April 1, 2014
- Security fix for safe_eval, which further hardens the checking of the evaluation function.
-- Changing order of variable precendence for system facts, to ensure that inventory variables take precedence over any facts that may be set on a host.
+- Changing order of variable precedence for system facts, to ensure that inventory variables take precedence over any facts that may be set on a host.
## 1.5.3 "Love Walks In" - March 13, 2014
@@ -485,7 +485,7 @@ Major features/changes:
* ec2 module now accepts 'exact_count' and 'count_tag' as a way to enforce a running number of nodes by tags.
* all ec2 modules that work with Eucalyptus also now support a 'validate_certs' option, which can be set to 'off' for installations using self-signed certs.
* Start of new integration test infrastructure (WIP, more details TBD)
-* if repoquery is unavailble, the yum module will automatically attempt to install yum-utils
+* if repoquery is unavailable, the yum module will automatically attempt to install yum-utils
* ansible-vault: a framework for encrypting your playbooks and variable files
* added support for privilege escalation via 'su' into bin/ansible and bin/ansible-playbook and associated keywords 'su', 'su_user', 'su_pass' for tasks/plays
@@ -948,7 +948,7 @@ Bugfixes and Misc Changes:
* misc fixes to the Riak module
* make template module slightly more efficient
* base64encode / decode filters are now available to templates
-* libvirt module can now work with multiple different libvirt connecton URIs
+* libvirt module can now work with multiple different libvirt connection URIs
* fix for postgresql password escaping
* unicode fix for shlex.split in some cases
* apt module upgrade logic improved
@@ -1153,7 +1153,7 @@ New playbook/language features:
* task includes can now be of infinite depth
* when_set and when_unset can take more than one var (when_set: $a and $b and $c)
* added the with_sequence lookup plugin
-* can override "connection:" on an indvidual task
+* can override "connection:" on an individual task
* parameterized playbook includes can now define complex variables (not just all on one line)
* making inventory variables available for use in vars_files paths
* messages when skipping plays are now more clear
From 224fd0adfe8c977d55b0924ec558a51f59de4bab Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 13 Apr 2015 10:10:32 -0400
Subject: [PATCH 0288/3617] added fleetctl entry for new inventory script to
changelog
---
CHANGELOG.md | 3 +++
1 file changed, 3 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 256b3bafe28155..0211defbaa0f4d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,9 @@ New Modules:
vertica_schema
vertica_user
+New Inventory scripts:
+ fleetctl
+
Other Notable Changes:
## 1.9 "Dancing In the Street" - Mar 25, 2015
From 89cc54cc16c36c8c46b76a5c0f70afe9c86aa4b5 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 13 Apr 2015 10:49:31 -0400
Subject: [PATCH 0289/3617] typo fix
---
lib/ansible/module_utils/facts.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 21bbc93d4d102a..a85f3fff0ef221 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -177,7 +177,7 @@ def get_platform_facts(self):
data = out.split('\n')
self.facts['architecture'] = data[0]
except:
- self.facts['architectrure' = 'Not Available'
+ self.facts['architectrure'] = 'Not Available'
elif self.facts['system'] == 'OpenBSD':
self.facts['architecture'] = platform.uname()[5]
From 62c08d96e50ad7fd17da5b8b1396e7d168dc3f48 Mon Sep 17 00:00:00 2001
From: Brian Coca
Date: Mon, 13 Apr 2015 10:58:17 -0400
Subject: [PATCH 0290/3617] fixed another typo
---
lib/ansible/module_utils/facts.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index a85f3fff0ef221..595629a7109759 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -177,7 +177,7 @@ def get_platform_facts(self):
data = out.split('\n')
self.facts['architecture'] = data[0]
except:
- self.facts['architectrure'] = 'Not Available'
+ self.facts['architecture'] = 'Not Available'
elif self.facts['system'] == 'OpenBSD':
self.facts['architecture'] = platform.uname()[5]
From b193d327b616da2774ce4293aa52539fbd61b6ef Mon Sep 17 00:00:00 2001
From: Dorian Pula
Date: Mon, 13 Apr 2015 12:17:07 -0400
Subject: [PATCH 0291/3617] Fix re import failure in templates module when
running unit tests.
---
v2/ansible/template/__init__.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/v2/ansible/template/__init__.py b/v2/ansible/template/__init__.py
index 0345a750081cdd..4e15e83424c22b 100644
--- a/v2/ansible/template/__init__.py
+++ b/v2/ansible/template/__init__.py
@@ -19,6 +19,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import re
+
from jinja2 import Environment
from jinja2.exceptions import TemplateSyntaxError, UndefinedError
from jinja2.utils import concat as j2_concat
From 6747f825476e7e82c315fbbef29794bc8d0026e6 Mon Sep 17 00:00:00 2001
From: ian
Date: Mon, 13 Apr 2015 12:35:20 -0400
Subject: [PATCH 0292/3617] Change exceptions to python3 syntax.
---
v2/ansible/playbook/base.py | 4 ++--
v2/ansible/plugins/__init__.py | 2 +-
v2/ansible/plugins/action/__init__.py | 4 ++--
v2/ansible/plugins/action/copy.py | 6 +++---
v2/ansible/plugins/action/pause.py | 2 +-
v2/ansible/plugins/action/template.py | 2 +-
v2/ansible/plugins/connections/accelerate.py | 2 +-
v2/ansible/plugins/connections/paramiko_ssh.py | 8 ++++----
v2/ansible/plugins/connections/winrm.py | 2 +-
v2/ansible/plugins/lookup/csvfile.py | 4 ++--
v2/ansible/plugins/lookup/dnstxt.py | 2 +-
v2/ansible/plugins/lookup/first_found.py | 2 +-
v2/ansible/plugins/lookup/password.py | 4 ++--
v2/ansible/plugins/lookup/url.py | 4 ++--
v2/ansible/plugins/strategies/__init__.py | 2 +-
v2/ansible/plugins/strategies/free.py | 2 +-
v2/ansible/template/safe_eval.py | 4 ++--
v2/ansible/utils/hashing.py | 2 +-
v2/ansible/utils/vault.py | 4 ++--
v2/ansible/vars/__init__.py | 2 +-
v2/samples/multi.py | 4 ++--
v2/samples/multi_queues.py | 8 ++++----
22 files changed, 38 insertions(+), 38 deletions(-)
diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py
index e834d3b729684f..c6a9d9a051396e 100644
--- a/v2/ansible/playbook/base.py
+++ b/v2/ansible/playbook/base.py
@@ -270,9 +270,9 @@ def post_validate(self, all_vars=dict(), fail_on_undefined=True):
# and assign the massaged value back to the attribute field
setattr(self, name, value)
- except (TypeError, ValueError), e:
+ except (TypeError, ValueError) as e:
raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
- except UndefinedError, e:
+ except UndefinedError as e:
if fail_on_undefined:
raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds())
diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py
index a55059f1b7b7bc..d16eecd3c39921 100644
--- a/v2/ansible/plugins/__init__.py
+++ b/v2/ansible/plugins/__init__.py
@@ -180,7 +180,7 @@ def find_plugin(self, name, suffixes=None):
if os.path.isdir(path):
try:
full_paths = (os.path.join(path, f) for f in os.listdir(path))
- except OSError,e:
+ except OSError as e:
d = Display()
d.warning("Error accessing plugin paths: %s" % str(e))
for full_path in (f for f in full_paths if os.path.isfile(f)):
diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py
index 2f56c4df582eb6..0e98bbc5b75e59 100644
--- a/v2/ansible/plugins/action/__init__.py
+++ b/v2/ansible/plugins/action/__init__.py
@@ -122,7 +122,7 @@ def _early_needs_tmp_path(self):
# FIXME: modified from original, needs testing? Since this is now inside
# the action plugin, it should make it just this simple
return getattr(self, 'TRANSFERS_FILES', False)
-
+
def _late_needs_tmp_path(self, tmp, module_style):
'''
Determines if a temp path is required after some early actions have already taken place.
@@ -223,7 +223,7 @@ def _transfer_data(self, remote_path, data):
#else:
# data = data.encode('utf-8')
afo.write(data)
- except Exception, e:
+ except Exception as e:
#raise AnsibleError("failure encoding into utf-8: %s" % str(e))
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e))
diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py
index ece8b5b11b0973..6db130ad7f3a32 100644
--- a/v2/ansible/plugins/action/copy.py
+++ b/v2/ansible/plugins/action/copy.py
@@ -70,7 +70,7 @@ def run(self, tmp=None, task_vars=dict()):
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
- except Exception, err:
+ except Exception as err:
return dict(failed=True, msg="could not write content temp file: %s" % err)
###############################################################################################
@@ -270,7 +270,7 @@ def run(self, tmp=None, task_vars=dict()):
if module_return.get('changed') == True:
changed = True
- # the file module returns the file path as 'path', but
+ # the file module returns the file path as 'path', but
# the copy module uses 'dest', so add it if it's not there
if 'path' in module_return and 'dest' not in module_return:
module_return['dest'] = module_return['path']
@@ -297,7 +297,7 @@ def _create_content_tempfile(self, content):
content = to_bytes(content)
try:
f.write(content)
- except Exception, err:
+ except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
diff --git a/v2/ansible/plugins/action/pause.py b/v2/ansible/plugins/action/pause.py
index 9c6075e1011fa2..c56e6654b1bb44 100644
--- a/v2/ansible/plugins/action/pause.py
+++ b/v2/ansible/plugins/action/pause.py
@@ -68,7 +68,7 @@ def run(self, tmp=None, task_vars=dict()):
seconds = int(self._task.args['seconds'])
duration_unit = 'seconds'
- except ValueError, e:
+ except ValueError as e:
return dict(failed=True, msg="non-integer value given for prompt duration:\n%s" % str(e))
# Is 'prompt' a key in 'args'?
diff --git a/v2/ansible/plugins/action/template.py b/v2/ansible/plugins/action/template.py
index 76b2e78a737d62..f82cbb376670eb 100644
--- a/v2/ansible/plugins/action/template.py
+++ b/v2/ansible/plugins/action/template.py
@@ -102,7 +102,7 @@ def run(self, tmp=None, task_vars=dict()):
with open(source, 'r') as f:
template_data = f.read()
resultant = templar.template(template_data, preserve_trailing_newlines=True)
- except Exception, e:
+ except Exception as e:
return dict(failed=True, msg=type(e).__name__ + ": " + str(e))
local_checksum = checksum_s(resultant)
diff --git a/v2/ansible/plugins/connections/accelerate.py b/v2/ansible/plugins/connections/accelerate.py
index a31124e119f655..13012aa9299a86 100644
--- a/v2/ansible/plugins/connections/accelerate.py
+++ b/v2/ansible/plugins/connections/accelerate.py
@@ -140,7 +140,7 @@ def connect(self, allow_ssh=True):
# shutdown, so we'll reconnect.
wrong_user = True
- except AnsibleError, e:
+ except AnsibleError as e:
if allow_ssh:
if "WRONG_USER" in e:
vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host)
diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/v2/ansible/plugins/connections/paramiko_ssh.py
index 4bb06e01c36147..81470f657c8c24 100644
--- a/v2/ansible/plugins/connections/paramiko_ssh.py
+++ b/v2/ansible/plugins/connections/paramiko_ssh.py
@@ -170,7 +170,7 @@ def _connect_uncached(self):
key_filename=key_filename, password=self.password,
timeout=self.runner.timeout, port=self.port)
- except Exception, e:
+ except Exception as e:
msg = str(e)
if "PID check failed" in msg:
@@ -197,7 +197,7 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
self.ssh.get_transport().set_keepalive(5)
chan = self.ssh.get_transport().open_session()
- except Exception, e:
+ except Exception as e:
msg = "Failed to open session"
if len(str(e)) > 0:
@@ -284,7 +284,7 @@ def put_file(self, in_path, out_path):
try:
self.sftp = self.ssh.open_sftp()
- except Exception, e:
+ except Exception as e:
raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e)
try:
@@ -308,7 +308,7 @@ def fetch_file(self, in_path, out_path):
try:
self.sftp = self._connect_sftp()
- except Exception, e:
+ except Exception as e:
raise errors.AnsibleError("failed to open a SFTP connection (%s)", e)
try:
diff --git a/v2/ansible/plugins/connections/winrm.py b/v2/ansible/plugins/connections/winrm.py
index d6e51710b5f27a..57d26ce61880a2 100644
--- a/v2/ansible/plugins/connections/winrm.py
+++ b/v2/ansible/plugins/connections/winrm.py
@@ -147,7 +147,7 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable
cmd_parts = powershell._encode_script(script, as_list=True)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
- except Exception, e:
+ except Exception as e:
traceback.print_exc()
raise errors.AnsibleError("failed to exec cmd %s" % cmd)
return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8'))
diff --git a/v2/ansible/plugins/lookup/csvfile.py b/v2/ansible/plugins/lookup/csvfile.py
index 87757399ce5b79..e5fb9a451213c0 100644
--- a/v2/ansible/plugins/lookup/csvfile.py
+++ b/v2/ansible/plugins/lookup/csvfile.py
@@ -33,7 +33,7 @@ def read_csv(self, filename, key, delimiter, dflt=None, col=1):
for row in creader:
if row[0] == key:
return row[int(col)]
- except Exception, e:
+ except Exception as e:
raise AnsibleError("csvfile: %s" % str(e))
return dflt
@@ -61,7 +61,7 @@ def run(self, terms, variables=None, **kwargs):
name, value = param.split('=')
assert(name in paramvals)
paramvals[name] = value
- except (ValueError, AssertionError), e:
+ except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if paramvals['delimiter'] == 'TAB':
diff --git a/v2/ansible/plugins/lookup/dnstxt.py b/v2/ansible/plugins/lookup/dnstxt.py
index 7100f8d96dfc39..75222927c79b7d 100644
--- a/v2/ansible/plugins/lookup/dnstxt.py
+++ b/v2/ansible/plugins/lookup/dnstxt.py
@@ -59,7 +59,7 @@ def run(self, terms, variables=None, **kwargs):
string = 'NXDOMAIN'
except dns.resolver.Timeout:
string = ''
- except dns.exception.DNSException, e:
+ except dns.exception.DNSException as e:
raise AnsibleError("dns.resolver unhandled exception", e)
ret.append(''.join(string))
diff --git a/v2/ansible/plugins/lookup/first_found.py b/v2/ansible/plugins/lookup/first_found.py
index 0ed268801508e2..b1d655b81147b5 100644
--- a/v2/ansible/plugins/lookup/first_found.py
+++ b/v2/ansible/plugins/lookup/first_found.py
@@ -177,7 +177,7 @@ def run(self, terms, variables, **kwargs):
for fn in total_search:
try:
fn = templar.template(fn)
- except (AnsibleUndefinedVariable, UndefinedError), e:
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
continue
if os.path.isabs(fn) and os.path.exists(fn):
diff --git a/v2/ansible/plugins/lookup/password.py b/v2/ansible/plugins/lookup/password.py
index 6e13410e1ab67e..7e812a38c5f6b4 100644
--- a/v2/ansible/plugins/lookup/password.py
+++ b/v2/ansible/plugins/lookup/password.py
@@ -85,7 +85,7 @@ def run(self, terms, variables, **kwargs):
paramvals['chars'] = use_chars
else:
paramvals[name] = value
- except (ValueError, AssertionError), e:
+ except (ValueError, AssertionError) as e:
raise AnsibleError(e)
length = paramvals['length']
@@ -99,7 +99,7 @@ def run(self, terms, variables, **kwargs):
if not os.path.isdir(pathdir):
try:
os.makedirs(pathdir, mode=0700)
- except OSError, e:
+ except OSError as e:
raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e)))
chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'')
diff --git a/v2/ansible/plugins/lookup/url.py b/v2/ansible/plugins/lookup/url.py
index c907bfbce3965a..1b9c5c0d808d48 100644
--- a/v2/ansible/plugins/lookup/url.py
+++ b/v2/ansible/plugins/lookup/url.py
@@ -31,10 +31,10 @@ def run(self, terms, inject=None, **kwargs):
try:
r = urllib2.Request(term)
response = urllib2.urlopen(r)
- except URLError, e:
+ except URLError as e:
utils.warnings("Failed lookup url for %s : %s" % (term, str(e)))
continue
- except HTTPError, e:
+ except HTTPError as e:
utils.warnings("Recieved HTTP error for %s : %s" % (term, str(e)))
continue
diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py
index afbc373f4f3332..c5b3dd0f066731 100644
--- a/v2/ansible/plugins/strategies/__init__.py
+++ b/v2/ansible/plugins/strategies/__init__.py
@@ -109,7 +109,7 @@ def _queue_task(self, host, task, task_vars, connection_info):
self._pending_results += 1
main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, module_loader), block=False)
- except (EOFError, IOError, AssertionError), e:
+ except (EOFError, IOError, AssertionError) as e:
# most likely an abort
debug("got an error while queuing: %s" % e)
return
diff --git a/v2/ansible/plugins/strategies/free.py b/v2/ansible/plugins/strategies/free.py
index 4fd8a132018ca3..d0506d37ddab59 100644
--- a/v2/ansible/plugins/strategies/free.py
+++ b/v2/ansible/plugins/strategies/free.py
@@ -139,7 +139,7 @@ def run(self, iterator, connection_info):
try:
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
- except Exception, e:
+ except Exception as e:
# FIXME: ctrl+c can cause some failures here, so catch them
# with the appropriate error type
print("wtf: %s" % e)
diff --git a/v2/ansible/template/safe_eval.py b/v2/ansible/template/safe_eval.py
index ba377054d7ad2e..c52ef398d76b76 100644
--- a/v2/ansible/template/safe_eval.py
+++ b/v2/ansible/template/safe_eval.py
@@ -105,13 +105,13 @@ def generic_visit(self, node, inside_call=False):
return (result, None)
else:
return result
- except SyntaxError, e:
+ except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is
if include_exceptions:
return (expr, None)
return expr
- except Exception, e:
+ except Exception as e:
if include_exceptions:
return (expr, e)
return expr
diff --git a/v2/ansible/utils/hashing.py b/v2/ansible/utils/hashing.py
index 0b2edd434bc544..2c7dd534fcb28c 100644
--- a/v2/ansible/utils/hashing.py
+++ b/v2/ansible/utils/hashing.py
@@ -64,7 +64,7 @@ def secure_hash(filename, hash_func=sha1):
digest.update(block)
block = infile.read(blocksize)
infile.close()
- except IOError, e:
+ except IOError as e:
raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
return digest.hexdigest()
diff --git a/v2/ansible/utils/vault.py b/v2/ansible/utils/vault.py
index 04634aa377b498..5c704afac59b2b 100644
--- a/v2/ansible/utils/vault.py
+++ b/v2/ansible/utils/vault.py
@@ -40,7 +40,7 @@ def read_vault_file(vault_password_file):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
- except OSError, e:
+ except OSError as e:
raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e))
stdout, stderr = p.communicate()
vault_pass = stdout.strip('\r\n')
@@ -49,7 +49,7 @@ def read_vault_file(vault_password_file):
f = open(this_path, "rb")
vault_pass=f.read().strip()
f.close()
- except (OSError, IOError), e:
+ except (OSError, IOError) as e:
raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
return vault_pass
diff --git a/v2/ansible/vars/__init__.py b/v2/ansible/vars/__init__.py
index eb75d9c9929b8a..183116ea2d84fe 100644
--- a/v2/ansible/vars/__init__.py
+++ b/v2/ansible/vars/__init__.py
@@ -243,7 +243,7 @@ def _load_inventory_file(self, path, loader):
try:
names = loader.list_directory(path)
- except os.error, err:
+ except os.error as err:
raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))
# evaluate files in a stable order rather than whatever
diff --git a/v2/samples/multi.py b/v2/samples/multi.py
index ca4c8b68f744e4..dce61430594bfb 100644
--- a/v2/samples/multi.py
+++ b/v2/samples/multi.py
@@ -59,10 +59,10 @@ def _read_worker_result(cur_worker):
time.sleep(0.01)
continue
pipe.send(result)
- except (IOError, EOFError, KeyboardInterrupt), e:
+ except (IOError, EOFError, KeyboardInterrupt) as e:
debug("got a breaking error: %s" % e)
break
- except Exception, e:
+ except Exception as e:
debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e)
traceback.print_exc()
break
diff --git a/v2/samples/multi_queues.py b/v2/samples/multi_queues.py
index 8eb80366076476..9e8f22b9a945bc 100644
--- a/v2/samples/multi_queues.py
+++ b/v2/samples/multi_queues.py
@@ -55,10 +55,10 @@ def _read_worker_result(cur_worker):
time.sleep(0.01)
continue
final_q.put(result, block=False)
- except (IOError, EOFError, KeyboardInterrupt), e:
+ except (IOError, EOFError, KeyboardInterrupt) as e:
debug("got a breaking error: %s" % e)
break
- except Exception, e:
+ except Exception as e:
debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e)
traceback.print_exc()
break
@@ -77,10 +77,10 @@ def worker(main_q, res_q, loader):
time.sleep(0.01)
except Queue.Empty:
pass
- except (IOError, EOFError, KeyboardInterrupt), e:
+ except (IOError, EOFError, KeyboardInterrupt) as e:
debug("got a breaking error: %s" % e)
break
- except Exception, e:
+ except Exception as e:
debug("EXCEPTION DURING WORKER PROCESSING: %s" % e)
traceback.print_exc()
break
From b407dd8b58258379b824721c193ca005deeb3a19 Mon Sep 17 00:00:00 2001
From: Dorian Pula
Date: Mon, 13 Apr 2015 13:34:48 -0400
Subject: [PATCH 0293/3617] Add setup.py for v2 to allow for pip editable
installs.
---
v2/setup.py | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
create mode 100644 v2/setup.py
diff --git a/v2/setup.py b/v2/setup.py
new file mode 100644
index 00000000000000..a9a518798188ea
--- /dev/null
+++ b/v2/setup.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+import sys
+
+from ansible import __version__
+try:
+ from setuptools import setup, find_packages
+except ImportError:
+ print("Ansible now needs setuptools in order to build. Install it using"
+ " your package manager (usually python-setuptools) or via pip (pip"
+ " install setuptools).")
+ sys.exit(1)
+
+setup(name='ansible',
+ version=__version__,
+ description='Radically simple IT automation',
+ author='Michael DeHaan',
+ author_email='michael@ansible.com',
+ url='http://ansible.com/',
+ license='GPLv3',
+ install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'],
+ # package_dir={ '': 'lib' },
+ # packages=find_packages('lib'),
+ package_data={
+ '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'],
+ },
+ scripts=[
+ 'bin/ansible',
+ 'bin/ansible-playbook',
+ # 'bin/ansible-pull',
+ # 'bin/ansible-doc',
+ # 'bin/ansible-galaxy',
+ # 'bin/ansible-vault',
+ ],
+ data_files=[],
+)
From 5f1ba589a5a27d0379e8154293ba19964ac60e8f Mon Sep 17 00:00:00 2001
From: Timothy Sutton
Date: Mon, 13 Apr 2015 13:38:11 -0400
Subject: [PATCH 0294/3617] Git integration test: remove test for ambiguous
.git/branches dir
- '.git/branches' does not always exist, but the git integration
tests always checks for this directory's existence so it always
fails
- more info:
- http://stackoverflow.com/questions/10398225/what-is-the-git-branches-folder-used-for
---
test/integration/roles/test_git/tasks/main.yml | 5 -----
1 file changed, 5 deletions(-)
diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml
index 4bdc1d8bd870fd..831db8ea698803 100644
--- a/test/integration/roles/test_git/tasks/main.yml
+++ b/test/integration/roles/test_git/tasks/main.yml
@@ -65,16 +65,11 @@
stat: path={{ checkout_dir }}/.git/HEAD
register: head
-- name: check for remotes
- stat: path={{ checkout_dir }}/.git/branches
- register: branches
-
- name: assert presence of tags/trunk/branches
assert:
that:
- "tags.stat.isdir"
- "head.stat.isreg"
- - "branches.stat.isdir"
- name: verify on a reclone things are marked unchanged
assert:
From 3504f1cad96f781c3ebf5bb8d50b6bed1df13d15 Mon Sep 17 00:00:00 2001
From: Dorian Pula
Date: Mon, 13 Apr 2015 13:44:58 -0400
Subject: [PATCH 0295/3617] Add test requirements for working with v2.
---
v2/test-requirements.txt | 11 +++++++++++
1 file changed, 11 insertions(+)
create mode 100644 v2/test-requirements.txt
diff --git a/v2/test-requirements.txt b/v2/test-requirements.txt
new file mode 100644
index 00000000000000..97a75d3cb5c767
--- /dev/null
+++ b/v2/test-requirements.txt
@@ -0,0 +1,11 @@
+# Ansible requirementss
+paramiko
+PyYAML
+jinja2
+httplib2
+passlib
+
+# Test requirements
+unittest2
+mock
+nose
From 87dde862bd5b93900a3f1db1d99962f89e160705 Mon Sep 17 00:00:00 2001
From: eroldan
Date: Mon, 13 Apr 2015 16:21:08 -0300
Subject: [PATCH 0296/3617] Fixed wrong example of 'environment' for setting
PATH
---
docsite/rst/faq.rst | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst
index 1b499c547406bb..ba3ae1264ffd3d 100644
--- a/docsite/rst/faq.rst
+++ b/docsite/rst/faq.rst
@@ -3,15 +3,17 @@ Frequently Asked Questions
Here are some commonly-asked questions and their answers.
-.. _users_and_ports:
+.. _set_environment:
-If you are looking to set environment variables remotely for your project (in a task, not locally for Ansible)
-The keyword is simply `environment`
+How can I set the PATH or any other environment variable for a task or entire playbook?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Setting environment variables can be done with the `environment` keyword. It can be used at task or playbook level::
+
+ environment:
+ PATH: {{ ansible_env.PATH }}:/thingy/bin
+ SOME: value
-```
- environment:
- PATH:$PATH:/thingy/bin
-```
How do I handle different machines needing different user accounts or ports to log in with?
From 1bdf0bb0d67849d96aa1b29713af6643e35d148f Mon Sep 17 00:00:00 2001
From: ian
Date: Mon, 13 Apr 2015 15:37:25 -0400
Subject: [PATCH 0297/3617] Several more changes to support python3 syntax.
---
v2/ansible/plugins/action/__init__.py | 2 +-
v2/ansible/plugins/lookup/password.py | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py
index 0e98bbc5b75e59..be83539def6ddb 100644
--- a/v2/ansible/plugins/action/__init__.py
+++ b/v2/ansible/plugins/action/__init__.py
@@ -19,7 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-import StringIO
+from six.moves import StringIO
import json
import os
import random
diff --git a/v2/ansible/plugins/lookup/password.py b/v2/ansible/plugins/lookup/password.py
index 7e812a38c5f6b4..74017eff619948 100644
--- a/v2/ansible/plugins/lookup/password.py
+++ b/v2/ansible/plugins/lookup/password.py
@@ -98,7 +98,7 @@ def run(self, terms, variables, **kwargs):
pathdir = os.path.dirname(path)
if not os.path.isdir(pathdir):
try:
- os.makedirs(pathdir, mode=0700)
+ os.makedirs(pathdir, mode=0o700)
except OSError as e:
raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e)))
@@ -111,7 +111,7 @@ def run(self, terms, variables, **kwargs):
else:
content = password
with open(path, 'w') as f:
- os.chmod(path, 0600)
+ os.chmod(path, 0o600)
f.write(content + '\n')
else:
content = open(path).read().rstrip()
@@ -129,12 +129,12 @@ def run(self, terms, variables, **kwargs):
salt = self.random_salt()
content = '%s salt=%s' % (password, salt)
with open(path, 'w') as f:
- os.chmod(path, 0600)
+ os.chmod(path, 0o600)
f.write(content + '\n')
# crypt not requested, remove salt if present
elif (encrypt is None and salt):
with open(path, 'w') as f:
- os.chmod(path, 0600)
+ os.chmod(path, 0o600)
f.write(password + '\n')
if encrypt:
From 3a8088fe3009e2ef29a33517c6a787c27098041c Mon Sep 17 00:00:00 2001
From: Toshio Kuratomi
Date: Mon, 13 Apr 2015 12:57:17 -0700
Subject: [PATCH 0298/3617] _available_vars in v1 == _available_variables in v2
---
v2/ansible/template/__init__.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/v2/ansible/template/__init__.py b/v2/ansible/template/__init__.py
index 4e15e83424c22b..6c41ad3cf40697 100644
--- a/v2/ansible/template/__init__.py
+++ b/v2/ansible/template/__init__.py
@@ -143,8 +143,8 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=Fals
only_one = SINGLE_VAR.match(variable)
if only_one:
var_name = only_one.group(1)
- if var_name in self._available_vars:
- resolved_val = self._available_vars[var_name]
+ if var_name in self._available_variables:
+ resolved_val = self._available_variables[var_name]
if isinstance(resolved_val, NON_TEMPLATED_TYPES):
return resolved_val
From 3bb7b0eef309dbac7ca97ae7fa54213950e86ac8 Mon Sep 17 00:00:00 2001
From: ian
Date: Mon, 13 Apr 2015 16:03:02 -0400
Subject: [PATCH 0299/3617] Import StringIO from six in a couple more places.
---
v2/ansible/executor/module_common.py | 2 +-
v2/test/parsing/yaml/test_loader.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/v2/ansible/executor/module_common.py b/v2/ansible/executor/module_common.py
index 23890d64e61a69..535fbd45e335aa 100644
--- a/v2/ansible/executor/module_common.py
+++ b/v2/ansible/executor/module_common.py
@@ -21,7 +21,7 @@
__metaclass__ = type
# from python and deps
-from cStringIO import StringIO
+from six.moves import StringIO
import json
import os
import shlex
diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py
index 9a4746b99dfeab..d393d72a0054a0 100644
--- a/v2/test/parsing/yaml/test_loader.py
+++ b/v2/test/parsing/yaml/test_loader.py
@@ -20,7 +20,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from StringIO import StringIO
+from six.moves import StringIO
from collections import Sequence, Set, Mapping
from ansible.compat.tests import unittest
From 3d2a056ad4e748eb22d51ce73f94d3cb53092776 Mon Sep 17 00:00:00 2001
From: Amandine Lee