From 6b70ee23abf434f82cf98f42f1eb4a3156bb21bd Mon Sep 17 00:00:00 2001 From: Nick Irvine Date: Wed, 21 May 2014 19:24:28 -0700 Subject: [PATCH 0001/3617] Clean non-printable chars from stdout instead of dropping the whole thing --- lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index adc9b7bcbd147a..077724f9f305a2 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -877,7 +877,7 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port, if hasattr(sys.stdout, "isatty"): if "stdout" in data and sys.stdout.isatty(): if not string_functions.isprintable(data['stdout']): - data['stdout'] = '' + data['stdout'] = ''.join(c for c in data['stdout'] if string_functions.isprintable(c)) if 'item' in inject: result.result['item'] = inject['item'] From 3d61f077ec1ba2c0fdd4d493c730a4299e2f883d Mon Sep 17 00:00:00 2001 From: Jordon Replogle Date: Wed, 30 Jul 2014 10:08:22 -0700 Subject: [PATCH 0002/3617] Added OpenVZ Inventory python script --- plugins/inventory/openvz.py | 74 +++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 plugins/inventory/openvz.py diff --git a/plugins/inventory/openvz.py b/plugins/inventory/openvz.py new file mode 100644 index 00000000000000..1f441a39f540f8 --- /dev/null +++ b/plugins/inventory/openvz.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# openvz.py +# +# Copyright 2014 jordonr +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, +# MA 02110-1301, USA. +# +# +# Inspired by libvirt_lxc.py inventory script +# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py +# +# Groups are determined by the description field of openvz guests +# multiple groups can be seperated by commas: webserver,dbserver + +from subprocess import Popen,PIPE +import sys +import json + + +#List openvz hosts +vzhosts = ['192.168.1.3','192.168.1.2','192.168.1.1'] +#Add openvzhosts to the inventory +inventory = {'vzhosts': {'hosts': vzhosts}} +#default group, when description not defined +default_group = ['vzguest'] + +def getGuests(): + #Loop through vzhosts + for h in vzhosts: + #SSH to vzhost and get the list of guests in json + pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True) + + #Load Json info of guests + json_data = json.loads(pipe.stdout.read()) + + #loop through guests + for j in json_data: + #determine group from guest description + if j['description'] is not None: + groups = j['description'].split(",") + else: + groups = default_group + + #add guest to inventory + for g in groups: + if g not in inventory: + inventory[g] = {'hosts': []} + + for ip in j['ip']: + inventory[g]['hosts'].append(ip) + + print json.dumps(inventory) + +if len(sys.argv) == 2 and sys.argv[1] == '--list': + getGuests() +elif len(sys.argv) == 3 and sys.argv[1] == '--host': + print json.dumps({}); +else: + print "Need an argument, either --list or --host " From df8dfdce06f837c49f230d5e27b513f2bfe27cf1 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Wed, 6 Aug 2014 13:00:14 +0200 Subject: [PATCH 0003/3617] packaging: add short has and branch name in package version for unofficial builds --- 
Makefile | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index afd7162f96e502..56c63903b6d620 100644 --- a/Makefile +++ b/Makefile @@ -39,6 +39,11 @@ VERSION := $(shell cat VERSION) # Get the branch information from git ifneq ($(shell which git),) GIT_DATE := $(shell git log -n 1 --format="%ai") +GIT_HASH := $(shell git log -n 1 --format="%h") +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.]//g') +GITINFO = .$(GIT_HASH).$(GIT_BRANCH) +else +GITINFO = '' endif ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1) @@ -60,7 +65,7 @@ ifeq ($(OFFICIAL),yes) DEBUILD_OPTS += -k$(DEBSIGN_KEYID) endif else - DEB_RELEASE = 0.git$(DATE) + DEB_RELEASE = 0.git$(DATE)$(GITINFO) # Do not sign unofficial builds DEBUILD_OPTS += -uc -us DPUT_OPTS += -u @@ -76,7 +81,7 @@ RPMSPEC = $(RPMSPECDIR)/ansible.spec RPMDIST = $(shell rpm --eval '%{?dist}') RPMRELEASE = 1 ifneq ($(OFFICIAL),yes) - RPMRELEASE = 0.git$(DATE) + RPMRELEASE = 0.git$(DATE)$(GITINFO) endif RPMNVR = "$(NAME)-$(VERSION)-$(RPMRELEASE)$(RPMDIST)" From 0ff2936626afe83e2898e8ccecf59b891e550bf5 Mon Sep 17 00:00:00 2001 From: Jordon Replogle Date: Wed, 13 Aug 2014 10:28:43 -0700 Subject: [PATCH 0004/3617] Updated per Revision Request --- plugins/inventory/openvz.py | 73 +++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 35 deletions(-) diff --git a/plugins/inventory/openvz.py b/plugins/inventory/openvz.py index 1f441a39f540f8..fd0bd9ff79454b 100644 --- a/plugins/inventory/openvz.py +++ b/plugins/inventory/openvz.py @@ -5,21 +5,20 @@ # # Copyright 2014 jordonr # -# This program is free software; you can redistribute it and/or modify +# This file is part of Ansible. 
+# +# Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or +# the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # -# This program is distributed in the hope that it will be useful, +# Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, -# MA 02110-1301, USA. -# +# along with Ansible. If not, see . # # Inspired by libvirt_lxc.py inventory script # https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py @@ -33,42 +32,46 @@ #List openvz hosts -vzhosts = ['192.168.1.3','192.168.1.2','192.168.1.1'] -#Add openvzhosts to the inventory -inventory = {'vzhosts': {'hosts': vzhosts}} +vzhosts = ['vzhost1','vzhost2','vzhost3'] +#Add openvz hosts to the inventory and Add "_meta" trick +inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}} #default group, when description not defined default_group = ['vzguest'] -def getGuests(): - #Loop through vzhosts - for h in vzhosts: - #SSH to vzhost and get the list of guests in json - pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True) +def get_guests(): + #Loop through vzhosts + for h in vzhosts: + #SSH to vzhost and get the list of guests in json + pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True) + + #Load Json info of guests + json_data = json.loads(pipe.stdout.read()) + + 
#loop through guests + for j in json_data: + #Add information to host vars + inventory['_meta']['hostvars'][j['hostname']] = {'ctid': j['ctid'], 'veid': j['veid'], 'vpsid': j['vpsid'], 'private_path': j['private'], 'root_path': j['root'], 'ip': j['ip']} - #Load Json info of guests - json_data = json.loads(pipe.stdout.read()) + #determine group from guest description + if j['description'] is not None: + groups = j['description'].split(",") + else: + groups = default_group - #loop through guests - for j in json_data: - #determine group from guest description - if j['description'] is not None: - groups = j['description'].split(",") - else: - groups = default_group + #add guest to inventory + for g in groups: + if g not in inventory: + inventory[g] = {'hosts': []} - #add guest to inventory - for g in groups: - if g not in inventory: - inventory[g] = {'hosts': []} + inventory[g]['hosts'].append(j['hostname']) - for ip in j['ip']: - inventory[g]['hosts'].append(ip) + return inventory - print json.dumps(inventory) if len(sys.argv) == 2 and sys.argv[1] == '--list': - getGuests() + inv_json = get_guests() + print json.dumps(inv_json, sort_keys=True) elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print json.dumps({}); + print json.dumps({}); else: - print "Need an argument, either --list or --host " + print "Need an argument, either --list or --host " From eccb48c8da77bf9ba884cc989251ed5d5209b1e1 Mon Sep 17 00:00:00 2001 From: Carson Gee Date: Sat, 17 May 2014 22:10:24 -0400 Subject: [PATCH 0005/3617] Improvements to OpenStack inventory script --- plugins/inventory/nova.ini | 9 +- plugins/inventory/nova.py | 166 ++++++++++++++++++++++++++----------- 2 files changed, 124 insertions(+), 51 deletions(-) mode change 100755 => 100644 plugins/inventory/nova.py diff --git a/plugins/inventory/nova.ini b/plugins/inventory/nova.ini index e648e5f143c12b..040c52bcee9b1c 100644 --- a/plugins/inventory/nova.ini +++ b/plugins/inventory/nova.ini @@ -14,7 +14,7 @@ api_key = auth_url 
= # Authentication system -auth_system = +auth_system = keystone # OpenStack nova project_id project_id = @@ -22,6 +22,13 @@ project_id = # Serverarm region name to use region_name = +# Specify a preference for public or private IPs (public is default) +prefer_private = False + +# What service type (required for newer nova client) +service_type = compute + + # TODO: Some other options # insecure = # endpoint_type = diff --git a/plugins/inventory/nova.py b/plugins/inventory/nova.py old mode 100755 new mode 100644 index 585e26732ed316..b1094c72887df4 --- a/plugins/inventory/nova.py +++ b/plugins/inventory/nova.py @@ -25,11 +25,9 @@ try: import json -except: +except ImportError: import simplejson as json -from ansible.module_utils.openstack import * - ################################################### # executed with no parameters, return the list of # all groups and hosts @@ -54,45 +52,129 @@ def nova_load_config_file(): return None + +def get_fallback(config, value, section="openstack"): + """ + Get value from config object and return the value + or false + """ + try: + return config.get(section, value) + except ConfigParser.NoOptionError: + return False + + +def push(data, key, element): + """ + Assist in items to a dictionary of lists + """ + if (not element) or (not key): + return + + if key in data: + data[key].append(element) + else: + data[key] = [element] + + +def to_safe(word): + ''' + Converts 'bad' characters in a string to underscores so they can + be used as Ansible groups + ''' + return re.sub(r"[^A-Za-z0-9\-]", "_", word) + + +def get_ips(server, access_ip=True): + """ + Returns a list of the server's IPs, or the preferred + access IP + """ + private = [] + public = [] + address_list = [] + # Iterate through each servers network(s), get addresses and get type + addresses = getattr(server, 'addresses', {}) + if len(addresses) > 0: + for network in addresses.itervalues(): + for address in network: + if address.get('OS-EXT-IPS:type', False) == 'fixed': + 
private.append(address['addr']) + elif address.get('OS-EXT-IPS:type', False) == 'floating': + public.append(address['addr']) + + if not access_ip: + address_list.append(server.accessIPv4) + address_list.extend(private) + address_list.extend(public) + return address_list + + access_ip = None + # Append group to list + if server.accessIPv4: + access_ip = server.accessIPv4 + if (not access_ip) and public and not (private and prefer_private): + access_ip = public[0] + if private and not access_ip: + access_ip = private[0] + + return access_ip + + +def get_metadata(server): + """Returns dictionary of all host metadata""" + get_ips(server, False) + results = {} + for key in vars(server): + # Extract value + value = getattr(server, key) + + # Generate sanitized key + key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower() + + # Att value to instance result (exclude manager class) + #TODO: maybe use value.__class__ or similar inside of key_name + if key != 'os_manager': + results[key] = value + return results + config = nova_load_config_file() if not config: sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES)) client = nova_client.Client( - config.get('openstack', 'version'), - config.get('openstack', 'username'), - config.get('openstack', 'api_key'), - config.get('openstack', 'project_id'), - config.get('openstack', 'auth_url'), + version = config.get('openstack', 'version'), + username = config.get('openstack', 'username'), + api_key = config.get('openstack', 'api_key'), + auth_url = config.get('openstack', 'auth_url'), region_name = config.get('openstack', 'region_name'), + project_id = config.get('openstack', 'project_id'), auth_system = config.get('openstack', 'auth_system') ) -if len(sys.argv) == 2 and (sys.argv[1] == '--list'): - groups = {} - +# Default or added list option +if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1: + groups = {'_meta': {'hostvars': {}}} # Cycle on servers for server in client.servers.list(): 
- private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private') - public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') - - # Define group (or set to empty string) - group = server.metadata['group'] if server.metadata.has_key('group') else 'undefined' - - # Create group if not exist - if group not in groups: - groups[group] = [] - - # Append group to list - if server.accessIPv4: - groups[group].append(server.accessIPv4) - continue - if public: - groups[group].append(''.join(public)) - continue - if private: - groups[group].append(''.join(private)) - continue + access_ip = get_ips(server) + + # Push to name group of 1 + push(groups, server.name, access_ip) + + # Run through each metadata item and add instance to it + for key, value in server.metadata.iteritems(): + composed_key = to_safe('tag_{0}_{1}'.format(key, value)) + push(groups, composed_key, access_ip) + + # Do special handling of group for backwards compat + # inventory groups + group = server.metadata['group'] if 'group' in server.metadata else 'undefined' + push(groups, group, access_ip) + + # Add vars to _meta key for performance optimization in + # Ansible 1.3+ + groups['_meta']['hostvars'][access_ip] = get_metadata(server) # Return server list print(json.dumps(groups, sort_keys=True, indent=2)) @@ -105,25 +187,9 @@ def nova_load_config_file(): elif len(sys.argv) == 3 and (sys.argv[1] == '--host'): results = {} ips = [] - for instance in client.servers.list(): - private = openstack_find_nova_addresses(getattr(instance, 'addresses'), 'fixed', 'private') - public = openstack_find_nova_addresses(getattr(instance, 'addresses'), 'floating', 'public') - ips.append( instance.accessIPv4) - ips.append(''.join(private)) - ips.append(''.join(public)) - if sys.argv[2] in ips: - for key in vars(instance): - # Extract value - value = getattr(instance, key) - - # Generate sanitized key - key = 'os_' + re.sub("[^A-Za-z0-9\-]", "_", key).lower() - - # Att 
value to instance result (exclude manager class) - #TODO: maybe use value.__class__ or similar inside of key_name - if key != 'os_manager': - results[key] = value - + for server in client.servers.list(): + if sys.argv[2] in (get_ips(server) or []): + results = get_metadata(server) print(json.dumps(results, sort_keys=True, indent=2)) sys.exit(0) From cd5edc416c810354704c5b41701b1bcebb42305c Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Tue, 1 Jul 2014 09:41:55 -0700 Subject: [PATCH 0006/3617] nova.py: Set defaults for OpenStack settings - auth_system - region_name - service_type These are config settings that could be left out in many scenarios, but the current code is requiring them. In particular, "service_type" is a new one in PR #7444 so if we add that and don't set a default, then existing .ini files won't work: ``` File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py", line 618, in get raise NoOptionError(option, section) ConfigParser.NoOptionError: No option 'service_type' in section: 'openstack' ``` --- plugins/inventory/nova.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inventory/nova.py b/plugins/inventory/nova.py index b1094c72887df4..48e720184f5ec0 100644 --- a/plugins/inventory/nova.py +++ b/plugins/inventory/nova.py @@ -39,6 +39,7 @@ NOVA_DEFAULTS = { 'auth_system': None, 'region_name': None, + 'service_type': 'compute', } From 1560b963aa2b5188cf138a1f0be0e27b22f4915a Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Tue, 1 Jul 2014 12:20:15 -0700 Subject: [PATCH 0007/3617] nova.py: Support OS_AUTH_SYSTEM and OS_REGION_NAME --- plugins/inventory/nova.py | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/plugins/inventory/nova.py b/plugins/inventory/nova.py index 48e720184f5ec0..7e58390ee1a147 100644 --- a/plugins/inventory/nova.py +++ b/plugins/inventory/nova.py @@ -143,14 +143,37 @@ def get_metadata(server): if not config: sys.exit('Unable 
to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES)) +# Load up connections info based on config and then environment +# variables +username = (get_fallback(config, 'username') or + os.environ.get('OS_USERNAME', None)) +api_key = (get_fallback(config, 'api_key') or + os.environ.get('OS_PASSWORD', None)) +auth_url = (get_fallback(config, 'auth_url') or + os.environ.get('OS_AUTH_URL', None)) +project_id = (get_fallback(config, 'project_id') or + os.environ.get('OS_TENANT_NAME', None)) +region_name = (get_fallback(config, 'region_name') or + os.environ.get('OS_REGION_NAME', None)) +auth_system = (get_fallback(config, 'auth_system') or + os.environ.get('OS_AUTH_SYSTEM', None)) + +# Determine what type of IP is preferred to return +prefer_private = False +try: + prefer_private = config.getboolean('openstack', 'prefer_private') +except ConfigParser.NoOptionError: + pass + client = nova_client.Client( - version = config.get('openstack', 'version'), - username = config.get('openstack', 'username'), - api_key = config.get('openstack', 'api_key'), - auth_url = config.get('openstack', 'auth_url'), - region_name = config.get('openstack', 'region_name'), - project_id = config.get('openstack', 'project_id'), - auth_system = config.get('openstack', 'auth_system') + version=config.get('openstack', 'version'), + username=username, + api_key=api_key, + auth_url=auth_url, + region_name=region_name, + project_id=project_id, + auth_system=auth_system, + service_type=config.get('openstack', 'service_type'), ) # Default or added list option From 7cc5ecae527588dde572ddbace1d13e4a4b62bdf Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Tue, 1 Jul 2014 12:47:25 -0700 Subject: [PATCH 0008/3617] nova.ini: Distinguish between required and optional settings Put them in separate sections of config to make it more clear what is essential and what is not. Also comment out the optional settings. And remove duplicate mention of `service_type`. 
--- plugins/inventory/nova.ini | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/plugins/inventory/nova.ini b/plugins/inventory/nova.ini index 040c52bcee9b1c..4900c49651603b 100644 --- a/plugins/inventory/nova.ini +++ b/plugins/inventory/nova.ini @@ -1,37 +1,45 @@ # Ansible OpenStack external inventory script [openstack] + +#------------------------------------------------------------------------- +# Required settings +#------------------------------------------------------------------------- + # API version version = 2 # OpenStack nova username username = -# OpenStack nova api_key +# OpenStack nova api_key or password api_key = # OpenStack nova auth_url auth_url = -# Authentication system -auth_system = keystone +# OpenStack nova project_id or tenant name +project_id = -# OpenStack nova project_id -project_id = +#------------------------------------------------------------------------- +# Optional settings +#------------------------------------------------------------------------- + +# Authentication system +# auth_system = keystone # Serverarm region name to use -region_name = +# region_name = # Specify a preference for public or private IPs (public is default) -prefer_private = False +# prefer_private = False # What service type (required for newer nova client) -service_type = compute +# service_type = compute # TODO: Some other options # insecure = # endpoint_type = # extensions = -# service_type = # service_name = From 3ca654ad9ade1ce2745f4b3496d3a1683ace2ce5 Mon Sep 17 00:00:00 2001 From: Strahinja Kustudic Date: Sun, 5 Oct 2014 19:54:31 +0200 Subject: [PATCH 0009/3617] Added an example for paretheses --- docsite/rst/playbooks_conditionals.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docsite/rst/playbooks_conditionals.rst b/docsite/rst/playbooks_conditionals.rst index a00ec916c41f2c..cdaf54f5ea4a7b 100644 --- a/docsite/rst/playbooks_conditionals.rst +++ b/docsite/rst/playbooks_conditionals.rst @@ 
-26,6 +26,14 @@ It's actually pretty simple:: command: /sbin/shutdown -t now when: ansible_os_family == "Debian" +You can also use parentheses to group conditions:: + + tasks: + - name: "shutdown CentOS 6 and 7 systems" + command: /sbin/shutdown -t now + when: ansible_distribution == "CentOS" and + (ansible_distribution_major_version == "6" or ansible_distribution_major_version == "7") + A number of Jinja2 "filters" can also be used in when statements, some of which are unique and provided by Ansible. Suppose we want to ignore the error of one statement and then decide to do something conditionally based on success or failure:: From 76f473cd5d5a8ed1c6c5deb173587ce01e5b8f29 Mon Sep 17 00:00:00 2001 From: Mathieu GAUTHIER-LAFAYE Date: Mon, 6 Oct 2014 17:12:03 +0200 Subject: [PATCH 0010/3617] add a proxmox inventory plugin --- plugins/inventory/proxmox.py | 131 +++++++++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100755 plugins/inventory/proxmox.py diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py new file mode 100755 index 00000000000000..ceb41110278417 --- /dev/null +++ b/plugins/inventory/proxmox.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python + +# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import urllib +import urllib2 +try: + import json +except ImportError: + import simplejson as json +import os +import sys +from optparse import OptionParser + +class ProxmoxNodeList(list): + def get_names(self): + return [node['node'] for node in self] + +class ProxmoxQemuList(list): + def get_names(self): + return [qemu['name'] for qemu in self if qemu['template'] != 1] + +class ProxmoxPoolList(list): + def get_names(self): + return [pool['poolid'] for pool in self] + +class ProxmoxPool(dict): + def get_members_name(self): + return [member['name'] for member in self['members'] if member['template'] != 1] + +class ProxmoxAPI(object): + def __init__(self, options): + self.options = options + self.credentials = None + + if not options.url: + raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).') + elif not options.username: + raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).') + elif not options.password: + raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).') + + def auth(self): + request_path = '{}api2/json/access/ticket'.format(self.options.url) + + request_params = urllib.urlencode({ + 'username': self.options.username, + 'password': self.options.password, + }) + + data = json.load(urllib2.urlopen(request_path, request_params)) + + self.credentials = { + 'ticket': data['data']['ticket'], + 'CSRFPreventionToken': data['data']['CSRFPreventionToken'], + } + + def get(self, url, data=None): + opener = urllib2.build_opener() + opener.addheaders.append(('Cookie', 'PVEAuthCookie={}'.format(self.credentials['ticket']))) + + request_path = '{}{}'.format(self.options.url, url) + request = opener.open(request_path, data) + + response = json.load(request) + return response['data'] + + def nodes(self): + return ProxmoxNodeList(self.get('api2/json/nodes')) + + def node_qemu(self, node): + return ProxmoxQemuList(self.get('api2/json/nodes/{}/qemu'.format(node))) + + def pools(self): + return 
ProxmoxPoolList(self.get('api2/json/pools')) + + def pool(self, poolid): + return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid))) + +def main_list(options): + result = {} + + proxmox_api = ProxmoxAPI(options) + proxmox_api.auth() + + # all + result['all'] = [] + for node in proxmox_api.nodes().get_names(): + result['all'] += proxmox_api.node_qemu(node).get_names() + + # pools + for pool in proxmox_api.pools().get_names(): + result[pool] = proxmox_api.pool(pool).get_members_name() + + print json.dumps(result) + +def main_host(): + print json.dumps({}) + +def main(): + parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') + parser.add_option('--list', action="store_true", default=False, dest="list") + parser.add_option('--host', dest="host") + parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url') + parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username') + parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password') + (options, args) = parser.parse_args() + + if options.list: + main_list(options) + elif options.host: + main_host() + else: + parser.print_help() + sys.exit(1) + +if __name__ == '__main__': + main() From 3d62e55abe14be12292186760413ce641f852c09 Mon Sep 17 00:00:00 2001 From: Mathieu GAUTHIER-LAFAYE Date: Tue, 7 Oct 2014 13:10:10 +0200 Subject: [PATCH 0011/3617] add host variables (proxmox_vmid, proxmox_uptime, proxmox_maxmem, ...) 
--- plugins/inventory/proxmox.py | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index ceb41110278417..590949a4c6631a 100755 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -33,6 +33,10 @@ class ProxmoxQemuList(list): def get_names(self): return [qemu['name'] for qemu in self if qemu['template'] != 1] + def get_by_name(self, name): + results = [qemu for qemu in self if qemu['name'] == name] + return results[0] if len(results) > 0 else None + class ProxmoxPoolList(list): def get_names(self): return [pool['poolid'] for pool in self] @@ -107,8 +111,24 @@ def main_list(options): print json.dumps(result) -def main_host(): - print json.dumps({}) +def main_host(options): + results = {} + + proxmox_api = ProxmoxAPI(options) + proxmox_api.auth() + + host = None + for node in proxmox_api.nodes().get_names(): + qemu_list = proxmox_api.node_qemu(node) + qemu = qemu_list.get_by_name(options.host) + if qemu: + break + + if qemu: + for key, value in qemu.iteritems(): + results['proxmox_' + key] = value + + print json.dumps(results) def main(): parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') @@ -122,7 +142,7 @@ def main(): if options.list: main_list(options) elif options.host: - main_host() + main_host(options) else: parser.print_help() sys.exit(1) From 7c094c93798eeae5af92961031125de83d6ec91d Mon Sep 17 00:00:00 2001 From: Mathieu GAUTHIER-LAFAYE Date: Tue, 7 Oct 2014 13:45:41 +0200 Subject: [PATCH 0012/3617] add _meta in the list json --- plugins/inventory/proxmox.py | 56 +++++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 17 deletions(-) diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index 590949a4c6631a..c9d5e82a623cfb 100755 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -29,7 +29,18 @@ class ProxmoxNodeList(list): def get_names(self): return 
[node['node'] for node in self] +class ProxmoxQemu(dict): + def get_variables(self): + variables = {} + for key, value in self.iteritems(): + variables['proxmox_' + key] = value + return variables + class ProxmoxQemuList(list): + def __init__(self, data=[]): + for item in data: + self.append(ProxmoxQemu(item)) + def get_names(self): return [qemu['name'] for qemu in self if qemu['template'] != 1] @@ -37,6 +48,13 @@ def get_by_name(self, name): results = [qemu for qemu in self if qemu['name'] == name] return results[0] if len(results) > 0 else None + def get_variables(self): + variables = {} + for qemu in self: + variables[qemu['name']] = qemu.get_variables() + + return variables + class ProxmoxPoolList(list): def get_names(self): return [pool['poolid'] for pool in self] @@ -95,40 +113,42 @@ def pool(self, poolid): return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid))) def main_list(options): - result = {} + results = { + 'all': { + 'hosts': [], + }, + '_meta': { + 'hostvars': {}, + } + } proxmox_api = ProxmoxAPI(options) proxmox_api.auth() - # all - result['all'] = [] for node in proxmox_api.nodes().get_names(): - result['all'] += proxmox_api.node_qemu(node).get_names() + qemu_list = proxmox_api.node_qemu(node) + results['all']['hosts'] += qemu_list.get_names() + results['_meta']['hostvars'].update(qemu_list.get_variables()) # pools for pool in proxmox_api.pools().get_names(): - result[pool] = proxmox_api.pool(pool).get_members_name() + results[pool] = { + 'hosts': proxmox_api.pool(pool).get_members_name(), + } - print json.dumps(result) + return json.dumps(results) def main_host(options): - results = {} - proxmox_api = ProxmoxAPI(options) proxmox_api.auth() - host = None for node in proxmox_api.nodes().get_names(): qemu_list = proxmox_api.node_qemu(node) qemu = qemu_list.get_by_name(options.host) if qemu: - break + return json.dumps(qemu.get_variables()) - if qemu: - for key, value in qemu.iteritems(): - results['proxmox_' + key] = value - - print 
json.dumps(results) + print json.dumps({}) def main(): parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') @@ -140,12 +160,14 @@ def main(): (options, args) = parser.parse_args() if options.list: - main_list(options) + json = main_list(options) elif options.host: - main_host(options) + json = main_host(options) else: parser.print_help() sys.exit(1) + print json + if __name__ == '__main__': main() From d20ef3a10af5dada0a3e3b3c1f7b15fee3839990 Mon Sep 17 00:00:00 2001 From: Mathieu GAUTHIER-LAFAYE Date: Tue, 7 Oct 2014 13:58:01 +0200 Subject: [PATCH 0013/3617] add --pretty for debuging purpose --- plugins/inventory/proxmox.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py index c9d5e82a623cfb..80f6628d97395d 100755 --- a/plugins/inventory/proxmox.py +++ b/plugins/inventory/proxmox.py @@ -136,7 +136,7 @@ def main_list(options): 'hosts': proxmox_api.pool(pool).get_members_name(), } - return json.dumps(results) + return results def main_host(options): proxmox_api = ProxmoxAPI(options) @@ -146,9 +146,9 @@ def main_host(options): qemu_list = proxmox_api.node_qemu(node) qemu = qemu_list.get_by_name(options.host) if qemu: - return json.dumps(qemu.get_variables()) + return qemu.get_variables() - print json.dumps({}) + return {} def main(): parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') @@ -157,17 +157,22 @@ def main(): parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url') parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username') parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password') + parser.add_option('--pretty', action="store_true", default=False, dest='pretty') (options, args) = parser.parse_args() if options.list: - json = main_list(options) + data = main_list(options) elif options.host: - json = main_host(options) + data = 
main_host(options) else: parser.print_help() sys.exit(1) - print json + indent = None + if options.pretty: + indent = 2 + + print json.dumps(data, indent=indent) if __name__ == '__main__': main() From fbc1cd553ca6d083a9801a32fae1dfa40e7b9f67 Mon Sep 17 00:00:00 2001 From: Andrew Rothstein Date: Tue, 14 Oct 2014 07:29:21 -0400 Subject: [PATCH 0014/3617] an ansible inventory garnered from fleetctl --- plugins/inventory/fleet.py | 107 +++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100755 plugins/inventory/fleet.py diff --git a/plugins/inventory/fleet.py b/plugins/inventory/fleet.py new file mode 100755 index 00000000000000..d6d7e4d2925bfe --- /dev/null +++ b/plugins/inventory/fleet.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +""" +fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and +returns it under the host group 'coreos' +""" + +# Copyright (C) 2014 Andrew Rothstein +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# +# Thanks to the vagrant.py inventory script for giving me the basic structure +# of this. 
+# + +import sys +import subprocess +import re +import string +from optparse import OptionParser +try: + import json +except: + import simplejson as json + +# Options +#------------------------------ + +parser = OptionParser(usage="%prog [options] --list | --host ") +parser.add_option('--list', default=False, dest="list", action="store_true", + help="Produce a JSON consumable grouping of Vagrant servers for Ansible") +parser.add_option('--host', default=None, dest="host", + help="Generate additional host specific details for given host for Ansible") +(options, args) = parser.parse_args() + +# +# helper functions +# + +def get_ssh_config() : + configs = [] + for box in list_running_boxes() : + config = get_a_ssh_config(box) + configs.append(config) + return configs + +#list all the running instances in the fleet +def list_running_boxes(): + boxes = [] + for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n') : + matcher = re.search("[^\s]+[\s]+([^\s]+).+", line) + if matcher and matcher.group(1) != "IP": + boxes.append(matcher.group(1)) + + return boxes + +def get_a_ssh_config(box_name) : + config = {} + config['Host'] = box_name + config['ansible_ssh_user'] = 'core' + config['ansible_python_interpreter'] = '/opt/bin/python' + return config + +# List out servers that vagrant has running +#------------------------------ +if options.list: + ssh_config = get_ssh_config() + hosts = { 'coreos': []} + + for data in ssh_config : + hosts['coreos'].append(data['Host']) + + print json.dumps(hosts) + sys.exit(1) + +# Get out the host details +#------------------------------ +elif options.host: + result = {} + ssh_config = get_ssh_config() + + details = filter(lambda x: (x['Host'] == options.host), ssh_config) + if len(details) > 0: + #pass through the port, in case it's non standard. 
+ result = details[0] + result + + print json.dumps(result) + sys.exit(1) + + +# Print out help +#------------------------------ +else: + parser.print_help() + sys.exit(1) From 61ae3c732ff024a9102d5f423eb7fa0c69ae1c46 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sun, 26 Oct 2014 10:41:58 -0700 Subject: [PATCH 0015/3617] Add required_if to AnsibleModule There is a common pattern in modules where some parameters are required only if another parameter is present AND set to a particular value. For instance, if a cloud server state is "present" it's important to indicate the image to be used, but if it's "absent", the image that was used to launch it is not necessary. Provide a check that takes as an input a list of 3-element tuples containing parameter to depend on, the value it should be set to, and a list of parameters which are required if the required parameter is set to the required value. --- lib/ansible/module_utils/basic.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 8a4548dc169771..779d8f4cde8cf1 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -247,7 +247,8 @@ class AnsibleModule(object): def __init__(self, argument_spec, bypass_checks=False, no_log=False, check_invalid_arguments=True, mutually_exclusive=None, required_together=None, - required_one_of=None, add_file_common_args=False, supports_check_mode=False): + required_one_of=None, add_file_common_args=False, supports_check_mode=False, + required_if=None): ''' common code for quickly building an ansible module in Python @@ -295,6 +296,7 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, self._check_argument_types() self._check_required_together(required_together) self._check_required_one_of(required_one_of) + self._check_required_if(required_if) self._set_defaults(pre=False) if not self.no_log: @@ -852,6 +854,20 @@ def 
_check_required_arguments(self): if len(missing) > 0: self.fail_json(msg="missing required arguments: %s" % ",".join(missing)) + def _check_required_if(self, spec): + ''' ensure that parameters which are conditionally required are present ''' + if spec is None: + return + for (key, val, requirements) in spec: + missing = [] + if key in self.params and self.params[key] == val: + for check in requirements: + count = self._count_terms(check) + if count == 0: + missing.append(check) + if len(missing) > 0: + self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing))) + def _check_argument_values(self): ''' ensure all arguments have the requested values, and there are no stray arguments ''' for (k,v) in self.argument_spec.iteritems(): From e5f651c458b5b3326f0ef371f2c8fc0d0beab6b5 Mon Sep 17 00:00:00 2001 From: Bryan Hunt Date: Tue, 28 Oct 2014 20:19:15 +0000 Subject: [PATCH 0016/3617] export ANSIBLE_HOME so it can be used in scripts In order that scripts like this can work ``` #!/bin/bash ansible -vvvv tag_instance_type_foo-training -i "${ANSIBLE_HOME}/plugins/inventory/ec2.py" --private-key=~/Downloads/foo-training.pem -u ec2-user -m ping ``` --- hacking/env-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/env-setup b/hacking/env-setup index 4fed1690976753..e0de78fc75d94a 100755 --- a/hacking/env-setup +++ b/hacking/env-setup @@ -13,7 +13,7 @@ fi # The below is an alternative to readlink -fn which doesn't exist on OS X # Source: http://stackoverflow.com/a/1678636 FULL_PATH=`python -c "import os; print(os.path.realpath('$HACKING_DIR'))"` -ANSIBLE_HOME=`dirname "$FULL_PATH"` +export ANSIBLE_HOME=`dirname "$FULL_PATH"` PREFIX_PYTHONPATH="$ANSIBLE_HOME/lib" PREFIX_PATH="$ANSIBLE_HOME/bin" From 3b7280b364b14e5fd6a7d1bec5fbaabd1fd23640 Mon Sep 17 00:00:00 2001 From: ktosiek Date: Sun, 9 Nov 2014 22:40:29 +0100 Subject: [PATCH 0017/3617] guide_rax.rst: fix add_host invocations change `groupname` to `groups`, as per
add_host documentation --- docsite/rst/guide_rax.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/guide_rax.rst b/docsite/rst/guide_rax.rst index d00a090fa3948e..28321ce7fa559a 100644 --- a/docsite/rst/guide_rax.rst +++ b/docsite/rst/guide_rax.rst @@ -131,7 +131,7 @@ The rax module returns data about the nodes it creates, like IP addresses, hostn hostname: "{{ item.name }}" ansible_ssh_host: "{{ item.rax_accessipv4 }}" ansible_ssh_pass: "{{ item.rax_adminpass }}" - groupname: raxhosts + groups: raxhosts with_items: rax.success when: rax.action == 'create' @@ -519,7 +519,7 @@ Build a complete webserver environment with servers, custom networks and load ba ansible_ssh_host: "{{ item.rax_accessipv4 }}" ansible_ssh_pass: "{{ item.rax_adminpass }}" ansible_ssh_user: root - groupname: web + groups: web with_items: rax.success when: rax.action == 'create' From a1adff4ff00091741cd95301d66a33cac161ea9d Mon Sep 17 00:00:00 2001 From: Baptiste Mathus Date: Wed, 26 Nov 2014 10:35:45 +0100 Subject: [PATCH 0018/3617] Setting LC_MESSAGES: prevent unparseable messages This locale variable defines how tools should display their messages. This is for example gonna change the yum message from "Nothing to do" to "Rien a faire" in my case (french). As the yum module parses that string in err, if the message is not enforced in english this is gonna fail. So this commits just enriches a bit more the code that's already written for that enforcement. This commit fixes issue #9635. 
--- lib/ansible/module_utils/basic.py | 1 + lib/ansible/runner/shell_plugins/sh.py | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index cee6510f34c0ac..761725cea0914e 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -772,6 +772,7 @@ def _check_locale(self): locale.setlocale(locale.LC_ALL, 'C') os.environ['LANG'] = 'C' os.environ['LC_CTYPE'] = 'C' + os.environ['LC_MESSAGES'] = 'C' except Exception, e: self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e) diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/runner/shell_plugins/sh.py index 95d48e9e7de168..27512b2c59c8a2 100644 --- a/lib/ansible/runner/shell_plugins/sh.py +++ b/lib/ansible/runner/shell_plugins/sh.py @@ -29,6 +29,7 @@ def env_prefix(self, **kwargs): env = dict( LANG = C.DEFAULT_MODULE_LANG, LC_CTYPE = C.DEFAULT_MODULE_LANG, + LC_MESSAGES = C.DEFAULT_MODULE_LANG, ) env.update(kwargs) return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()]) From 4ecaa78c79bd919c7d3c6107025ebff0fc8ef123 Mon Sep 17 00:00:00 2001 From: Andrew Rothstein Date: Fri, 28 Nov 2014 00:00:35 -0500 Subject: [PATCH 0019/3617] incorporated code review feedback --- plugins/inventory/fleet.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/inventory/fleet.py b/plugins/inventory/fleet.py index d6d7e4d2925bfe..3267aeb2ea5384 100755 --- a/plugins/inventory/fleet.py +++ b/plugins/inventory/fleet.py @@ -39,7 +39,7 @@ parser = OptionParser(usage="%prog [options] --list | --host ") parser.add_option('--list', default=False, dest="list", action="store_true", - help="Produce a JSON consumable grouping of Vagrant servers for Ansible") + help="Produce a JSON consumable grouping of servers in your fleet") parser.add_option('--host', default=None, dest="host", help="Generate additional host specific details for 
given host for Ansible") (options, args) = parser.parse_args() @@ -48,9 +48,9 @@ # helper functions # -def get_ssh_config() : +def get_ssh_config(): configs = [] - for box in list_running_boxes() : + for box in list_running_boxes(): config = get_a_ssh_config(box) configs.append(config) return configs @@ -58,14 +58,14 @@ def get_ssh_config() : #list all the running instances in the fleet def list_running_boxes(): boxes = [] - for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n') : + for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'): matcher = re.search("[^\s]+[\s]+([^\s]+).+", line) if matcher and matcher.group(1) != "IP": boxes.append(matcher.group(1)) return boxes -def get_a_ssh_config(box_name) : +def get_a_ssh_config(box_name): config = {} config['Host'] = box_name config['ansible_ssh_user'] = 'core' @@ -78,7 +78,7 @@ def get_a_ssh_config(box_name) : ssh_config = get_ssh_config() hosts = { 'coreos': []} - for data in ssh_config : + for data in ssh_config: hosts['coreos'].append(data['Host']) print json.dumps(hosts) From 8146d1fff3a31cf8e801770d49ee1c24b7728806 Mon Sep 17 00:00:00 2001 From: Justin Wyer Date: Mon, 1 Dec 2014 17:17:54 +0200 Subject: [PATCH 0020/3617] /sys/block/sdX/queue/physical_block_size does not correlate with /sys/block/sdX/size for advanced drives larger than 2TB, /sys/block/sdX/queue/logical_block_size correlates with both see #9549 --- lib/ansible/module_utils/facts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 5ceeb405d5503c..57476586aef9d0 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -791,7 +791,7 @@ def get_device_facts(self): part['start'] = get_file_content(part_sysdir + "/start",0) part['sectors'] = get_file_content(part_sysdir + "/size",0) - part['sectorsize'] = get_file_content(part_sysdir + "/queue/physical_block_size") + 
part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size") if not part['sectorsize']: part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512) part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize']))) @@ -808,7 +808,7 @@ def get_device_facts(self): d['sectors'] = get_file_content(sysdir + "/size") if not d['sectors']: d['sectors'] = 0 - d['sectorsize'] = get_file_content(sysdir + "/queue/physical_block_size") + d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size") if not d['sectorsize']: d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512) d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize'])) From 19d40cc54ce65b346901e4f040ec9007a57b3fb7 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Wed, 10 Dec 2014 11:26:21 -0500 Subject: [PATCH 0021/3617] Add tags for inventory --- plugins/inventory/apache-libcloud.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/plugins/inventory/apache-libcloud.py b/plugins/inventory/apache-libcloud.py index 95804095da90d0..151daeefe08973 100755 --- a/plugins/inventory/apache-libcloud.py +++ b/plugins/inventory/apache-libcloud.py @@ -222,12 +222,17 @@ def add_node(self, node): self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest) ''' # Inventory: Group by key pair - if node.extra['keyname']: - self.push(self.inventory, self.to_safe('key_' + node.extra['keyname']), dest) + if node.extra['key_name']: + self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest) # Inventory: Group by security group, quick thing to handle single sg - if node.extra['securitygroup']: - self.push(self.inventory, self.to_safe('sg_' + node.extra['securitygroup'][0]), dest) + if node.extra['security_group']: + self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest) + + # Inventory: Group by tag + if node.extra['tags']: 
+ for tagkey in node.extra['tags'].keys(): + self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest) def get_host_info(self): ''' From fce04b1eba5343f0b23c50af24404a2826591345 Mon Sep 17 00:00:00 2001 From: "Federico G. Schwindt" Date: Sun, 14 Dec 2014 22:39:17 +0000 Subject: [PATCH 0022/3617] Use command= when we intended to While here sort register variables and add a comment to signal multiline testing. --- .../roles/test_command_shell/tasks/main.yml | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/test/integration/roles/test_command_shell/tasks/main.yml b/test/integration/roles/test_command_shell/tasks/main.yml index b331452b7c63a3..877eb11cd6d171 100644 --- a/test/integration/roles/test_command_shell/tasks/main.yml +++ b/test/integration/roles/test_command_shell/tasks/main.yml @@ -82,7 +82,7 @@ file: path={{output_dir_test}}/afile.txt state=absent - name: create afile.txt with create_afile.sh via command - shell: "{{output_dir_test | expanduser}}/create_afile.sh {{output_dir_test | expanduser}}/afile.txt creates={{output_dir_test | expanduser}}/afile.txt" + command: "{{output_dir_test | expanduser}}/create_afile.sh {{output_dir_test | expanduser}}/afile.txt creates={{output_dir_test | expanduser}}/afile.txt" - name: verify that afile.txt is present file: path={{output_dir_test}}/afile.txt state=file @@ -90,7 +90,7 @@ # removes - name: remove afile.txt with remote_afile.sh via command - shell: "{{output_dir_test | expanduser}}/remove_afile.sh {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt" + command: "{{output_dir_test | expanduser}}/remove_afile.sh {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt" - name: verify that afile.txt is absent file: path={{output_dir_test}}/afile.txt state=absent @@ -161,21 +161,23 @@ - name: remove afile.txt using rm shell: rm {{output_dir_test | 
expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt - register: shell_result4 + register: shell_result3 - name: assert that using rm under shell causes a warning assert: that: - - "shell_result4.warnings" + - "shell_result3.warnings" - name: verify that afile.txt is absent file: path={{output_dir_test}}/afile.txt state=absent - register: shell_result5 + register: shell_result4 - name: assert that the file was removed by the shell assert: that: - - "shell_result5.changed == False" + - "shell_result4.changed == False" + +# multiline - name: execute a shell command using a literal multiline block args: @@ -189,28 +191,28 @@ | tr -s ' ' \ | cut -f1 -d ' ' echo "this is a second line" - register: shell_result6 + register: shell_result5 -- debug: var=shell_result6 +- debug: var=shell_result5 - name: assert the multiline shell command ran as expected assert: that: - - "shell_result6.changed" - - "shell_result6.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'" + - "shell_result5.changed" + - "shell_result5.stdout == '5575bb6b71c9558db0b6fbbf2f19909eeb4e3b98\nthis is a second line'" - name: execute a shell command using a literal multiline block with arguments in it shell: | executable=/bin/bash creates={{output_dir_test | expanduser}}/afile.txt echo "test" - register: shell_result7 + register: shell_result6 - name: assert the multiline shell command with arguments in it run as expected assert: that: - - "shell_result7.changed" - - "shell_result7.stdout == 'test'" + - "shell_result6.changed" + - "shell_result6.stdout == 'test'" - name: remove the previously created file file: path={{output_dir_test}}/afile.txt state=absent From 91a73cff81476873d73f112406a1c6dae6793c6f Mon Sep 17 00:00:00 2001 From: "Federico G. 
Schwindt" Date: Sun, 14 Dec 2014 22:40:04 +0000 Subject: [PATCH 0023/3617] Add tests for globbing support --- .../roles/test_command_shell/tasks/main.yml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/test/integration/roles/test_command_shell/tasks/main.yml b/test/integration/roles/test_command_shell/tasks/main.yml index 877eb11cd6d171..325e76cffea4e1 100644 --- a/test/integration/roles/test_command_shell/tasks/main.yml +++ b/test/integration/roles/test_command_shell/tasks/main.yml @@ -87,6 +87,15 @@ - name: verify that afile.txt is present file: path={{output_dir_test}}/afile.txt state=file +- name: re-run previous command using creates with globbing + command: "{{output_dir_test | expanduser}}/create_afile.sh {{output_dir_test | expanduser}}/afile.txt creates={{output_dir_test | expanduser}}/afile.*" + register: command_result3 + +- name: assert that creates with globbing is working + assert: + that: + - "command_result3.changed != True" + # removes - name: remove afile.txt with remote_afile.sh via command @@ -94,12 +103,15 @@ - name: verify that afile.txt is absent file: path={{output_dir_test}}/afile.txt state=absent - register: command_result3 -- name: assert that the file was removed by the script +- name: re-run previous command using removes with globbing + command: "{{output_dir_test | expanduser}}/remove_afile.sh {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.*" + register: command_result4 + +- name: assert that removes with globbing is working assert: that: - - "command_result3.changed != True" + - "command_result4.changed != True" ## ## shell From 9639f1d8e7b4a756b7343cebd37b015b67a2418f Mon Sep 17 00:00:00 2001 From: axos88 Date: Thu, 18 Dec 2014 12:52:15 +0100 Subject: [PATCH 0024/3617] Make issue rypes as an enumeration Easier to copy&paste, and delete all except the correct line. 
--- ISSUE_TEMPLATE.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 8ce40348ca1e8a..511760de2658ed 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,6 +1,13 @@ ##### Issue Type: -Can you help us out in labelling this by telling us what kind of ticket this this? You can say “Bug Report”, “Feature Idea”, “Feature Pull Request”, “New Module Pull Request”, “Bugfix Pull Request”, “Documentation Report”, or “Docs Pull Request”. +Can you help us out in labelling this by telling us what kind of ticket this is? You can say: + - Bug Report + - Feature Idea + - Feature Pull Request + - New Module Pull Request + - Bugfix Pull Request + - Documentation Report + - Docs Pull Request ##### Ansible Version: From 17498b58bb85b18368ede4372093297de740eab6 Mon Sep 17 00:00:00 2001 From: Mick Bass Date: Thu, 25 Dec 2014 13:31:34 -0700 Subject: [PATCH 0025/3617] Add support for AWS Security Token Service (temporary credentials) to all AWS cloud modules.
--- lib/ansible/module_utils/ec2.py | 28 ++++++++------- .../utils/module_docs_fragments/aws.py | 34 ++++++++++--------- 2 files changed, 33 insertions(+), 29 deletions(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 0f08fead18021a..c7bad2970b6522 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -54,7 +54,7 @@ def aws_common_argument_spec(): aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), aws_access_key=dict(aliases=['ec2_access_key', 'access_key']), validate_certs=dict(default=True, type='bool'), - security_token=dict(no_log=True), + security_token=dict(aliases=['access_token'], no_log=True), profile=dict(), ) @@ -87,38 +87,38 @@ def get_aws_connection_info(module): validate_certs = module.params.get('validate_certs') if not ec2_url: - if 'EC2_URL' in os.environ: - ec2_url = os.environ['EC2_URL'] - elif 'AWS_URL' in os.environ: + if 'AWS_URL' in os.environ: ec2_url = os.environ['AWS_URL'] + elif 'EC2_URL' in os.environ: + ec2_url = os.environ['EC2_URL'] if not access_key: - if 'EC2_ACCESS_KEY' in os.environ: - access_key = os.environ['EC2_ACCESS_KEY'] - elif 'AWS_ACCESS_KEY_ID' in os.environ: + if 'AWS_ACCESS_KEY_ID' in os.environ: access_key = os.environ['AWS_ACCESS_KEY_ID'] elif 'AWS_ACCESS_KEY' in os.environ: access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + access_key = os.environ['EC2_ACCESS_KEY'] else: # in case access_key came in as empty string access_key = None if not secret_key: - if 'EC2_SECRET_KEY' in os.environ: - secret_key = os.environ['EC2_SECRET_KEY'] - elif 'AWS_SECRET_ACCESS_KEY' in os.environ: + if 'AWS_SECRET_ACCESS_KEY' in os.environ: secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] elif 'AWS_SECRET_KEY' in os.environ: secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + secret_key = os.environ['EC2_SECRET_KEY'] else: # in case secret_key came in as empty string secret_key = 
None if not region: - if 'EC2_REGION' in os.environ: - region = os.environ['EC2_REGION'] - elif 'AWS_REGION' in os.environ: + if 'AWS_REGION' in os.environ: region = os.environ['AWS_REGION'] + elif 'EC2_REGION' in os.environ: + region = os.environ['EC2_REGION'] else: # boto.config.get returns None if config not found region = boto.config.get('Boto', 'aws_region') @@ -128,6 +128,8 @@ def get_aws_connection_info(module): if not security_token: if 'AWS_SECURITY_TOKEN' in os.environ: security_token = os.environ['AWS_SECURITY_TOKEN'] + elif 'EC2_SECURITY_TOKEN' in os.environ: + security_token = os.environ['EC2_SECURITY_TOKEN'] else: # in case security_token came in as empty string security_token = None diff --git a/lib/ansible/utils/module_docs_fragments/aws.py b/lib/ansible/utils/module_docs_fragments/aws.py index 9bbe84a1355768..981eb8e105038b 100644 --- a/lib/ansible/utils/module_docs_fragments/aws.py +++ b/lib/ansible/utils/module_docs_fragments/aws.py @@ -23,22 +23,29 @@ class ModuleDocFragment(object): options: ec2_url: description: - - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used + - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Ignored for modules where region is required. Must be specified for all other modules if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used. required: false default: null aliases: [] aws_secret_key: description: - - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used. 
required: false default: null aliases: [ 'ec2_secret_key', 'secret_key' ] aws_access_key: description: - - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used. required: false default: null aliases: [ 'ec2_access_key', 'access_key' ] + security_token: + description: + - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used. + required: false + default: null + aliases: [ 'access_token' ] + version_added: "1.6" validate_certs: description: - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. @@ -54,23 +61,18 @@ class ModuleDocFragment(object): default: null aliases: [] version_added: "1.6" - security_token: - description: - - security token to authenticate against AWS - required: false - default: null - aliases: [] - version_added: "1.6" requirements: - boto notes: - - The following environment variables can be used C(AWS_ACCESS_KEY) or - C(EC2_ACCESS_KEY) or C(AWS_ACCESS_KEY_ID), - C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY) or C(AWS_SECRET_ACCESS_KEY), - C(AWS_REGION) or C(EC2_REGION), C(AWS_SECURITY_TOKEN) + - If parameters are not set within the module, the following + environment variables can be used in decreasing order of precedence + C(AWS_URL) or C(EC2_URL), + C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY), + C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY), + C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN), + C(AWS_REGION) or C(EC2_REGION) - Ansible uses the boto configuration file (typically ~/.boto) if no credentials are provided. 
See http://boto.readthedocs.org/en/latest/boto_config_tut.html - C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the - AWS region, when required, but - this can also be configured in the boto config file + AWS region, when required, but this can also be configured in the boto config file """ From 64141dd78987d19b5b72330c0c456d76e31d609f Mon Sep 17 00:00:00 2001 From: John Barker Date: Wed, 31 Dec 2014 22:06:15 +0000 Subject: [PATCH 0026/3617] Correct URL to github so links work when testing locally --- docsite/rst/community.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index 4d2de28ce16d14..c4c9f52b2edf98 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -66,7 +66,7 @@ Bugs related to the core language should be reported to `github.com/ansible/ansi signing up for a free github account. Before reporting a bug, please use the bug/issue search to see if the issue has already been reported. -MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module. +MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module. When filing a bug, please use the `issue template `_ to provide all relevant information, regardless of what repo you are filing a ticket against. 
From 54f1eebde855d5ee14b97d0cd91ed1b3b54fe49a Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 1 Jan 2015 14:13:59 +0000 Subject: [PATCH 0027/3617] Strip formatting from lists of modules --- hacking/module_formatter.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 0a7d1c884ca200..26e403e8659394 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -88,6 +88,24 @@ def html_ify(text): return t +##################################################################################### + +def strip_formatting(text): + ''' Strips formatting + In lists of modules, etc, we don't want certain words to be formatted + Also due to a bug in RST, you can not easily nest formatting + #http://docutils.sourceforge.net/FAQ.html#is-nested-inline-markup-possible + ''' + + t = cgi.escape(text) + t = _ITALIC.sub(r"\1", t) + t = _BOLD.sub(r"\1", t) + t = _MODULE.sub(r"\1", t) + t = _URL.sub(r"\1", t) + t = _CONST.sub(r"\1", t) + + return t + ##################################################################################### @@ -310,7 +328,8 @@ def print_modules(module, category_file, deprecated, core, options, env, templat result = process_module(modname, options, env, template, outputname, module_map, aliases) if result != "SKIPPED": - category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module)) + # Some of the module descriptions have formatting in them, this is noisy in lists, so remove it + category_file.write(" %s - %s <%s_module>\n" % (modstring, strip_formatting(result), module)) def process_category(category, categories, options, env, template, outputname): From dc6e8bff34e1305a79febca44722c4345512d6ad Mon Sep 17 00:00:00 2001 From: John Barker Date: Sat, 3 Jan 2015 11:42:44 +0000 Subject: [PATCH 0028/3617] Fix some mistakes in CHANELOG.md --- CHANGELOG.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/CHANGELOG.md b/CHANGELOG.md index a989cdcd4465bf..70e1c8dc9b0b27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -97,7 +97,7 @@ And various other bug fixes and improvements ... - Fixes a bug in vault where the password file option was not being used correctly internally. - Improved multi-line parsing when using YAML literal blocks (using > or |). - Fixed a bug with the file module and the creation of relative symlinks. -- Fixed a bug where checkmode was not being honored during the templating of files. +- Fixed a bug where checkmode was not being honoured during the templating of files. - Other various bug fixes. ## 1.7.1 "Summer Nights" - Aug 14, 2014 @@ -140,7 +140,7 @@ New Modules: Other notable changes: * Security fixes - - Prevent the use of lookups when using legaxy "{{ }}" syntax around variables and with_* loops. + - Prevent the use of lookups when using legacy "{{ }}" syntax around variables and with_* loops. - Remove relative paths in TAR-archived file names used by ansible-galaxy. * Inventory speed improvements for very large inventories. * Vault password files can now be executable, to support scripts that fetch the vault password. @@ -319,7 +319,7 @@ Major features/changes: * ec2 module now accepts 'exact_count' and 'count_tag' as a way to enforce a running number of nodes by tags. * all ec2 modules that work with Eucalyptus also now support a 'validate_certs' option, which can be set to 'off' for installations using self-signed certs. 
* Start of new integration test infrastructure (WIP, more details TBD) -* if repoquery is unavailble, the yum module will automatically attempt to install yum-utils +* if repoquery is unavailable, the yum module will automatically attempt to install yum-utils * ansible-vault: a framework for encrypting your playbooks and variable files * added support for privilege escalation via 'su' into bin/ansible and bin/ansible-playbook and associated keywords 'su', 'su_user', 'su_pass' for tasks/plays @@ -782,7 +782,7 @@ Bugfixes and Misc Changes: * misc fixes to the Riak module * make template module slightly more efficient * base64encode / decode filters are now available to templates -* libvirt module can now work with multiple different libvirt connecton URIs +* libvirt module can now work with multiple different libvirt connection URIs * fix for postgresql password escaping * unicode fix for shlex.split in some cases * apt module upgrade logic improved @@ -817,7 +817,7 @@ the variable is still registered for the host, with the attribute skipped: True. * service pattern argument now correctly read for BSD services * fetch location can now be controlled more directly via the 'flat' parameter. 
* added basename and dirname as Jinja2 filters available to all templates -* pip works better when sudoing from unpriveledged users +* pip works better when sudoing from unprivileged users * fix for user creation with groups specification reporting 'changed' incorrectly in some cases * fix for some unicode encoding errors in outputing some data in verbose mode * improved FreeBSD, NetBSD and Solaris facts From 64e61197f970f1602243f84cbfe9da2761b46a7c Mon Sep 17 00:00:00 2001 From: John Barker Date: Mon, 5 Jan 2015 20:57:05 +0000 Subject: [PATCH 0029/3617] Revert accidental changes --- hacking/module_formatter.py | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 26e403e8659394..0a7d1c884ca200 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -88,24 +88,6 @@ def html_ify(text): return t -##################################################################################### - -def strip_formatting(text): - ''' Strips formatting - In lists of modules, etc, we don't want certain words to be formatted - Also due to a bug in RST, you can not easily nest formatting - #http://docutils.sourceforge.net/FAQ.html#is-nested-inline-markup-possible - ''' - - t = cgi.escape(text) - t = _ITALIC.sub(r"\1", t) - t = _BOLD.sub(r"\1", t) - t = _MODULE.sub(r"\1", t) - t = _URL.sub(r"\1", t) - t = _CONST.sub(r"\1", t) - - return t - ##################################################################################### @@ -328,8 +310,7 @@ def print_modules(module, category_file, deprecated, core, options, env, templat result = process_module(modname, options, env, template, outputname, module_map, aliases) if result != "SKIPPED": - # Some of the module descriptions have formatting in them, this is noisy in lists, so remove it - category_file.write(" %s - %s <%s_module>\n" % (modstring, strip_formatting(result), module)) + category_file.write(" %s - %s 
<%s_module>\n" % (modstring, result, module)) def process_category(category, categories, options, env, template, outputname): From e213fdb15dfc6964705c0b5d1567cd0872a26497 Mon Sep 17 00:00:00 2001 From: volanja Date: Fri, 9 Jan 2015 01:24:41 +0900 Subject: [PATCH 0030/3617] to replace `running` with `started` --- docsite/rst/test_strategies.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/test_strategies.rst b/docsite/rst/test_strategies.rst index a3abf160906bef..be1b80550d8c78 100644 --- a/docsite/rst/test_strategies.rst +++ b/docsite/rst/test_strategies.rst @@ -19,16 +19,16 @@ also very easy to run the steps on the localhost or testing servers. Ansible let The Right Level of Testing `````````````````````````` -Ansible resources are models of desired-state. As such, it should not be necessary to test that services are running, packages are +Ansible resources are models of desired-state. As such, it should not be necessary to test that services are started, packages are installed, or other such things. Ansible is the system that will ensure these things are declaratively true. Instead, assert these things in your playbooks. .. code-block:: yaml tasks: - - service: name=foo state=running enabled=yes + - service: name=foo state=started enabled=yes -If you think the service may not be running, the best thing to do is request it to be running. If the service fails to start, Ansible +If you think the service may not be started, the best thing to do is request it to be started. If the service fails to start, Ansible will yell appropriately. (This should not be confused with whether the service is doing something functional, which we'll show more about how to do later). From e2ce673b1ab4d607e327ad87e6d67620699b94ef Mon Sep 17 00:00:00 2001 From: James Martin Date: Tue, 27 Jan 2015 12:46:22 -0500 Subject: [PATCH 0031/3617] Properly empties ASG before terminating it, and waits for ASG to be deleted. 
Updated to support wait_for_instances and replace_all_instances. --- test/integration/cleanup_ec2.py | 34 +++- .../roles/test_ec2_asg/tasks/main.yml | 190 +++++++++++++++++- 2 files changed, 215 insertions(+), 9 deletions(-) diff --git a/test/integration/cleanup_ec2.py b/test/integration/cleanup_ec2.py index e4241b0d7dc35d..1935f0bdc18e5e 100644 --- a/test/integration/cleanup_ec2.py +++ b/test/integration/cleanup_ec2.py @@ -12,6 +12,7 @@ import yaml import os.path import boto.ec2.elb +import time def delete_aws_resources(get_func, attr, opts): for item in get_func(): @@ -19,6 +20,37 @@ def delete_aws_resources(get_func, attr, opts): if re.search(opts.match_re, val): prompt_and_delete(item, "Delete matching %s? [y/n]: " % (item,), opts.assumeyes) +def delete_autoscaling_group(get_func, attr, opts): + assumeyes = opts.assumeyes + group_name = None + for item in get_func(): + group_name = getattr(item, attr) + if re.search(opts.match_re, group_name): + if not opts.assumeyes: + assumeyes = raw_input("Delete matching %s? 
[y/n]: " % (item).lower()) == 'y' + break + if assumeyes and group_name: + groups = asg.get_all_groups(names=[group_name]) + if groups: + group = groups[0] + group.max_size = 0 + group.min_size = 0 + group.desired_capacity = 0 + group.update() + instances = True + while instances: + tmp_groups = asg.get_all_groups(names=[group_name]) + if tmp_groups: + tmp_group = tmp_groups[0] + if not tmp_group.instances: + instances = False + time.sleep(10) + + group.delete() + while len(asg.get_all_groups(names=[group_name])): + time.sleep(5) + print ("Terminated ASG: %s" % group_name) + def delete_aws_eips(get_func, attr, opts): # the file might not be there if the integration test wasn't run @@ -128,7 +160,7 @@ def parse_args(): delete_aws_resources(aws.get_all_security_groups, 'name', opts) # Delete matching ASGs - delete_aws_resources(asg.get_all_groups, 'name', opts) + delete_autoscaling_group(asg.get_all_groups, 'name', opts) # Delete matching launch configs delete_aws_resources(asg.get_all_launch_configurations, 'name', opts) diff --git a/test/integration/roles/test_ec2_asg/tasks/main.yml b/test/integration/roles/test_ec2_asg/tasks/main.yml index 6c670375d9418f..091eb2ab2b3b40 100644 --- a/test/integration/roles/test_ec2_asg/tasks/main.yml +++ b/test/integration/roles/test_ec2_asg/tasks/main.yml @@ -1,31 +1,69 @@ --- # tasks file for test_ec2_asg +# we are using a custom built AMI that runs an apache server to verify +# ELB health checks and perform rolling ASG updates +# this will only work on us-east-1 + # ============================================================ # create and kill an ASG -- name: lookup ami id - ec2_ami_search: distro=ubuntu region={{ ec2_region }} release=trusty - register: ubuntu_image - name: ensure launch config exists ec2_lc: name: "{{ resource_prefix }}-lc" ec2_access_key: "{{ ec2_access_key }}" ec2_secret_key: "{{ ec2_secret_key }}" region: "{{ ec2_region }}" - image_id: "{{ ubuntu_image.ami }}" - instance_type: t1.micro -- name: launch asg + 
image_id: ami-964a0efe + instance_type: t2.micro + +- name: launch asg and wait for instances to be deemed healthy (no ELB) + ec2_asg: + name: "{{ resource_prefix }}-asg" + ec2_access_key: "{{ ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + launch_config_name: "{{ resource_prefix }}-lc" + desired_capacity: 1 + min_size: 1 + max_size: 1 + region: "{{ ec2_region }}" + state: present + wait_for_instances: yes + register: output + +- assert: + that: + - "output.viable_instances == 1" + +# - name: pause for a bit to make sure that the group can't be trivially deleted +# pause: seconds=30 +- name: kill asg + ec2_asg: + name: "{{ resource_prefix }}-asg" + ec2_access_key: "{{ ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + region: "{{ ec2_region }}" + state: absent + async: 300 + + +- name: launch asg and do not wait for instances to be deemed healthy (no ELB) ec2_asg: name: "{{ resource_prefix }}-asg" ec2_access_key: "{{ ec2_access_key }}" ec2_secret_key: "{{ ec2_secret_key }}" launch_config_name: "{{ resource_prefix }}-lc" + desired_capacity: 1 min_size: 1 max_size: 1 region: "{{ ec2_region }}" + wait_for_instances: no state: present -- name: pause for a bit to make sure that the group can't be trivially deleted - pause: seconds=30 + register: output + +- assert: + that: + - "output.viable_instances == 0" + - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" @@ -34,3 +72,139 @@ region: "{{ ec2_region }}" state: absent async: 300 + +- name: launch load balancer + ec2_elb_lb: + name: "{{ resource_prefix }}-lb" + region: "{{ ec2_region }}" + state: present + ec2_access_key: "{{ ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + zones: + - "{{ ec2_region }}b" + - "{{ ec2_region }}c" + connection_draining_timeout: 60 + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + health_check: + ping_protocol: http + ping_port: 80 + ping_path: "/" + response_timeout: 5 + interval: 30 + unhealthy_threshold: 3 + 
healthy_threshold: 3 + register: load_balancer + + +- name: launch asg and wait for instances to be deemed healthy (ELB) + ec2_asg: + name: "{{ resource_prefix }}-asg" + availability_zones: + - "{{ ec2_region }}b" + - "{{ ec2_region }}c" + ec2_access_key: "{{ ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + launch_config_name: "{{ resource_prefix }}-lc" + health_check_type: ELB + desired_capacity: 1 + min_size: 1 + max_size: 1 + health_check_period: 120 + load_balancers: "{{ resource_prefix }}-lb" + region: "{{ ec2_region }}" + wait_for_instances: yes + wait_timeout: 600 + state: present + register: output + +- assert: + that: + - "output.viable_instances == 1" + + +# grow scaling group to 3 + +- name: add 2 more instances wait for instances to be deemed healthy (ELB) + ec2_asg: + name: "{{ resource_prefix }}-asg" + availability_zones: + - "{{ ec2_region }}b" + - "{{ ec2_region }}c" + ec2_access_key: "{{ ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + launch_config_name: "{{ resource_prefix }}-lc" + health_check_type: ELB + desired_capacity: 3 + min_size: 3 + max_size: 5 + health_check_period: 120 + load_balancers: ec2-asg-int-test + region: "{{ ec2_region }}" + wait_for_instances: yes + wait_timeout: 600 + state: present + register: output + +- assert: + that: + - "output.viable_instances == 3" + +# # create new launch config with alternate AMI + +- name: ensure launch config exists + ec2_lc: + name: "{{ resource_prefix }}-lc-2" + ec2_access_key: "{{ ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + region: "{{ ec2_region }}" + image_id: ami-2a4a0e42 + instance_type: t2.micro + + +# # perform rolling replace + +- name: perform rolling update to new AMI + ec2_asg: + name: "{{ resource_prefix }}-asg" + availability_zones: + - "{{ ec2_region }}b" + - "{{ ec2_region }}c" + ec2_access_key: "{{ ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + launch_config_name: "{{ resource_prefix }}-lc-2" + health_check_type: ELB + 
desired_capacity: 3 + min_size: 3 + max_size: 5 + health_check_period: 120 + load_balancers: ec2-asg-int-test + region: "{{ ec2_region }}" + wait_for_instances: yes + replace_all_instances: yes + wait_timeout: 600 + state: present + register: output + +# ensure that all instances have new launch config +- assert: + that: + - "item.value.launch_config_name == '{{ resource_prefix }}-lc-2'" + with_dict: output.instance_facts + +# assert they are all healthy +- assert: + that: + - "output.viable_instances >= 3" + + +- name: kill asg + ec2_asg: + name: "{{ resource_prefix }}-asg" + ec2_access_key: "{{ ec2_access_key }}" + ec2_secret_key: "{{ ec2_secret_key }}" + region: "{{ ec2_region }}" + state: absent + async: 300 \ No newline at end of file From 440df12f2ca970e91442b23bf80fced806aecb32 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 30 Jan 2015 15:25:04 -0500 Subject: [PATCH 0032/3617] added retry configs to v2, pending is actual functionality --- v2/ansible/constants.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 1c2bc092b23cbc..4b51f1f1b1d1d2 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -167,6 +167,8 @@ def shell_expand_path(path): DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) +RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) +RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) From 
1e787bd91e663d6fa291290dc83482ee3133429a Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Wed, 4 Feb 2015 09:49:51 -0600 Subject: [PATCH 0033/3617] Add intro to playbook docs using YAML dictionaries --- docsite/rst/playbooks_intro.rst | 37 +++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index ecf8d46de1eae8..4adb5d53a6878b 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -73,6 +73,43 @@ For starters, here's a playbook that contains just one play:: - name: restart apache service: name=httpd state=restarted +We can also break task items out over multiple lines using the YAML dictionary +types to supply module arguments. This can be helpful when working with tasks +that have really long parameters or modules that take many parameters to keep +them well structured. Below is another version of the above example but using +YAML dictionaries to supply the modules with their key=value arguments.:: + + --- + - hosts: webservers + vars: + http_port: 80 + max_clients: 200 + remote_user: root + tasks: + - name: ensure apache is at the latest version + yum: + pkg: httpd + state: latest + - name: write the apache config file + template: + src: /srv/httpd.j2 + dest: /etc/httpd.conf + notify: + - restart apache + - name: ensure apache is running + service: + name: httpd + state: started + handlers: + - name: restart apache + service: + name: httpd + state: restarted + +.. note:: + + The above example using YAML dictionaries for module arguments can also be accomplished using the YAML multiline string syntax with the `>` character but this can lead to string quoting errors. + Below, we'll break down what the various features of the playbook language are. .. 
_playbook_basics: From 4c661e2b93ad9a7b51de196287b9da7c6b7467d6 Mon Sep 17 00:00:00 2001 From: pdelared Date: Tue, 10 Feb 2015 17:33:29 +0100 Subject: [PATCH 0034/3617] Update facts.py Added support for HPUX network fact --- lib/ansible/module_utils/facts.py | 51 +++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 6d602af7366eca..323c0c0d0591db 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2048,6 +2048,57 @@ def merge_default_interface(self, defaults, interfaces, ip_type): for item in ifinfo[ip_type][0].keys(): defaults[item] = ifinfo[ip_type][0][item] +class HPUX(Network): + """ + HP-UX-specifig subclass of Network. Defines networking facts: + - default_interface + - interfaces (a list of interface names) + - interface_ dictionary of ipv4 address information. + """ + platform = 'HP-UX' + + def __init__(self, module): + Network.__init__(self, module) + + def populate(self): + netstat_path = self.module.get_bin_path('netstat') + if netstat_path is None: + return self.facts + self.get_default_interfaces() + interfaces = self.get_interfaces_info() + self.facts['interfaces'] = interfaces.keys() + for iface in interfaces: + self.facts[iface] = interfaces[iface] + return self.facts + + def get_default_interfaces(self): + rc, out, err = module.run_command("/usr/bin/netstat -nr", use_unsafe_shell=True) + lines = out.split('\n') + for line in lines: + words = line.split() + if len(words) > 1: + if words[0] == 'default': + self.facts['default_interface'] = words[4] + self.facts['default_gateway'] = words[1] + + def get_interfaces_info(self): + interfaces = {} + rc, out, err = module.run_command("/usr/bin/netstat -ni", use_unsafe_shell=True) + lines = out.split('\n') + for line in lines: + words = line.split() + for i in range(len(words) - 1): + if words[i][:3] == 'lan': + device = words[i] + interfaces[device] = { 'device': device 
} + address = words[i+3] + interfaces[device]['ipv4'] = { 'address': address } + network = words[i+2] + interfaces[device]['ipv4'] = { 'network': network, + 'interface': device, + 'address': address } + return interfaces + class DarwinNetwork(GenericBsdIfconfigNetwork, Network): """ This is the Mac OS X/Darwin Network Class. From 4e4bdaad8d500c1c8168a8606e7284a65685367a Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 13 Feb 2015 10:40:50 -0500 Subject: [PATCH 0035/3617] Remove auth_token parameter It turns out that this can actually already be handled by the existing auth plugin framework and does not need its own parameter. Remove before it sees usage and causes confusion. --- lib/ansible/module_utils/openstack.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index 5c4503f94cecba..90415cadabbdb6 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -75,7 +75,6 @@ def openstack_full_argument_spec(**kwargs): cloud=dict(default=None), auth_plugin=dict(default=None), auth=dict(default=None), - auth_token=dict(default=None), region_name=dict(default=None), availability_zone=dict(default=None), state=dict(default='present', choices=['absent', 'present']), @@ -94,10 +93,6 @@ def openstack_module_kwargs(**kwargs): required_one_of=[ ['cloud', 'auth'], ], - mutually_exclusive=[ - ['auth', 'auth_token'], - ['auth_plugin', 'auth_token'], - ], ) for key in ('mutually_exclusive', 'required_together', 'required_one_of'): if key in kwargs: From d06a277b50503e8d142d12ec356a6e0383d22cd7 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 13 Feb 2015 10:41:58 -0500 Subject: [PATCH 0036/3617] Port openstack module_utils changes to v2 branch --- v2/ansible/module_utils/openstack.py | 35 ++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py index 
64f95437143527..90415cadabbdb6 100644 --- a/v2/ansible/module_utils/openstack.py +++ b/v2/ansible/module_utils/openstack.py @@ -30,6 +30,9 @@ def openstack_argument_spec(): + # DEPRECATED: This argument spec is only used for the deprecated old + # OpenStack modules. It turns out that modern OpenStack auth is WAY + # more complex than this. # Consume standard OpenStack environment variables. # This is mainly only useful for ad-hoc command line operation as # in playbooks one would assume variables would be used appropriately @@ -67,3 +70,35 @@ def openstack_find_nova_addresses(addresses, ext_tag, key_name=None): ret.append(interface_spec['addr']) return ret +def openstack_full_argument_spec(**kwargs): + spec = dict( + cloud=dict(default=None), + auth_plugin=dict(default=None), + auth=dict(default=None), + region_name=dict(default=None), + availability_zone=dict(default=None), + state=dict(default='present', choices=['absent', 'present']), + wait=dict(default=True, type='bool'), + timeout=dict(default=180, type='int'), + endpoint_type=dict( + default='publicURL', choices=['publicURL', 'internalURL'] + ) + ) + spec.update(kwargs) + return spec + + +def openstack_module_kwargs(**kwargs): + ret = dict( + required_one_of=[ + ['cloud', 'auth'], + ], + ) + for key in ('mutually_exclusive', 'required_together', 'required_one_of'): + if key in kwargs: + if key in ret: + ret[key].extend(kwargs[key]) + else: + ret[key] = kwargs[key] + + return ret From 7044b5a8d1679b603a4b967dfbe34e60fbc7e444 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 19 Feb 2015 08:29:53 -0500 Subject: [PATCH 0037/3617] removed bare variable detection as this confuses people and forced us to allow for bare expressions --- lib/ansible/playbook/task.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index bdffba5527c43a..7645450ee159ba 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -86,11 +86,6 @@ def 
__init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=No elif x.startswith("with_"): if isinstance(ds[x], basestring): param = ds[x].strip() - # Only a variable, no logic - if (param.startswith('{{') and - param.find('}}') == len(ds[x]) - 2 and - param.find('|') == -1): - utils.warning("It is unnecessary to use '{{' in loops, leave variables in loop expressions bare.") plugin_name = x.replace("with_","") if plugin_name in utils.plugins.lookup_loader: From c4144b1391b7df465160e8d5f365bbd163761b37 Mon Sep 17 00:00:00 2001 From: David Mahler Date: Sun, 22 Feb 2015 19:28:16 +0000 Subject: [PATCH 0038/3617] Some grammatical updates --- docsite/rst/intro.rst | 2 +- docsite/rst/intro_getting_started.rst | 33 +++++++++++++-------------- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/docsite/rst/intro.rst b/docsite/rst/intro.rst index 9b30a18bbb7d9b..7976462383c4c0 100644 --- a/docsite/rst/intro.rst +++ b/docsite/rst/intro.rst @@ -1,7 +1,7 @@ Introduction ============ -Before we dive into the really fun parts -- playbooks, configuration management, deployment, and orchestration, we'll learn how to get Ansible installed and some basic concepts. We'll go over how to execute ad-hoc commands in parallel across your nodes using /usr/bin/ansible. We'll also see what sort of modules are available in Ansible's core (though you can also write your own, which we'll also show later). +Before we dive into the really fun parts -- playbooks, configuration management, deployment, and orchestration, we'll learn how to get Ansible installed and cover some basic concepts. We'll also go over how to execute ad-hoc commands in parallel across your nodes using /usr/bin/ansible. Additionally, we'll see what sort of modules are available in Ansible's core (though you can also write your own, which is also covered later). .. 
toctree:: :maxdepth: 1 diff --git a/docsite/rst/intro_getting_started.rst b/docsite/rst/intro_getting_started.rst index 67136036479b5f..c1cd5571e6d6f4 100644 --- a/docsite/rst/intro_getting_started.rst +++ b/docsite/rst/intro_getting_started.rst @@ -11,10 +11,10 @@ Foreword Now that you've read :doc:`intro_installation` and installed Ansible, it's time to dig in and get started with some commands. -What we are showing first are not the powerful configuration/deployment/orchestration of Ansible, called playbooks. -Playbooks are covered in a separate section. +What we are showing first are not the powerful configuration/deployment/orchestration features of Ansible. +These features are handled by playbooks which are covered in a separate section. -This section is about how to get going initially. Once you have these concepts down, read :doc:`intro_adhoc` for some more +This section is about how to initially get going. Once you have these concepts down, read :doc:`intro_adhoc` for some more detail, and then you'll be ready to dive into playbooks and explore the most interesting parts! .. _remote_connection_information: @@ -22,21 +22,20 @@ detail, and then you'll be ready to dive into playbooks and explore the most int Remote Connection Information ````````````````````````````` -Before we get started, it's important to understand how Ansible is communicating with remote +Before we get started, it's important to understand how Ansible communicates with remote machines over SSH. By default, Ansible 1.3 and later will try to use native -OpenSSH for remote communication when possible. This enables both ControlPersist (a performance feature), Kerberos, and options in ~/.ssh/config such as Jump Host setup. When using Enterprise Linux 6 operating systems as the control machine (Red Hat Enterprise Linux and derivatives such as CentOS), however, the version of OpenSSH may be too old to support ControlPersist. 
On these operating systems, Ansible will fallback into using a high-quality Python implementation of +OpenSSH for remote communication when possible. This enables ControlPersist (a performance feature), Kerberos, and options in ~/.ssh/config such as Jump Host setup. However, when using Enterprise Linux 6 operating systems as the control machine (Red Hat Enterprise Linux and derivatives such as CentOS), the version of OpenSSH may be too old to support ControlPersist. On these operating systems, Ansible will fallback into using a high-quality Python implementation of OpenSSH called 'paramiko'. If you wish to use features like Kerberized SSH and more, consider using Fedora, OS X, or Ubuntu as your control machine until a newer version of OpenSSH is available for your platform -- or engage 'accelerated mode' in Ansible. See :doc:`playbooks_acceleration`. -In Ansible 1.2 and before, the default was strictly paramiko and native SSH had to be explicitly selected with -c ssh or set in the configuration file. +In releases up to and including Ansible 1.2, the default was strictly paramiko. Native SSH had to be explicitly selected with the -c ssh option or set in the configuration file. -Occasionally you'll encounter a device that doesn't do SFTP. This is rare, but if talking with some remote devices that don't support SFTP, you can switch to SCP mode in :doc:`intro_configuration`. +Occasionally you'll encounter a device that doesn't support SFTP. This is rare, but should it occur, you can switch to SCP mode in :doc:`intro_configuration`. -When speaking with remote machines, Ansible will by default assume you are using SSH keys -- which we encourage -- but passwords are fine too. To enable password auth, supply the option ``--ask-pass`` where needed. If using sudo features and when sudo requires a password, also supply ``--ask-sudo-pass`` as appropriate. +When speaking with remote machines, Ansible by default assumes you are using SSH keys. 
SSH keys are encouraged but password authentication can also be used where needed by supplying the option ``--ask-pass``. If using sudo features and when sudo requires a password, also supply ``--ask-sudo-pass``. -While it may be common sense, it is worth sharing: Any management system benefits from being run near the machines being managed. If running in a cloud, consider running Ansible from a machine inside that cloud. It will work better than on the open -internet in most cases. +While it may be common sense, it is worth sharing: Any management system benefits from being run near the machines being managed. If you are running Ansible in a cloud, consider running it from a machine inside that cloud. In most cases this will work better than on the open Internet. As an advanced topic, Ansible doesn't just have to connect remotely over SSH. The transports are pluggable, and there are options for managing things locally, as well as managing chroot, lxc, and jail containers. A mode called 'ansible-pull' can also invert the system and have systems 'phone home' via scheduled git checkouts to pull configuration directives from a central repository. @@ -47,8 +46,8 @@ Your first commands Now that you've installed Ansible, it's time to get started with some basics. -Edit (or create) /etc/ansible/hosts and put one or more remote systems in it, for -which you have your SSH key in ``authorized_keys``:: +Edit (or create) /etc/ansible/hosts and put one or more remote systems in it. Your +public SSH key should be located in ``authorized_keys`` on those systems:: 192.168.1.50 aserver.example.org @@ -95,9 +94,9 @@ Now run a live command on all of your nodes: $ ansible all -a "/bin/echo hello" -Congratulations. You've just contacted your nodes with Ansible. It's -soon going to be time to read some of the more real-world :doc:`intro_adhoc`, and explore -what you can do with different modules, as well as the Ansible +Congratulations! You've just contacted your nodes with Ansible. 
It's +soon going to be time to: read about some more real-world cases in :doc:`intro_adhoc`, +explore what you can do with different modules, and to learn about the Ansible :doc:`playbooks` language. Ansible is not just about running commands, it also has powerful configuration management and deployment features. There's more to explore, but you already have a fully working infrastructure! @@ -111,7 +110,7 @@ Ansible 1.2.1 and later have host key checking enabled by default. If a host is reinstalled and has a different key in 'known_hosts', this will result in an error message until corrected. If a host is not initially in 'known_hosts' this will result in prompting for confirmation of the key, which results in an interactive experience if using Ansible, from say, cron. You might not want this. -If you wish to disable this behavior and understand the implications, you can do so by editing /etc/ansible/ansible.cfg or ~/.ansible.cfg:: +If you understand the implications and wish to disable this behavior, you can do so by editing /etc/ansible/ansible.cfg or ~/.ansible.cfg:: [defaults] host_key_checking = False @@ -126,7 +125,7 @@ Also note that host key checking in paramiko mode is reasonably slow, therefore .. _a_note_about_logging: -Ansible will log some information about module arguments on the remote system in the remote syslog, unless a task or play is marked with a "no_log: True" attribute, explained later. +Ansible will log some information about module arguments on the remote system in the remote syslog, unless a task or play is marked with a "no_log: True" attribute. This is explained later. To enable basic logging on the control machine see :doc:`intro_configuration` document and set the 'log_path' configuration file setting. Enterprise users may also be interested in :doc:`tower`. 
Tower provides a very robust database logging feature where it is possible to drill down and see history based on hosts, projects, and particular inventories over time -- explorable both graphically and through a REST API. From e59b3646416942504c4392a3eaf4f8859d1187e8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Feb 2015 05:05:27 -0500 Subject: [PATCH 0039/3617] changed from hash_merge to combine vars which resets default to overwrite and not merge hashing --- lib/ansible/runner/__init__.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 79a167c5a00532..7a693cc8d01de7 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -674,11 +674,11 @@ def _executor_internal(self, host, new_stdin): # Then we selectively merge some variable dictionaries down to a # single dictionary, used to template the HostVars for this host temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) - temp_vars = utils.merge_hash(temp_vars, inject['combined_cache']) - temp_vars = utils.merge_hash(temp_vars, self.play_vars) - temp_vars = utils.merge_hash(temp_vars, self.play_file_vars) - temp_vars = utils.merge_hash(temp_vars, self.extra_vars) - temp_vars = utils.merge_hash(temp_vars, {'groups': inject['groups']}) + temp_vars = utils.combine_vars(temp_vars, inject['combined_cache']) + temp_vars = utils.combine_vars(temp_vars, self.play_vars) + temp_vars = utils.combine_vars(temp_vars, self.play_file_vars) + temp_vars = utils.combine_vars(temp_vars, self.extra_vars) + temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']}) hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass) From ce764063f14fdd1a664002e2dad02c10eec24f59 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Feb 2015 05:14:22 -0500 Subject: [PATCH 0040/3617] corrected merge vs combined in all pertinent sections --- 
lib/ansible/playbook/play.py | 10 +++++----- lib/ansible/runner/__init__.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 8d81424f09e41a..47bfd79b0b4395 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -115,8 +115,8 @@ def __init__(self, playbook, ds, basedir, vault_password=None): _tasks = ds.pop('tasks', []) _handlers = ds.pop('handlers', []) - temp_vars = utils.merge_hash(self.vars, self.vars_file_vars) - temp_vars = utils.merge_hash(temp_vars, self.playbook.extra_vars) + temp_vars = utils.combine_vars(self.vars, self.vars_file_vars) + temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars) ds = template(basedir, ds, temp_vars) ds['tasks'] = _tasks @@ -632,9 +632,9 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud dirname = os.path.dirname(original_file) # temp vars are used here to avoid trampling on the existing vars structures - temp_vars = utils.merge_hash(self.vars, self.vars_file_vars) - temp_vars = utils.merge_hash(temp_vars, mv) - temp_vars = utils.merge_hash(temp_vars, self.playbook.extra_vars) + temp_vars = utils.combine_vars(self.vars, self.vars_file_vars) + temp_vars = utils.combine_vars(temp_vars, mv) + temp_vars = utils.combine_Vars(temp_vars, self.playbook.extra_vars) include_file = template(dirname, tokens[0], temp_vars) include_filename = utils.path_dwim(dirname, include_file) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 7a693cc8d01de7..15845c6929a346 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -608,7 +608,7 @@ def _executor(self, host, new_stdin): def get_combined_cache(self): # merge the VARS and SETUP caches for this host combined_cache = self.setup_cache.copy() - return utils.merge_hash(combined_cache, self.vars_cache) + return utils.combine_vars(combined_cache, self.vars_cache) def 
get_inject_vars(self, host): host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass) From 4fa51652b42854f7646f3f176486f676d7b78e7c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Feb 2015 05:26:41 -0500 Subject: [PATCH 0041/3617] fixed typoe in combined_Vars --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 47bfd79b0b4395..bb77506dd1243f 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -634,7 +634,7 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud # temp vars are used here to avoid trampling on the existing vars structures temp_vars = utils.combine_vars(self.vars, self.vars_file_vars) temp_vars = utils.combine_vars(temp_vars, mv) - temp_vars = utils.combine_Vars(temp_vars, self.playbook.extra_vars) + temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars) include_file = template(dirname, tokens[0], temp_vars) include_filename = utils.path_dwim(dirname, include_file) From 0b8773fc99bb3e8e1e10167c7a76a844a1263161 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 26 Feb 2015 11:35:29 -0500 Subject: [PATCH 0042/3617] Remove state from central argument list There is an old PR that shows a great use case for having a different set of states for the server module. Before the other modules start being in real use, pull this out so that we don't get ourselves into a pickle. 
--- lib/ansible/module_utils/openstack.py | 1 - lib/ansible/utils/module_docs_fragments/openstack.py | 5 ----- v2/ansible/module_utils/openstack.py | 1 - 3 files changed, 7 deletions(-) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index 90415cadabbdb6..6388fffbad2c9f 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -77,7 +77,6 @@ def openstack_full_argument_spec(**kwargs): auth=dict(default=None), region_name=dict(default=None), availability_zone=dict(default=None), - state=dict(default='present', choices=['absent', 'present']), wait=dict(default=True, type='bool'), timeout=dict(default=180, type='int'), endpoint_type=dict( diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index d740bc719c3682..cb8f2c1bfb8bd7 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -53,11 +53,6 @@ class ModuleDocFragment(object): description: - Name of the availability zone. required: false - state: - description: - - Should the resource be present or absent. - choices: [present, absent] - default: present wait: description: - Should ansible wait until the requested resource is complete. 
diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py index 90415cadabbdb6..6388fffbad2c9f 100644 --- a/v2/ansible/module_utils/openstack.py +++ b/v2/ansible/module_utils/openstack.py @@ -77,7 +77,6 @@ def openstack_full_argument_spec(**kwargs): auth=dict(default=None), region_name=dict(default=None), availability_zone=dict(default=None), - state=dict(default='present', choices=['absent', 'present']), wait=dict(default=True, type='bool'), timeout=dict(default=180, type='int'), endpoint_type=dict( From 8758ae201defe5abe166e136a2ddc4e55d66d940 Mon Sep 17 00:00:00 2001 From: Hartmut Goebel Date: Sat, 28 Feb 2015 14:13:58 +0100 Subject: [PATCH 0043/3617] Fix detect of docker as virtualization_type. Not only match`/docker/`, but also `docker-` followed by a hex-id. Example (shortened): ``` $ cat /proc/1/cgroup 8:blkio:/system.slice/docker-de73f4d207861cf8757b69213ee67bb234b897a18bea7385964b6ed2d515da94.scope 7:net_cls:/ ``` --- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 17e7c62f83ae28..350f002dcc090d 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2336,7 +2336,7 @@ def get_virtual_facts(self): if os.path.exists('/proc/1/cgroup'): for line in get_file_lines('/proc/1/cgroup'): - if re.search('/docker/', line): + if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line): self.facts['virtualization_type'] = 'docker' self.facts['virtualization_role'] = 'guest' return From 8027a8a0b50514a362abcddf1d4c78acf67bdfee Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 6 Mar 2015 18:11:12 -0500 Subject: [PATCH 0044/3617] Change to auth_type to match python-openstackclient --- lib/ansible/module_utils/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index 
6388fffbad2c9f..53e18cab0ca49f 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -73,7 +73,7 @@ def openstack_find_nova_addresses(addresses, ext_tag, key_name=None): def openstack_full_argument_spec(**kwargs): spec = dict( cloud=dict(default=None), - auth_plugin=dict(default=None), + auth_type=dict(default=None), auth=dict(default=None), region_name=dict(default=None), availability_zone=dict(default=None), From 8758ba08bdb07ef8fde669beef750303c455a237 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 6 Mar 2015 18:20:45 -0500 Subject: [PATCH 0045/3617] Update common OpenStack requests-related parameters Also, update docs related to earlier changes in this stack. --- lib/ansible/module_utils/openstack.py | 6 +++- .../utils/module_docs_fragments/openstack.py | 28 ++++++++++++++----- v2/ansible/module_utils/openstack.py | 8 ++++-- 3 files changed, 32 insertions(+), 10 deletions(-) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index 53e18cab0ca49f..35b9026213e988 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -77,10 +77,14 @@ def openstack_full_argument_spec(**kwargs): auth=dict(default=None), region_name=dict(default=None), availability_zone=dict(default=None), + verify=dict(default=True), + cacert=dict(default=None), + cert=dict(default=None), + key=dict(default=None), wait=dict(default=True, type='bool'), timeout=dict(default=180, type='int'), endpoint_type=dict( - default='publicURL', choices=['publicURL', 'internalURL'] + default='public', choices=['public', 'internal', 'admin'] ) ) spec.update(kwargs) diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index cb8f2c1bfb8bd7..2979cb68d7b95f 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -34,17 +34,13 @@ class 
ModuleDocFragment(object): this param will need to contain whatever parameters that auth plugin requires. This parameter is not needed if a named cloud is provided. required: false - auth_plugin: + auth_type: description: - Name of the auth plugin to use. If the cloud uses something other than password authentication, the name of the plugin should be indicated here and the contents of the I(auth) parameter should be updated accordingly. required: false default: password - auth_token: - description: - - An auth token obtained previously. If I(auth_token) is given, - I(auth) and I(auth_plugin) are not needed. region_name: description: - Name of the region. @@ -64,12 +60,30 @@ class ModuleDocFragment(object): - How long should ansible wait for the requested resource. required: false default: 180 + verify: + description: + - Whether or not SSL API requests should be verified. + required: false + default: True + cacert: + description: + - A path to a CA Cert bundle that can be used as part of verifying + SSL API requests. + required: false + cert: + description: + - A path to a client certificate to use as part of the SSL transaction + required: false + key: + description: + - A path to a client key to use as part of the SSL transaction + required: false endpoint_type: description: - Endpoint URL type to fetch from the service catalog. 
- choices: [publicURL, internalURL] + choices: [public, internal, admin] required: false - default: publicURL + default: public requirements: - shade notes: diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py index 6388fffbad2c9f..35b9026213e988 100644 --- a/v2/ansible/module_utils/openstack.py +++ b/v2/ansible/module_utils/openstack.py @@ -73,14 +73,18 @@ def openstack_find_nova_addresses(addresses, ext_tag, key_name=None): def openstack_full_argument_spec(**kwargs): spec = dict( cloud=dict(default=None), - auth_plugin=dict(default=None), + auth_type=dict(default=None), auth=dict(default=None), region_name=dict(default=None), availability_zone=dict(default=None), + verify=dict(default=True), + cacert=dict(default=None), + cert=dict(default=None), + key=dict(default=None), wait=dict(default=True, type='bool'), timeout=dict(default=180, type='int'), endpoint_type=dict( - default='publicURL', choices=['publicURL', 'internalURL'] + default='public', choices=['public', 'internal', 'admin'] ) ) spec.update(kwargs) From 5453e2cbb8be6aa1f0036659d3e66cab54090532 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 9 Mar 2015 10:27:59 -0400 Subject: [PATCH 0046/3617] removed redundant inventory call, moved grousp to proper priority --- lib/ansible/runner/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 15845c6929a346..69c062e205b79d 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -673,12 +673,11 @@ def _executor_internal(self, host, new_stdin): # Then we selectively merge some variable dictionaries down to a # single dictionary, used to template the HostVars for this host - temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) - temp_vars = utils.combine_vars(temp_vars, inject['combined_cache']) + temp_vars = inject['combined_cache'] + temp_vars = 
utils.combine_vars(temp_vars, {'groups': inject['groups']}) temp_vars = utils.combine_vars(temp_vars, self.play_vars) temp_vars = utils.combine_vars(temp_vars, self.play_file_vars) temp_vars = utils.combine_vars(temp_vars, self.extra_vars) - temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']}) hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass) From 642d9d6b563837ae5187720444c76abc152fb49c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 9 Mar 2015 12:12:37 -0400 Subject: [PATCH 0047/3617] readded inventory vars to runner's vars --- lib/ansible/runner/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 69c062e205b79d..c1f5b3683cec13 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -608,7 +608,7 @@ def _executor(self, host, new_stdin): def get_combined_cache(self): # merge the VARS and SETUP caches for this host combined_cache = self.setup_cache.copy() - return utils.combine_vars(combined_cache, self.vars_cache) + return utils.merge_hash(combined_cache, self.vars_cache) def get_inject_vars(self, host): host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass) @@ -674,6 +674,7 @@ def _executor_internal(self, host, new_stdin): # Then we selectively merge some variable dictionaries down to a # single dictionary, used to template the HostVars for this host temp_vars = inject['combined_cache'] + temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] ) temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']}) temp_vars = utils.combine_vars(temp_vars, self.play_vars) temp_vars = utils.combine_vars(temp_vars, self.play_file_vars) From d244390064a037dd82a71ee5d98731893e4cd33e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 9 Mar 2015 12:15:41 -0400 Subject: [PATCH 0048/3617] correclty added inventory this time --- 
lib/ansible/runner/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index c1f5b3683cec13..52e530ac652702 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -673,7 +673,7 @@ def _executor_internal(self, host, new_stdin): # Then we selectively merge some variable dictionaries down to a # single dictionary, used to template the HostVars for this host - temp_vars = inject['combined_cache'] + temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] ) temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']}) temp_vars = utils.combine_vars(temp_vars, self.play_vars) From 5f6db0e16477749c1bccf472150132ca06c50b3b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 24 Nov 2014 16:36:31 -0500 Subject: [PATCH 0049/3617] preliminary privlege escalation unification + pbrun - become constants inherit existing sudo/su ones - become command line options, marked sudo/su as deprecated and moved sudo/su passwords to runas group - changed method signatures as privlege escalation is collapsed to become - added tests for su and become, diabled su for lack of support in local.py - updated playbook,play and task objects to become - added become to runner - added whoami test for become/sudo/su - added home override dir for plugins - removed useless method from ask pass - forced become pass to always be string also uses to_bytes - fixed fakerunner for tests - corrected reference in synchronize action plugin - added pfexec (needs testing) - removed unused sudo/su in runner init - removed deprecated info - updated pe tests to allow to run under sudo and not need root - normalized become options into a funciton to avoid duplication and inconsistencies - pushed suppored list to connection classs property - updated all connection plugins to latest 'become' pe - includes 
fixes from feedback (including typos) - added draft docs - stub of become_exe, leaving for future v2 fixes --- bin/ansible | 53 ++--- bin/ansible-playbook | 36 ++-- docsite/rst/become.rst | 83 ++++++++ examples/ansible.cfg | 6 + lib/ansible/constants.py | 34 +-- lib/ansible/playbook/__init__.py | 44 ++-- lib/ansible/playbook/play.py | 119 +++++++---- lib/ansible/playbook/task.py | 84 ++++---- lib/ansible/runner/__init__.py | 168 +++++++-------- lib/ansible/runner/action_plugins/assemble.py | 2 +- lib/ansible/runner/action_plugins/copy.py | 2 +- lib/ansible/runner/action_plugins/fetch.py | 2 +- lib/ansible/runner/action_plugins/patch.py | 2 +- lib/ansible/runner/action_plugins/script.py | 3 +- .../runner/action_plugins/synchronize.py | 16 +- lib/ansible/runner/action_plugins/template.py | 2 +- .../runner/action_plugins/unarchive.py | 2 +- lib/ansible/runner/action_plugins/win_copy.py | 2 +- .../runner/action_plugins/win_template.py | 2 +- .../runner/connection_plugins/accelerate.py | 15 +- .../runner/connection_plugins/chroot.py | 10 +- .../runner/connection_plugins/fireball.py | 8 +- .../runner/connection_plugins/funcd.py | 8 +- lib/ansible/runner/connection_plugins/jail.py | 10 +- .../runner/connection_plugins/libvirt_lxc.py | 10 +- .../runner/connection_plugins/local.py | 40 ++-- .../runner/connection_plugins/paramiko_ssh.py | 44 ++-- lib/ansible/runner/connection_plugins/ssh.py | 97 +++++---- .../runner/connection_plugins/winrm.py | 10 +- lib/ansible/runner/connection_plugins/zone.py | 11 +- lib/ansible/utils/__init__.py | 194 ++++++++++++------ test/integration/destructive.yml | 2 + .../roles/test_become/files/baz.txt | 1 + .../roles/test_become/tasks/main.yml | 77 +++++++ .../roles/test_become/templates/bar.j2 | 1 + .../roles/test_become/vars/default.yml | 1 + test/integration/roles/test_su/files/baz.txt | 1 + test/integration/roles/test_su/tasks/main.yml | 75 +++++++ .../roles/test_su/templates/bar.j2 | 1 + .../roles/test_su/vars/default.yml | 1 + 
.../roles/test_sudo/tasks/main.yml | 12 ++ test/units/TestPlayVarsFiles.py | 3 + test/units/TestSynchronize.py | 7 +- test/units/TestUtils.py | 4 +- v2/ansible/constants.py | 16 +- 45 files changed, 845 insertions(+), 476 deletions(-) create mode 100644 docsite/rst/become.rst create mode 100644 test/integration/roles/test_become/files/baz.txt create mode 100644 test/integration/roles/test_become/tasks/main.yml create mode 100644 test/integration/roles/test_become/templates/bar.j2 create mode 100644 test/integration/roles/test_become/vars/default.yml create mode 100644 test/integration/roles/test_su/files/baz.txt create mode 100644 test/integration/roles/test_su/tasks/main.yml create mode 100644 test/integration/roles/test_su/templates/bar.j2 create mode 100644 test/integration/roles/test_su/vars/default.yml diff --git a/bin/ansible b/bin/ansible index 5aaaa582a7e4f0..7fec34ec81e9c6 100755 --- a/bin/ansible +++ b/bin/ansible @@ -58,12 +58,12 @@ class Cli(object): ''' create an options parser for bin/ansible ''' parser = utils.base_parser( - constants=C, - runas_opts=True, - subset_opts=True, + constants=C, + runas_opts=True, + subset_opts=True, async_opts=True, - output_opts=True, - connect_opts=True, + output_opts=True, + connect_opts=True, check_opts=True, diff_opts=False, usage='%prog [options]' @@ -82,12 +82,8 @@ class Cli(object): parser.print_help() sys.exit(1) - # su and sudo command line arguments need to be mutually exclusive - if (options.su or options.su_user or options.ask_su_pass) and \ - (options.sudo or options.sudo_user or options.ask_sudo_pass): - parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " - "and su arguments ('-su', '--su-user', and '--ask-su-pass') are " - "mutually exclusive") + # privlege escalation command line arguments need to be mutually exclusive + utils.check_mutually_exclusive_privilege(options, parser) if (options.ask_vault_pass and options.vault_password_file): parser.error("--ask-vault-pass and 
--vault-password-file are mutually exclusive") @@ -101,20 +97,20 @@ class Cli(object): pattern = args[0] - sshpass = None - sudopass = None - su_pass = None - vault_pass = None + sshpass = becomepass = vault_pass = become_method = None - options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS # Never ask for an SSH password when we run with local connection if options.connection == "local": options.ask_pass = False - options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS - options.ask_su_pass = options.ask_su_pass or C.DEFAULT_ASK_SU_PASS + else: + options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS + options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS - (sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass) + # become + utils.normalize_become_options(options) + prompt_method = utils.choose_pass_prompt(options) + (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method) # read vault_pass from a file if not options.ask_vault_pass and options.vault_password_file: @@ -126,6 +122,7 @@ class Cli(object): if options.subset: inventory_manager.subset(options.subset) hosts = inventory_manager.list_hosts(pattern) + if len(hosts) == 0: callbacks.display("No hosts matched", stderr=True) sys.exit(0) @@ -135,16 +132,10 @@ class Cli(object): callbacks.display(' %s' % host) sys.exit(0) - if ((options.module_name == 'command' or options.module_name == 'shell') - and not options.module_args): + if options.module_name in ['command','shell'] and not options.module_args: callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True) sys.exit(1) - - if options.su_user or options.ask_su_pass: - options.su = True - 
options.sudo_user = options.sudo_user or C.DEFAULT_SUDO_USER - options.su_user = options.su_user or C.DEFAULT_SU_USER if options.tree: utils.prepare_writeable_dir(options.tree) @@ -160,17 +151,15 @@ class Cli(object): forks=options.forks, pattern=pattern, callbacks=self.callbacks, - sudo=options.sudo, - sudo_pass=sudopass, - sudo_user=options.sudo_user, transport=options.connection, subset=options.subset, check=options.check, diff=options.check, - su=options.su, - su_pass=su_pass, - su_user=options.su_user, vault_pass=vault_pass, + become=options.become, + become_method=options.become_method, + become_pass=becomepass, + become_user=options.become_user, extra_vars=extra_vars, ) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index f62c699d64d614..79cbc43d80a4ec 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -108,19 +108,14 @@ def main(args): parser.print_help(file=sys.stderr) return 1 - # su and sudo command line arguments need to be mutually exclusive - if (options.su or options.su_user or options.ask_su_pass) and \ - (options.sudo or options.sudo_user or options.ask_sudo_pass): - parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " - "and su arguments ('-su', '--su-user', and '--ask-su-pass') are " - "mutually exclusive") + # privlege escalation command line arguments need to be mutually exclusive + utils.check_mutually_exclusive_privilege(options, parser) if (options.ask_vault_pass and options.vault_password_file): parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") sshpass = None - sudopass = None - su_pass = None + becomepass = None vault_pass = None options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS @@ -132,11 +127,14 @@ def main(args): # Never ask for an SSH password when we run with local connection if options.connection == "local": options.ask_pass = False - options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS - options.ask_su_pass 
= options.ask_su_pass or C.DEFAULT_ASK_SU_PASS - (sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass) - options.sudo_user = options.sudo_user or C.DEFAULT_SUDO_USER - options.su_user = options.su_user or C.DEFAULT_SU_USER + + # set pe options + utils.normalize_become_options(options) + prompt_method = utils.choose_pass_prompt(options) + (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, + become_ask_pass=options.become_ask_pass, + ask_vault_pass=options.ask_vault_pass, + become_method=prompt_method) # read vault_pass from a file if not options.ask_vault_pass and options.vault_password_file: @@ -197,20 +195,18 @@ def main(args): stats=stats, timeout=options.timeout, transport=options.connection, - sudo=options.sudo, - sudo_user=options.sudo_user, - sudo_pass=sudopass, + become=options.become, + become_method=options.become_method, + become_user=options.become_user, + become_pass=becomepass, extra_vars=extra_vars, private_key_file=options.private_key_file, only_tags=only_tags, skip_tags=skip_tags, check=options.check, diff=options.diff, - su=options.su, - su_pass=su_pass, - su_user=options.su_user, vault_password=vault_pass, - force_handlers=options.force_handlers + force_handlers=options.force_handlers, ) if options.flush_cache: diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst new file mode 100644 index 00000000000000..dd2d9b140cd842 --- /dev/null +++ b/docsite/rst/become.rst @@ -0,0 +1,83 @@ +Ansible Privilege Escalation +++++++++++++++++++++++++++++ + +Ansible can use existing privilege escalation systems to allow a user to execute tasks as another. + +.. 
contents:: Topics + +Become +`````` +Before 1.9 Ansible mostly allowed the use of sudo and a limited use of su to allow a login/remote user to become a different user +and execute tasks, create resources with the 2nd user's permissions. As of 1.9 'become' supersedes the old sudo/su, while still +being backwards compatible. This new system also makes it easier to add other privilege escalation tools like pbrun (Powerbroker), +pfexec and others. + + +New directives +-------------- + +become + equivalent to adding sudo: or su: to a play or task, set to true/yes to activate privilege escalation + +become_user + equivalent to adding sudo_user: or su_user: to a play or task + +become_method + at play or task level overrides the default method set in ansibile.cfg + + +New ansible_ variables +---------------------- +Each allows you to set an option per group and/or host + +ansible_become + equivalent to ansible_sudo or ansbile_su, allows to force privilege escalation + +ansible_become_method + allows to set privilege escalation method + +ansible_become_user + equivalent to ansible_sudo_user or ansbile_su_user, allows to set the user you become through privilege escalation + +ansible_become_pass + equivalent to ansible_sudo_pass or ansbile_su_pass, allows you to set the privilege escalation password + + +New command line options +----------------------- + +--ask-become-pass + ask for privilege escalation password + +-b, --become + run operations with become (no passorwd implied) + +--become-method=BECOME_METHOD + privilege escalation method to use (default=sudo), + valid choices: [ sudo | su | pbrun | pfexec ] + +--become-user=BECOME_USER + run operations as this user (default=root) + + +sudo and su still work! +----------------------- + +Old playbooks will not need to be changed, even though they are deprecated, sudo and su directives will continue to work though it +is recommended to move to become as they may be retired at one point. 
You cannot mix directives on the same object though, ansible +will complain if you try to. + +Become will default to using the old sudo/su configs and variables if they exist, but will override them if you specify any of the +new ones. + + + +.. note:: Privilege escalation methods must also be supported by the connection plugin used, most will warn if they do not, some will just ignore it as they always run as root (jail, chroot, etc). + +.. seealso:: + + `Mailing List `_ + Questions? Help? Ideas? Stop by the list on Google Groups + `irc.freenode.net `_ + #ansible IRC chat channel + diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 67aa039608e9b0..4cf9d513e59533 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -159,6 +159,12 @@ fact_caching = memory #retry_files_enabled = False #retry_files_save_path = ~/.ansible-retry +[privilege_escalation] +#become=True +#become_method='sudo' +#become_user='root' +#become_ask_pass=False + [paramiko_connection] # uncomment this line to cause the paramiko connection plugin to not record new host diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 31dc91463e8795..1779b792fb3c42 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -86,9 +86,6 @@ def shell_expand_path(path): path = os.path.expanduser(os.path.expandvars(path)) return path -def get_plugin_paths(path): - return ':'.join([os.path.join(x, path) for x in [os.path.expanduser('~/.ansible/plugins/'), '/usr/share/ansible_plugins/']]) - p = load_config_file() active_user = pwd.getpwuid(os.geteuid())[0] @@ -137,16 +134,28 @@ def get_plugin_paths(path): DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() - -DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 
'ANSIBLE_ACTION_PLUGINS', get_plugin_paths('action_plugins')) -DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', get_plugin_paths('cache_plugins')) -DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', get_plugin_paths('callback_plugins')) -DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', get_plugin_paths('connection_plugins')) -DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', get_plugin_paths('lookup_plugins')) -DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', get_plugin_paths('vars_plugins')) -DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', get_plugin_paths('filter_plugins')) DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) +#TODO: get rid of ternary chain mess +BECOME_METHODS = ['sudo','su','pbrun','runas','pfexec'] +DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True) +DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() +DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root') +DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS',True if DEFAULT_ASK_SUDO_PASS else False, boolean=True) +# need to rethink impementing these 2 +DEFAULT_BECOME_EXE = None +#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo') +#DEFAULT_BECOME_FLAGS = get_config(p, DEFAULTS, 
'become_flags', 'ANSIBLE_BECOME_FLAGS',DEFAULT_SUDO_FLAGS if DEFAULT_SUDO else DEFAULT_SU_FLAGS if DEFAULT_SU else '-H') + + +DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins') +DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins') +DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins') +DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins') +DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') +DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') +DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') + CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts') @@ -172,7 +181,7 @@ def get_plugin_paths(path): ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', 
False, boolean=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) -# obsolete -- will be formally removed in 1.6 +# obsolete -- will be formally removed ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True) @@ -188,6 +197,7 @@ def get_plugin_paths(path): DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" # non-configurable things +DEFAULT_BECOME_PASS = None DEFAULT_SUDO_PASS = None DEFAULT_REMOTE_PASS = None DEFAULT_SUBSET = None diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 5de1e6e309a87a..d58657012c625f 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -60,15 +60,12 @@ def __init__(self, timeout = C.DEFAULT_TIMEOUT, remote_user = C.DEFAULT_REMOTE_USER, remote_pass = C.DEFAULT_REMOTE_PASS, - sudo_pass = C.DEFAULT_SUDO_PASS, remote_port = None, transport = C.DEFAULT_TRANSPORT, private_key_file = C.DEFAULT_PRIVATE_KEY_FILE, callbacks = None, runner_callbacks = None, stats = None, - sudo = False, - sudo_user = C.DEFAULT_SUDO_USER, extra_vars = None, only_tags = None, skip_tags = None, @@ -77,11 +74,13 @@ def __init__(self, check = False, diff = False, any_errors_fatal = False, - su = False, - su_user = False, - su_pass = False, vault_password = False, force_handlers = False, + # privelege escalation + become = C.DEFAULT_BECOME, + become_method = C.DEFAULT_BECOME_METHOD, + become_user = C.DEFAULT_BECOME_USER, + become_pass = None, ): """ @@ -92,13 +91,11 @@ def __init__(self, timeout: connection timeout remote_user: run as this user if not specified in a particular play remote_pass: use this remote password (for all plays) 
vs using SSH keys - sudo_pass: if sudo==True, and a password is required, this is the sudo password remote_port: default remote port to use if not specified with the host or play transport: how to connect to hosts that don't specify a transport (local, paramiko, etc) callbacks output callbacks for the playbook runner_callbacks: more callbacks, this time for the runner API stats: holds aggregrate data about events occurring to each host - sudo: if not specified per play, requests all plays use sudo mode inventory: can be specified instead of host_list to use a pre-existing inventory object check: don't change anything, just try to detect some potential changes any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed @@ -139,21 +136,20 @@ def __init__(self, self.callbacks = callbacks self.runner_callbacks = runner_callbacks self.stats = stats - self.sudo = sudo - self.sudo_pass = sudo_pass - self.sudo_user = sudo_user self.extra_vars = extra_vars self.global_vars = {} self.private_key_file = private_key_file self.only_tags = only_tags self.skip_tags = skip_tags self.any_errors_fatal = any_errors_fatal - self.su = su - self.su_user = su_user - self.su_pass = su_pass self.vault_password = vault_password self.force_handlers = force_handlers + self.become = become + self.become_method = become_method + self.become_user = become_user + self.become_pass = become_pass + self.callbacks.playbook = self self.runner_callbacks.playbook = self @@ -416,10 +412,7 @@ def _run_task_internal(self, task): basedir=task.play.basedir, conditional=task.when, callbacks=self.runner_callbacks, - sudo=task.sudo, - sudo_user=task.sudo_user, transport=task.transport, - sudo_pass=task.sudo_pass, is_playbook=True, check=self.check, diff=self.diff, @@ -429,13 +422,14 @@ def _run_task_internal(self, task): accelerate_port=task.play.accelerate_port, accelerate_ipv6=task.play.accelerate_ipv6, error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, - su=task.su, - 
su_user=task.su_user, - su_pass=task.su_pass, vault_pass = self.vault_password, run_hosts=hosts, no_log=task.no_log, run_once=task.run_once, + become=task.become, + become_method=task.become_method, + become_user=task.become_user, + become_pass=task.become_pass, ) runner.module_vars.update({'play_hosts': hosts}) @@ -616,12 +610,10 @@ def _do_setup_step(self, play): setup_cache=self.SETUP_CACHE, vars_cache=self.VARS_CACHE, callbacks=self.runner_callbacks, - sudo=play.sudo, - sudo_user=play.sudo_user, - sudo_pass=self.sudo_pass, - su=play.su, - su_user=play.su_user, - su_pass=self.su_pass, + become=play.become, + become_method=play.become_method, + become_user=play.become_user, + become_pass=self.become_pass, vault_pass=self.vault_password, transport=play.transport, is_playbook=True, diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 883142da4cc87c..74c6998b22f823 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -32,24 +32,25 @@ class Play(object): - __slots__ = [ - 'hosts', 'name', 'vars', 'vars_file_vars', 'role_vars', 'default_vars', 'vars_prompt', 'vars_files', - 'handlers', 'remote_user', 'remote_port', 'included_roles', 'accelerate', - 'accelerate_port', 'accelerate_ipv6', 'sudo', 'sudo_user', 'transport', 'playbook', - 'tags', 'gather_facts', 'serial', '_ds', '_handlers', '_tasks', - 'basedir', 'any_errors_fatal', 'roles', 'max_fail_pct', '_play_hosts', 'su', 'su_user', - 'vault_password', 'no_log', 'environment', + _pb_common = [ + 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become', + 'become_method', 'become_user', 'environment', 'gather_facts', 'handlers', 'hosts', + 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su', 'su_user', 'sudo', + 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt', 'vault_password', + ] + + __slots__ = _pb_common + [ + '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir', + 'default_vars', 'included_roles', 
'max_fail_pct', 'playbook', 'remote_port', + 'role_vars', 'transport', 'vars_file_vars', ] # to catch typos and so forth -- these are userland names # and don't line up 1:1 with how they are stored - VALID_KEYS = frozenset(( - 'hosts', 'name', 'vars', 'vars_prompt', 'vars_files', - 'tasks', 'handlers', 'remote_user', 'user', 'port', 'include', 'accelerate', 'accelerate_port', 'accelerate_ipv6', - 'sudo', 'sudo_user', 'connection', 'tags', 'gather_facts', 'serial', - 'any_errors_fatal', 'roles', 'role_names', 'pre_tasks', 'post_tasks', 'max_fail_percentage', - 'su', 'su_user', 'vault_password', 'no_log', 'environment', - )) + VALID_KEYS = frozenset(_pb_common + [ + 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks', + 'pre_tasks', 'role_names', 'tasks', 'user', + ]) # ************************************************* @@ -58,7 +59,7 @@ def __init__(self, playbook, ds, basedir, vault_password=None): for x in ds.keys(): if not x in Play.VALID_KEYS: - raise errors.AnsibleError("%s is not a legal parameter at this level in an Ansible Playbook" % x) + raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x) # allow all playbook keys to be set by --extra-vars self.vars = ds.get('vars', {}) @@ -140,8 +141,6 @@ def __init__(self, playbook, ds, basedir, vault_password=None): self._handlers = ds.get('handlers', []) self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user)) self.remote_port = ds.get('port', self.playbook.remote_port) - self.sudo = ds.get('sudo', self.playbook.sudo) - self.sudo_user = ds.get('sudo_user', self.playbook.sudo_user) self.transport = ds.get('connection', self.playbook.transport) self.remote_port = self.remote_port self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false')) @@ -149,22 +148,40 @@ def __init__(self, playbook, ds, basedir, vault_password=None): self.accelerate_port = ds.get('accelerate_port', None) self.accelerate_ipv6 = ds.get('accelerate_ipv6', False) 
self.max_fail_pct = int(ds.get('max_fail_percentage', 100)) - self.su = ds.get('su', self.playbook.su) - self.su_user = ds.get('su_user', self.playbook.su_user) self.no_log = utils.boolean(ds.get('no_log', 'false')) + # Fail out if user specifies conflicting privelege escalations + if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')): + raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("sudo", "sudo_user") cannot be used together') + if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')): + raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("su", "su_user") cannot be used together') + if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')): + raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together') + + # become settings are inherited and updated normally + self.become = ds.get('become', self.playbook.become) + self.become_method = ds.get('become_method', self.playbook.become_method) + self.become_user = ds.get('become_user', self.playbook.become_user) + + # Make sure current play settings are reflected in become fields + if 'sudo' in ds: + self.become=ds['sudo'] + self.become_method='sudo' + if 'sudo_user' in ds: + self.become_user=ds['sudo_user'] + elif 'su' in ds: + self.become=True + self.become=ds['su'] + if 'su_user' in ds: + self.become_user=ds['su_user'] + # gather_facts is not a simple boolean, as None means that a 'smart' # fact gathering mode will be used, so we need to be careful here as # calling utils.boolean(None) returns False self.gather_facts = ds.get('gather_facts', None) - if self.gather_facts: + if self.gather_facts is not None: self.gather_facts = utils.boolean(self.gather_facts) - # Fail out if user specifies a sudo param with a su param in a given play - if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or 
ds.get('su_user')): - raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ' - '("su", "su_user") cannot be used together') - load_vars['role_names'] = ds.get('role_names', []) self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars) @@ -173,9 +190,6 @@ def __init__(self, playbook, ds, basedir, vault_password=None): # apply any missing tags to role tasks self._late_merge_role_tags() - if self.sudo_user != 'root': - self.sudo = True - # place holder for the discovered hosts to be used in this play self._play_hosts = None @@ -429,7 +443,7 @@ def _load_roles(self, roles, ds): for (role, role_path, role_vars, role_params, default_vars) in roles: # special vars must be extracted from the dict to the included tasks - special_keys = [ "sudo", "sudo_user", "when", "with_items" ] + special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ] special_vars = {} for k in special_keys: if k in role_vars: @@ -531,7 +545,7 @@ def _resolve_main(self, basepath): # ************************************************* - def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sudo_vars=None, + def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None, additional_conditions=None, original_file=None, role_name=None): ''' handle task and handler include statements ''' @@ -547,8 +561,8 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud role_params = {} if default_vars is None: default_vars = {} - if sudo_vars is None: - sudo_vars = {} + if become_vars is None: + become_vars = {} old_conditions = list(additional_conditions) @@ -560,14 +574,37 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud if not isinstance(x, dict): raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file)) - # evaluate sudo vars for current and child tasks - included_sudo_vars = {} - for k in 
["sudo", "sudo_user"]: + # evaluate privilege escalation vars for current and child tasks + included_become_vars = {} + for k in ["become", "become_user", "become_method", "become_exe"]: if k in x: - included_sudo_vars[k] = x[k] - elif k in sudo_vars: - included_sudo_vars[k] = sudo_vars[k] - x[k] = sudo_vars[k] + included_become_vars[k] = x[k] + elif k in become_vars: + included_become_vars[k] = become_vars[k] + x[k] = become_vars[k] + + ## backwards compat with old sudo/su directives + if 'sudo' in x or 'sudo_user' in x: + included_become_vars['become'] = x['sudo'] + x['become'] = x['sudo'] + x['become_method'] = 'sudo' + del x['sudo'] + + if x.get('sudo_user', False): + included_become_vars['become_user'] = x['sudo_user'] + x['become_user'] = x['sudo_user'] + del x['sudo_user'] + + elif 'su' in x or 'su_user' in x: + included_become_vars['become'] = x['su'] + x['become'] = x['su'] + x['become_method'] = 'su' + del x['su'] + + if x.get('su_user', False): + included_become_vars['become_user'] = x['su_user'] + x['become_user'] = x['su_user'] + del x['su_user'] if 'meta' in x: if x['meta'] == 'flush_handlers': @@ -596,7 +633,7 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud included_additional_conditions.append(x[k]) elif type(x[k]) is list: included_additional_conditions.extend(x[k]) - elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log"): + elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"): continue else: include_vars[k] = x[k] @@ -643,7 +680,7 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, sud for y in data: if isinstance(y, dict) and 'include' in y: y['role_name'] = new_role - loaded = self._load_tasks(data, mv, role_params, default_vars, included_sudo_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role) + 
loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role) results += loaded elif type(x) == dict: task = Task( diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 05f96c84e396be..a43c2ab89d5872 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -24,26 +24,24 @@ class Task(object): - __slots__ = [ - 'name', 'meta', 'action', 'when', 'async_seconds', 'async_poll_interval', - 'notify', 'module_name', 'module_args', 'module_vars', 'play_vars', 'play_file_vars', 'role_vars', 'role_params', 'default_vars', - 'play', 'notified_by', 'tags', 'register', 'role_name', - 'delegate_to', 'first_available_file', 'ignore_errors', - 'local_action', 'transport', 'sudo', 'remote_user', 'sudo_user', 'sudo_pass', - 'items_lookup_plugin', 'items_lookup_terms', 'environment', 'args', - 'any_errors_fatal', 'changed_when', 'failed_when', 'always_run', 'delay', 'retries', 'until', - 'su', 'su_user', 'su_pass', 'no_log', 'run_once', + _t_common = [ + 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass', + 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when', + 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log', + 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user', + 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when', ] + __slots__ = [ + 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file', + 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars', + 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars', + ] + _t_common + # to prevent typos and such - VALID_KEYS = frozenset(( - 'name', 'meta', 'action', 'when', 'async', 'poll', 'notify', - 'first_available_file', 
'include', 'tags', 'register', 'ignore_errors', - 'delegate_to', 'local_action', 'transport', 'remote_user', 'sudo', 'sudo_user', - 'sudo_pass', 'when', 'connection', 'environment', 'args', - 'any_errors_fatal', 'changed_when', 'failed_when', 'always_run', 'delay', 'retries', 'until', - 'su', 'su_user', 'su_pass', 'no_log', 'run_once', - )) + VALID_KEYS = frozenset([ + 'async', 'connection', 'include', 'poll', + ] + _t_common) def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None): ''' constructor loads from a task or handler datastructure ''' @@ -131,14 +129,12 @@ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=No self.name = ds.get('name', None) self.tags = [ 'untagged' ] self.register = ds.get('register', None) - self.sudo = utils.boolean(ds.get('sudo', play.sudo)) - self.su = utils.boolean(ds.get('su', play.su)) self.environment = ds.get('environment', play.environment) self.role_name = role_name self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log self.run_once = utils.boolean(ds.get('run_once', 'false')) - #Code to allow do until feature in a Task + #Code to allow do until feature in a Task if 'until' in ds: if not ds.get('register'): raise errors.AnsibleError("register keyword is mandatory when using do until feature") @@ -160,24 +156,36 @@ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=No else: self.remote_user = ds.get('remote_user', play.playbook.remote_user) - self.sudo_user = None - self.sudo_pass = None - self.su_user = None - self.su_pass = None - - if self.sudo: - self.sudo_user = ds.get('sudo_user', play.sudo_user) - self.sudo_pass = ds.get('sudo_pass', play.playbook.sudo_pass) - elif self.su: - self.su_user = ds.get('su_user', play.su_user) - self.su_pass = ds.get('su_pass', play.playbook.su_pass) - - # Fail out if user specifies a sudo param 
with a su param in a given play - if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and \ - (ds.get('su') or ds.get('su_user') or ds.get('su_pass')): - raise errors.AnsibleError('sudo params ("sudo", "sudo_user", "sudo_pass") ' - 'and su params "su", "su_user", "su_pass") ' - 'cannot be used together') + # Fail out if user specifies privilege escalation params in conflict + if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')): + raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name) + + if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')): + raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "sudo_pass" in task: %s' % self.name) + + if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')): + raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name) + + self.become = utils.boolean(ds.get('become', play.become)) + self.become_method = ds.get('become_method', play.become_method) + self.become_user = ds.get('become_user', play.become_user) + self.become_pass = ds.get('become_pass', play.playbook.become_pass) + + # set only if passed in current task data + if 'sudo' in ds or 'sudo_user' in ds: + self.become=ds['sudo'] + self.become_method='sudo' + if 'sudo_user' in ds: + self.become_user = ds['sudo_user'] + if 'sudo_pass' in ds: + self.become_pass = ds['sudo_pass'] + if 'su' in ds or 'su_user' in ds: + self.become=ds['su'] + self.become_method='su' + if 'su_user' in ds: + self.become_user = ds['su_user'] + if 'su_pass' in ds: + self.become_pass = 
ds['su_pass'] # Both are defined if ('action' in ds) and ('local_action' in ds): diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index af1b674a0ee248..fea76f26ada266 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -123,7 +123,6 @@ def __init__(self, remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key remote_port=None, # if SSH on different ports private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords - sudo_pass=C.DEFAULT_SUDO_PASS, # ex: 'password123' or None background=0, # async poll every X seconds, else 0 for non-async basedir=None, # directory of playbook, if applicable setup_cache=None, # used to share fact data w/ other tasks @@ -131,8 +130,6 @@ def __init__(self, transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local' conditional='True', # run only if this fact expression evals to true callbacks=None, # used for output - sudo=False, # whether to run sudo or not - sudo_user=C.DEFAULT_SUDO_USER, # ex: 'root' module_vars=None, # a playbooks internals thing play_vars=None, # play_file_vars=None, # @@ -151,14 +148,15 @@ def __init__(self, accelerate=False, # use accelerated connection accelerate_ipv6=False, # accelerated connection w/ IPv6 accelerate_port=None, # port to use with accelerated connection - su=False, # Are we running our command via su? 
- su_user=None, # User to su to when running command, ex: 'root' - su_pass=C.DEFAULT_SU_PASS, vault_pass=None, run_hosts=None, # an optional list of pre-calculated hosts to run on no_log=False, # option to enable/disable logging for a given task run_once=False, # option to enable/disable host bypass loop for a given task - sudo_exe=C.DEFAULT_SUDO_EXE, # ex: /usr/local/bin/sudo + become=False, # whether to run privelege escalation or not + become_method=C.DEFAULT_BECOME_METHOD, + become_user=C.DEFAULT_BECOME_USER, # ex: 'root' + become_pass=C.DEFAULT_BECOME_PASS, # ex: 'password123' or None + become_exe=C.DEFAULT_BECOME_EXE, # ex: /usr/local/bin/sudo ): # used to lock multiprocess inputs and outputs at various levels @@ -201,10 +199,12 @@ def __init__(self, self.remote_port = remote_port self.private_key_file = private_key_file self.background = background - self.sudo = sudo - self.sudo_user_var = sudo_user - self.sudo_user = None - self.sudo_pass = sudo_pass + self.become = become + self.become_method = become_method + self.become_user_var = become_user + self.become_user = None + self.become_pass = become_pass + self.become_exe = become_exe self.is_playbook = is_playbook self.environment = environment self.complex_args = complex_args @@ -213,15 +213,10 @@ def __init__(self, self.accelerate_port = accelerate_port self.accelerate_ipv6 = accelerate_ipv6 self.callbacks.runner = self - self.su = su - self.su_user_var = su_user - self.su_user = None - self.su_pass = su_pass self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest() self.vault_pass = vault_pass self.no_log = no_log self.run_once = run_once - self.sudo_exe = sudo_exe if self.transport == 'smart': # If the transport is 'smart', check to see if certain conditions @@ -369,7 +364,7 @@ def _compute_delegate(self, password, remote_inject): delegate['pass'] = this_info.get('ansible_ssh_pass', password) delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', 
self.private_key_file) delegate['transport'] = this_info.get('ansible_connection', self.transport) - delegate['sudo_pass'] = this_info.get('ansible_sudo_pass', self.sudo_pass) + delegate['become_pass'] = this_info.get('ansible_become_pass', this_info.get('ansible_ssh_pass', self.become_pass)) # Last chance to get private_key_file from global variables. # this is useful if delegated host is not defined in the inventory @@ -481,13 +476,13 @@ def _execute_module(self, conn, tmp, module_name, args, or not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES - or self.su): + or self.become_method == 'su'): self._transfer_str(conn, tmp, module_name, module_data) environment_string = self._compute_environment_string(conn, inject) - if "tmp" in tmp and ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): - # deal with possible umask issues once sudo'ed to other user + if "tmp" in tmp and (self.become and self.become_user != 'root'): + # deal with possible umask issues once you become another user self._remote_chmod(conn, 'a+r', remote_module_path, tmp) cmd = "" @@ -514,8 +509,8 @@ def _execute_module(self, conn, tmp, module_name, args, else: argsfile = self._transfer_str(conn, tmp, 'arguments', args) - if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): - # deal with possible umask issues once sudo'ed to other user + if self.become and self.become_user != 'root': + # deal with possible umask issues once become another user self._remote_chmod(conn, 'a+r', argsfile, tmp) if async_jid is None: @@ -524,7 +519,7 @@ def _execute_module(self, conn, tmp, module_name, args, cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]]) else: if async_jid is None: - if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.su: + if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not 
C.DEFAULT_KEEP_REMOTE_FILES and not self.become_method == 'su': in_data = module_data else: cmd = "%s" % (remote_module_path) @@ -536,7 +531,7 @@ def _execute_module(self, conn, tmp, module_name, args, rm_tmp = None if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: - if not self.sudo or self.su or self.sudo_user == 'root' or self.su_user == 'root': + if not self.become or self.become_user == 'root': # not sudoing or sudoing to root, so can cleanup files in the same step rm_tmp = tmp @@ -546,17 +541,14 @@ def _execute_module(self, conn, tmp, module_name, args, sudoable = True if module_name == "accelerate": # always run the accelerate module as the user - # specified in the play, not the sudo_user + # specified in the play, not the become_user sudoable = False - if self.su: - res = self._low_level_exec_command(conn, cmd, tmp, su=True, in_data=in_data) - else: - res = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, in_data=in_data) + res = self._low_level_exec_command(conn, cmd, tmp, become=self.become, sudoable=sudoable, in_data=in_data) if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: - if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): - # not sudoing to root, so maybe can't delete files as that other user + if self.become and self.become_user != 'root': + # not becoming root, so maybe can't delete files as that other user # have to clean up temp files as original user in a second step cmd2 = conn.shell.remove(tmp, recurse=True) self._low_level_exec_command(conn, cmd2, tmp, sudoable=False) @@ -849,11 +841,9 @@ def _safe_template_complex_args(args, inject): def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None): ''' decides how to invoke a module ''' - # late processing of parameterized sudo_user (with_items,..) 
- if self.sudo_user_var is not None: - self.sudo_user = template.template(self.basedir, self.sudo_user_var, inject) - if self.su_user_var is not None: - self.su_user = template.template(self.basedir, self.su_user_var, inject) + # late processing of parameterized become_user (with_items,..) + if self.become_user_var is not None: + self.become_user = template.template(self.basedir, self.become_user_var, inject) # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }}) module_name = template.template(self.basedir, module_name, inject) @@ -893,18 +883,16 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port, actual_transport = inject.get('ansible_connection', self.transport) actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file) actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True) - self.sudo = utils.boolean(inject.get('ansible_sudo', self.sudo)) - self.sudo_user = inject.get('ansible_sudo_user', self.sudo_user) - self.sudo_pass = inject.get('ansible_sudo_pass', self.sudo_pass) - self.su = inject.get('ansible_su', self.su) - self.su_pass = inject.get('ansible_su_pass', self.su_pass) - self.sudo_exe = inject.get('ansible_sudo_exe', self.sudo_exe) - - # select default root user in case self.sudo requested + self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become)))) + self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user))) + self.become_pass = inject.get('ansible_become_pass', inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass))) + self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe)) + + # select default root user in case self.become requested # but no user specified; happens e.g. 
in host vars when - # just ansible_sudo=True is specified - if self.sudo and self.sudo_user is None: - self.sudo_user = 'root' + # just ansible_become=True is specified + if self.become and self.become_user is None: + self.become_user = 'root' if actual_private_key_file is not None: actual_private_key_file = os.path.expanduser(actual_private_key_file) @@ -937,7 +925,7 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port, actual_user = delegate['user'] actual_pass = delegate['pass'] actual_private_key_file = delegate['private_key_file'] - self.sudo_pass = delegate['sudo_pass'] + self.become_pass = delegate.get('become_pass',delegate.get('sudo_pass')) inject = delegate['inject'] # set resolved delegate_to into inject so modules can call _remote_checksum inject['delegate_to'] = self.delegate_to @@ -945,7 +933,7 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port, # user/pass may still contain variables at this stage actual_user = template.template(self.basedir, actual_user, inject) actual_pass = template.template(self.basedir, actual_pass, inject) - self.sudo_pass = template.template(self.basedir, self.sudo_pass, inject) + self.become_pass = template.template(self.basedir, self.become_pass, inject) # make actual_user available as __magic__ ansible_ssh_user variable inject['ansible_ssh_user'] = actual_user @@ -1134,7 +1122,7 @@ def _late_needs_tmp_path(self, conn, tmp, module_style): if "tmp" in tmp: # tmp has already been created return False - if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.su: + if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.become_method == 'su': # tmp is necessary to store module source code return True if not conn.has_pipelining: @@ -1150,62 +1138,54 @@ def _late_needs_tmp_path(self, conn, tmp, module_style): # ***************************************************** def 
_low_level_exec_command(self, conn, cmd, tmp, sudoable=False, - executable=None, su=False, in_data=None): + executable=None, become=False, in_data=None): ''' execute a command string over SSH, return the output ''' + # this can be skipped with powershell modules when there is no analog to a Windows command (like chmod) + if cmd: - if not cmd: - # this can happen with powershell modules when there is no analog to a Windows command (like chmod) - return dict(stdout='', stderr='') + if executable is None: + executable = C.DEFAULT_EXECUTABLE - if executable is None: - executable = C.DEFAULT_EXECUTABLE + become_user = self.become_user - sudo_user = self.sudo_user - su_user = self.su_user + # compare connection user to (su|sudo)_user and disable if the same + # assume connection type is local if no user attribute + this_user = getattr(conn, 'user', getpass.getuser()) + if (not become and this_user == become_user): + sudoable = False + become = False - # compare connection user to (su|sudo)_user and disable if the same - # assume connection type is local if no user attribute - this_user = getattr(conn, 'user', getpass.getuser()) - if (not su and this_user == sudo_user) or (su and this_user == su_user): - sudoable = False - su = False - - if su: - rc, stdin, stdout, stderr = conn.exec_command(cmd, - tmp, - su=su, - su_user=su_user, - executable=executable, - in_data=in_data) - else: rc, stdin, stdout, stderr = conn.exec_command(cmd, tmp, - sudo_user, + become_user=become_user, sudoable=sudoable, executable=executable, in_data=in_data) - if type(stdout) not in [ str, unicode ]: - out = ''.join(stdout.readlines()) - else: - out = stdout + if type(stdout) not in [ str, unicode ]: + out = ''.join(stdout.readlines()) + else: + out = stdout - if type(stderr) not in [ str, unicode ]: - err = ''.join(stderr.readlines()) - else: - err = stderr + if type(stderr) not in [ str, unicode ]: + err = ''.join(stderr.readlines()) + else: + err = stderr + + if rc is not None: + return 
dict(rc=rc, stdout=out, stderr=err) + else: + return dict(stdout=out, stderr=err) + + return dict(rc=None, stdout='', stderr='') - if rc is not None: - return dict(rc=rc, stdout=out, stderr=err) - else: - return dict(stdout=out, stderr=err) # ***************************************************** - def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, su=False): + def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, become=False): ''' issue a remote chmod command ''' cmd = conn.shell.chmod(mode, path) - return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, su=su) + return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, become=become) # ***************************************************** @@ -1217,13 +1197,11 @@ def _remote_expand_user(self, conn, path, tmp): split_path = path.split(os.path.sep, 1) expand_path = split_path[0] if expand_path == '~': - if self.sudo and self.sudo_user: - expand_path = '~%s' % self.sudo_user - elif self.su and self.su_user: - expand_path = '~%s' % self.su_user + if self.become and self.become_user: + expand_path = '~%s' % self.become_user cmd = conn.shell.expand_user(expand_path) - data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, su=False) + data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, become=False) initial_fragment = utils.last_non_blank_line(data['stdout']) if not initial_fragment: @@ -1287,11 +1265,11 @@ def _make_tmp_path(self, conn): ''' make and return a temporary path on a remote box ''' basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) use_system_tmp = False - if (self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root'): + if self.become and self.become_user != 'root': use_system_tmp = True tmp_mode = None - if self.remote_user != 'root' or ((self.sudo and self.sudo_user != 'root') or (self.su and self.su_user != 'root')): + if self.remote_user != 'root' or (self.become and self.become_user 
!= 'root'): tmp_mode = 'a+rx' cmd = conn.shell.mkdtemp(basefile, use_system_tmp, tmp_mode) diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py index 287e9348655dee..33a4838e322b6d 100644 --- a/lib/ansible/runner/action_plugins/assemble.py +++ b/lib/ansible/runner/action_plugins/assemble.py @@ -125,7 +125,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** xfered = self.runner._transfer_str(conn, tmp, 'src', resultant) # fix file permissions when the copy is done as a different user - if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': + if self.runner.become and self.runner.become_user != 'root': self.runner._remote_chmod(conn, 'a+r', xfered, tmp) # run the copy module diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py index 9f6797a02aa57a..a6a5cb5a27b625 100644 --- a/lib/ansible/runner/action_plugins/copy.py +++ b/lib/ansible/runner/action_plugins/copy.py @@ -234,7 +234,7 @@ def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=Non self._remove_tempfile_if_content_defined(content, content_tempfile) # fix file permissions when the copy is done as a different user - if (self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root') and not raw: + if self.runner.become and self.runner.become_user != 'root' and not raw: self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path) if raw: diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/runner/action_plugins/fetch.py index 94e930fdb3fded..27d2f6b3c63aed 100644 --- a/lib/ansible/runner/action_plugins/fetch.py +++ b/lib/ansible/runner/action_plugins/fetch.py @@ -78,7 +78,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** # use slurp if sudo and permissions are lacking remote_data = None - if remote_checksum in 
('1', '2') or self.runner.sudo: + if remote_checksum in ('1', '2') or self.runner.become: slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject) if slurpres.is_successful(): if slurpres.result['encoding'] == 'base64': diff --git a/lib/ansible/runner/action_plugins/patch.py b/lib/ansible/runner/action_plugins/patch.py index 8af5dabae8a358..dbba4c53dd7889 100644 --- a/lib/ansible/runner/action_plugins/patch.py +++ b/lib/ansible/runner/action_plugins/patch.py @@ -50,7 +50,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** tmp_src = tmp + src conn.put_file(src, tmp_src) - if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': + if self.runner.become and self.runner.become_user != 'root': if not self.runner.noop_on_check(inject): self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) diff --git a/lib/ansible/runner/action_plugins/script.py b/lib/ansible/runner/action_plugins/script.py index 1159428b4c1c2f..e4c5ec075f30ab 100644 --- a/lib/ansible/runner/action_plugins/script.py +++ b/lib/ansible/runner/action_plugins/script.py @@ -113,8 +113,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** sudoable = True # set file permissions, more permissive when the copy is done as a different user - if ((self.runner.sudo and self.runner.sudo_user != 'root') or - (self.runner.su and self.runner.su_user != 'root')): + if self.runner.become and self.runner.become_user != 'root': chmod_mode = 'a+rx' sudoable = False else: diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py index 8a8555a204c4b5..f8e57ae314e395 100644 --- a/lib/ansible/runner/action_plugins/synchronize.py +++ b/lib/ansible/runner/action_plugins/synchronize.py @@ -78,7 +78,7 @@ def setup(self, module_name, inject): # Store original transport and sudo values. 
self.original_transport = inject.get('ansible_connection', self.runner.transport) - self.original_sudo = self.runner.sudo + self.original_become = self.runner.become self.transport_overridden = False if inject.get('delegate_to') is None: @@ -87,7 +87,7 @@ def setup(self, module_name, inject): if self.original_transport != 'local': inject['ansible_connection'] = 'local' self.transport_overridden = True - self.runner.sudo = False + self.runner.become = False def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs): @@ -143,7 +143,7 @@ def run(self, conn, tmp, module_name, module_args, # use a delegate host instead of localhost use_delegate = True - # COMPARE DELEGATE, HOST AND TRANSPORT + # COMPARE DELEGATE, HOST AND TRANSPORT process_args = False if not dest_host is src_host and self.original_transport != 'local': # interpret and inject remote host info into src or dest @@ -160,7 +160,7 @@ def run(self, conn, tmp, module_name, module_args, if not use_delegate or not user: user = inject.get('ansible_ssh_user', self.runner.remote_user) - + if use_delegate: # FIXME private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file) @@ -172,7 +172,7 @@ def run(self, conn, tmp, module_name, module_args, if not private_key is None: private_key = os.path.expanduser(private_key) options['private_key'] = private_key - + # use the mode to define src and dest's url if options.get('mode', 'push') == 'pull': # src is a remote path: @, dest is a local path @@ -192,7 +192,7 @@ def run(self, conn, tmp, module_name, module_args, rsync_path = options.get('rsync_path', None) # If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument. 
- if not rsync_path and self.transport_overridden and self.original_sudo and not dest_is_local: + if not rsync_path and self.transport_overridden and self.original_become and not dest_is_local and self.runner.become_method == 'sudo': rsync_path = 'sudo rsync' # make sure rsync path is quoted. @@ -206,8 +206,8 @@ def run(self, conn, tmp, module_name, module_args, # run the module and store the result result = self.runner._execute_module(conn, tmp, 'synchronize', module_args, complex_args=options, inject=inject) - # reset the sudo property - self.runner.sudo = self.original_sudo + # reset the sudo property + self.runner.become = self.original_become return result diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index cceee020316684..e6e33d354f6bd8 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -133,7 +133,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** xfered = self.runner._transfer_str(conn, tmp, 'source', resultant) # fix file permissions when the copy is done as a different user - if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': + if self.runner.become and self.runner.become_user != 'root': self.runner._remote_chmod(conn, 'a+r', xfered, tmp) # run the copy module diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index 7cf61006c3f565..db94ac26e7d707 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -99,7 +99,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** # handle check mode client side # fix file permissions when the copy is done as a different user if copy: - if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su
and self.runner.su_user != 'root': + if self.runner.become and self.runner.become_user != 'root': if not self.runner.noop_on_check(inject): self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) # Build temporary module_args. diff --git a/lib/ansible/runner/action_plugins/win_copy.py b/lib/ansible/runner/action_plugins/win_copy.py index 28362195c965a6..a62dfb99857d15 100644 --- a/lib/ansible/runner/action_plugins/win_copy.py +++ b/lib/ansible/runner/action_plugins/win_copy.py @@ -230,7 +230,7 @@ def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=Non self._remove_tempfile_if_content_defined(content, content_tempfile) # fix file permissions when the copy is done as a different user - if (self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root') and not raw: + if self.runner.become and self.runner.become_user != 'root' and not raw: self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path) if raw: diff --git a/lib/ansible/runner/action_plugins/win_template.py b/lib/ansible/runner/action_plugins/win_template.py index e32a5806c4b4c3..7bde4bd510e2f8 100644 --- a/lib/ansible/runner/action_plugins/win_template.py +++ b/lib/ansible/runner/action_plugins/win_template.py @@ -109,7 +109,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** xfered = self.runner._transfer_str(conn, tmp, 'source', resultant) # fix file permissions when the copy is done as a different user - if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root': + if self.runner.become and self.runner.become_user != 'root': self.runner._remote_chmod(conn, 'a+r', xfered, tmp) # run the copy module diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/lib/ansible/runner/connection_plugins/accelerate.py index a31124e119f655..0627267c16b215 100644 --- a/lib/ansible/runner/connection_plugins/accelerate.py +++ 
b/lib/ansible/runner/connection_plugins/accelerate.py @@ -50,6 +50,7 @@ def __init__(self, runner, host, port, user, password, private_key_file, *args, self.accport = port[1] self.is_connected = False self.has_pipelining = False + self.become_methods_supported=['sudo'] if not self.port: self.port = constants.DEFAULT_REMOTE_PORT @@ -226,11 +227,11 @@ def validate_user(self): else: return response.get('rc') == 0 - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' - if su or su_user: - raise AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") @@ -238,8 +239,8 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable if executable == "": executable = constants.DEFAULT_EXECUTABLE - if self.runner.sudo and sudoable and sudo_user: - cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) + if self.runner.become and sudoable: + cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe) vvv("EXEC COMMAND %s" % cmd) @@ -292,8 +293,8 @@ def put_file(self, in_path, out_path): if fd.tell() >= fstat.st_size: last = True data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last) - if self.runner.sudo: - data['user'] = self.runner.sudo_user + if self.runner.become: + data['user'] = self.runner.become_user data = 
utils.jsonify(data) data = utils.encrypt(self.key, data) diff --git a/lib/ansible/runner/connection_plugins/chroot.py b/lib/ansible/runner/connection_plugins/chroot.py index 38c8af7a69096c..3e960472879603 100644 --- a/lib/ansible/runner/connection_plugins/chroot.py +++ b/lib/ansible/runner/connection_plugins/chroot.py @@ -24,6 +24,7 @@ from ansible import errors from ansible import utils from ansible.callbacks import vvv +import ansible.constants as C class Connection(object): ''' Local chroot based connections ''' @@ -31,6 +32,7 @@ class Connection(object): def __init__(self, runner, host, port, *args, **kwargs): self.chroot = host self.has_pipelining = False + self.become_methods_supported=C.BECOME_METHODS if os.geteuid() != 0: raise errors.AnsibleError("chroot connection requires running as root") @@ -60,16 +62,16 @@ def connect(self, port=None): return self - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the chroot ''' - if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We enter chroot as root so sudo stuff can be ignored + # We enter chroot as root so we ignore privilege escalation
if executable: local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] diff --git a/lib/ansible/runner/connection_plugins/fireball.py b/lib/ansible/runner/connection_plugins/fireball.py index dd9e09bacda6d6..562fc2eccf94da 100644 --- a/lib/ansible/runner/connection_plugins/fireball.py +++ b/lib/ansible/runner/connection_plugins/fireball.py @@ -53,6 +53,8 @@ def __init__(self, runner, host, port, *args, **kwargs): else: self.port = port + self.become_methods_supported=[] + def connect(self): ''' activates the connection object ''' @@ -64,11 +66,11 @@ def connect(self): socket = self.context.socket(zmq.REQ) addr = "tcp://%s:%s" % (self.host, self.port) socket.connect(addr) - self.socket = socket + self.socket = socket return self - def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=None): + def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' if in_data: @@ -76,7 +78,7 @@ def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bi vvv("EXEC COMMAND %s" % cmd) - if (self.runner.sudo and sudoable) or (self.runner.su and su): + if self.runner.become and sudoable: raise errors.AnsibleError( "When using fireball, do not specify sudo or su to run your tasks. " + "Instead sudo the fireball action with sudo. 
" + diff --git a/lib/ansible/runner/connection_plugins/funcd.py b/lib/ansible/runner/connection_plugins/funcd.py index 7244abcbe9a65b..92b7f53605baab 100644 --- a/lib/ansible/runner/connection_plugins/funcd.py +++ b/lib/ansible/runner/connection_plugins/funcd.py @@ -53,16 +53,14 @@ def connect(self, port=None): self.client = fc.Client(self.host) return self - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, - executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, + executable='/bin/sh', in_data=None): ''' run a command on the remote minion ''' - if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") - if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") + # totally ignores privlege escalation vvv("EXEC %s" % (cmd), host=self.host) p = self.client.command.run(cmd)[self.host] return (p[0], '', p[1], p[2]) diff --git a/lib/ansible/runner/connection_plugins/jail.py b/lib/ansible/runner/connection_plugins/jail.py index b721ad62b50ab1..c7b61bc638cd4f 100644 --- a/lib/ansible/runner/connection_plugins/jail.py +++ b/lib/ansible/runner/connection_plugins/jail.py @@ -24,6 +24,7 @@ import subprocess from ansible import errors from ansible.callbacks import vvv +import ansible.constants as C class Connection(object): ''' Local chroot based connections ''' @@ -61,6 +62,7 @@ def __init__(self, runner, host, port, *args, **kwargs): self.runner = runner self.host = host self.has_pipelining = False + self.become_methods_supported=C.BECOME_METHODS if os.geteuid() != 0: raise errors.AnsibleError("jail connection requires running as root") @@ -91,16 +93,16 @@ def _generate_cmd(self, executable, cmd): local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd) return local_cmd - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, 
executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the chroot ''' - if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We enter chroot as root so sudo stuff can be ignored + # Ignores privilege escalation local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.jail) diff --git a/lib/ansible/runner/connection_plugins/libvirt_lxc.py b/lib/ansible/runner/connection_plugins/libvirt_lxc.py index c6cf11f2667fb2..34cdb592b246b7 100644 --- a/lib/ansible/runner/connection_plugins/libvirt_lxc.py +++ b/lib/ansible/runner/connection_plugins/libvirt_lxc.py @@ -22,6 +22,7 @@ import subprocess from ansible import errors from ansible.callbacks import vvv +import ansible.constants as C class Connection(object): ''' Local lxc based connections ''' @@ -50,6 +51,7 @@ def __init__(self, runner, host, port, *args, **kwargs): self.host = host # port is unused, since this is local self.port = port + self.become_methods_supported=C.BECOME_METHODS def connect(self, port=None): ''' connect to the lxc; nothing to do here ''' @@ -65,16 +67,16 @@ def _generate_cmd(self, executable, cmd): local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd) return local_cmd - def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', 
in_data=None): ''' run a command on the chroot ''' - if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We enter lxc as root so sudo stuff can be ignored + # We ignore privilege escalation! local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.lxc) diff --git a/lib/ansible/runner/connection_plugins/local.py b/lib/ansible/runner/connection_plugins/local.py index e282076ee1efb2..beaeb1ae50e45f 100644 --- a/lib/ansible/runner/connection_plugins/local.py +++ b/lib/ansible/runner/connection_plugins/local.py @@ -26,6 +26,7 @@ from ansible import utils from ansible.callbacks import vvv + class Connection(object): ''' Local based connections ''' @@ -33,31 +34,34 @@ def __init__(self, runner, host, port, *args, **kwargs): self.runner = runner self.host = host # port is unused, since this is local - self.port = port + self.port = port self.has_pipelining = False + # TODO: add su(needs tty), pbrun, pfexec + self.become_methods_supported=['sudo'] + def connect(self, port=None): ''' connect to the local host; nothing to do here ''' return self - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the local host ''' # su requires to be run from a terminal, and therefore isn't supported here (yet?)
- if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - if not self.runner.sudo or not sudoable: + if self.runner.become and sudoable: + local_cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '-H', self.runner.become_exe) + else: if executable: local_cmd = executable.split() + ['-c', cmd] else: local_cmd = cmd - else: - local_cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) executable = executable.split()[0] if executable else None vvv("EXEC %s" % (local_cmd), host=self.host) @@ -66,13 +70,19 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if self.runner.sudo and sudoable and self.runner.sudo_pass: + if self.runner.become and sudoable and self.runner.become_pass: fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) - sudo_output = '' - while not sudo_output.endswith(prompt) and success_key not in sudo_output: + become_output = '' + while success_key not in become_output: + + if prompt and become_output.endswith(prompt): + break + if utils.su_prompts.check_su_prompt(become_output): + break + rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self.runner.timeout) if p.stdout in rfd: @@ -81,13 +91,13 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable chunk = 
p.stderr.read() else: stdout, stderr = p.communicate() - raise errors.AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output) + raise errors.AnsibleError('timeout waiting for %s password prompt:\n' % self.runner.become_method + become_output) if not chunk: stdout, stderr = p.communicate() - raise errors.AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output) - sudo_output += chunk - if success_key not in sudo_output: - p.stdin.write(self.runner.sudo_pass + '\n') + raise errors.AnsibleError('%s output closed while waiting for password prompt:\n' % self.runner.become_method + become_output) + become_output += chunk + if success_key not in become_output: + p.stdin.write(self.runner.become_pass + '\n') fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/lib/ansible/runner/connection_plugins/paramiko_ssh.py index 4bb06e01c36147..2ba3d76d26a7aa 100644 --- a/lib/ansible/runner/connection_plugins/paramiko_ssh.py +++ b/lib/ansible/runner/connection_plugins/paramiko_ssh.py @@ -125,6 +125,9 @@ def __init__(self, runner, host, port, user, password, private_key_file, *args, self.private_key_file = private_key_file self.has_pipelining = False + # TODO: add pbrun, pfexec + self.become_methods_supported=['sudo', 'su', 'pbrun'] + def _cache_key(self): return "%s__%s__" % (self.host, self.user) @@ -184,9 +187,12 @@ def _connect_uncached(self): return ssh - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' + if self.runner.become and sudoable and self.runner.become_method not in self.become_methods_supported: + 
raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) + if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") @@ -206,7 +212,7 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable no_prompt_out = '' no_prompt_err = '' - if not (self.runner.sudo and sudoable) and not (self.runner.su and su): + if not (self.runner.become and sudoable): if executable: quoted_command = executable + ' -c ' + pipes.quote(cmd) @@ -224,50 +230,46 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0))) - if self.runner.sudo or sudoable: - shcmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) - elif self.runner.su or su: - shcmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd) + if self.runner.become and sudoable: + shcmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe) vvv("EXEC %s" % shcmd, host=self.host) - sudo_output = '' + become_output = '' try: chan.exec_command(shcmd) - if self.runner.sudo_pass or self.runner.su_pass: + if self.runner.become_pass: while True: - if success_key in sudo_output or \ - (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \ - (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)): + if success_key in become_output or \ + (prompt and become_output.endswith(prompt)) or \ + utils.su_prompts.check_su_prompt(become_output)): break chunk = chan.recv(bufsize) if not chunk: - if 'unknown user' in sudo_output: + if 'unknown user' in become_output: raise errors.AnsibleError( - 'user %s does not exist' % sudo_user) + 'user %s does not exist' % become_user) else: raise errors.AnsibleError('ssh 
connection ' + 'closed waiting for password prompt') - sudo_output += chunk + become_output += chunk - if success_key not in sudo_output: + if success_key not in become_output: if sudoable: - chan.sendall(self.runner.sudo_pass + '\n') - elif su: - chan.sendall(self.runner.su_pass + '\n') + chan.sendall(self.runner.become_pass + '\n') else: - no_prompt_out += sudo_output - no_prompt_err += sudo_output + no_prompt_out += become_output + no_prompt_err += become_output except socket.timeout: - raise errors.AnsibleError('ssh timed out waiting for sudo.\n' + sudo_output) + raise errors.AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output) stdout = ''.join(chan.makefile('rb', bufsize)) stderr = ''.join(chan.makefile_stderr('rb', bufsize)) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index b1743963c0f0aa..02b7f0b4072619 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -34,6 +34,7 @@ from ansible import errors from ansible import utils + class Connection(object): ''' ssh based connections ''' @@ -48,6 +49,9 @@ def __init__(self, runner, host, port, user, password, private_key_file, *args, self.HASHED_KEY_MAGIC = "|1|" self.has_pipelining = True + # TODO: add pbrun, pfexec + self.become_methods_supported=['sudo', 'su', 'pbrun'] + fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX) self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700) fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN) @@ -140,7 +144,7 @@ def _send_password(self): os.write(self.wfd, "%s\n" % self.password) os.close(self.wfd) - def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): + def _communicate(self, p, stdin, indata, sudoable=False, prompt=None): fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, 
fcntl.F_GETFL) & ~os.O_NONBLOCK) # We can't use p.communicate here because the ControlMaster may have stdout open as well @@ -157,23 +161,20 @@ def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): while True: rfd, wfd, efd = select.select(rpipes, [], rpipes, 1) - # fail early if the sudo/su password is wrong - if self.runner.sudo and sudoable: - if self.runner.sudo_pass: + # fail early if the become password is wrong + if self.runner.become and sudoable: + if self.runner.become_pass: incorrect_password = gettext.dgettext( - "sudo", "Sorry, try again.") + "Privilege Escalation", "Sorry, try again.") if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): - raise errors.AnsibleError('Incorrect sudo password') - - if stdout.endswith(prompt): - raise errors.AnsibleError('Missing sudo password') + raise errors.AnsibleError('Incorrect become password') - if self.runner.su and su and self.runner.su_pass: - incorrect_password = gettext.dgettext( - "su", "Sorry") - if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): - raise errors.AnsibleError('Incorrect su password') + if prompt: + if stdout.endswith(prompt): + raise errors.AnsibleError('Missing become password') + elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): + raise errors.AnsibleError('Incorrect become password') if p.stdout in rfd: dat = os.read(p.stdout.fileno(), 9000) @@ -256,9 +257,12 @@ def not_in_host_file(self, host): vvv("EXEC previous known host file not found for %s" % host) return True - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=False): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support
running commands via %s" % self.runner.become_method) + ssh_cmd = self._password_cmd() ssh_cmd += ["ssh", "-C"] if not in_data: @@ -276,25 +280,22 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable ssh_cmd += ['-6'] ssh_cmd += [self.host] - if su and su_user: - sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd) - ssh_cmd.append(sudocmd) - elif not self.runner.sudo or not sudoable: + if self.runner.become and sudoable: + becomecmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe) + ssh_cmd.append(becomecmd) + else: prompt = None if executable: ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd)) else: ssh_cmd.append(cmd) - else: - sudocmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) - ssh_cmd.append(sudocmd) vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host) not_in_host_file = self.not_in_host_file(self.host) if C.HOST_KEY_CHECKING and not_in_host_file: - # lock around the initial SSH connectivity so the user prompt about whether to add + # lock around the initial SSH connectivity so the user prompt about whether to add # the host to known hosts is not intermingled with multiprocess output. 
fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX) fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX) @@ -306,9 +307,8 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable no_prompt_out = '' no_prompt_err = '' - if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \ - (self.runner.su and su and self.runner.su_pass): - # several cases are handled for sudo privileges with password + if self.runner.become and sudoable and self.runner.become_pass: + # several cases are handled for escalated privileges with password # * NOPASSWD (tty & no-tty): detect success_key on stdout # * without NOPASSWD: # * detect prompt on stdout (tty) @@ -317,13 +317,14 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) - sudo_output = '' - sudo_errput = '' + become_output = '' + become_errput = '' - while True: - if success_key in sudo_output or \ - (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \ - (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)): + while success_key not in become_output: + + if prompt and become_output.endswith(prompt): + break + if utils.su_prompts.check_su_prompt(become_output): break rfd, wfd, efd = select.select([p.stdout, p.stderr], [], @@ -331,36 +332,34 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable if p.stderr in rfd: chunk = p.stderr.read() if not chunk: - raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt') - sudo_errput += chunk + raise errors.AnsibleError('ssh connection closed waiting for a privilege escalation password prompt') + become_errput += chunk incorrect_password = gettext.dgettext( - "sudo", "Sorry, try again.") - if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)): - raise 
errors.AnsibleError('Incorrect sudo password') - elif prompt and sudo_errput.endswith(prompt): - stdin.write(self.runner.sudo_pass + '\n') + "become", "Sorry, try again.") + if become_errput.strip().endswith("%s%s" % (prompt, incorrect_password)): + raise errors.AnsibleError('Incorrect become password') + elif prompt and become_errput.endswith(prompt): + stdin.write(self.runner.become_pass + '\n') if p.stdout in rfd: chunk = p.stdout.read() if not chunk: - raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt') - sudo_output += chunk + raise errors.AnsibleError('ssh connection closed waiting for %s password prompt' % self.runner.become_method) + become_output += chunk if not rfd: # timeout. wrap up process communication stdout = p.communicate() - raise errors.AnsibleError('ssh connection error waiting for sudo or su password prompt') + raise errors.AnsibleError('ssh connection error while waiting for %s password prompt' % self.runner.become_method) - if success_key not in sudo_output: + if success_key not in become_output: if sudoable: - stdin.write(self.runner.sudo_pass + '\n') - elif su: - stdin.write(self.runner.su_pass + '\n') + stdin.write(self.runner.become_pass + '\n') else: - no_prompt_out += sudo_output - no_prompt_err += sudo_errput + no_prompt_out += become_output + no_prompt_err += become_errput - (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt) + (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt) if C.HOST_KEY_CHECKING and not_in_host_file: # lock around the initial SSH connectivity so the user prompt about whether to add diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/runner/connection_plugins/winrm.py index 93145e46968713..7a2d6d3318ddbb 100644 --- a/lib/ansible/runner/connection_plugins/winrm.py +++ b/lib/ansible/runner/connection_plugins/winrm.py @@ -72,6 +72,10 @@ def 
__init__(self, runner, host, port, user, password, *args, **kwargs): self.shell_id = None self.delegate = None + # Add runas support + #self.become_methods_supported=['runas'] + self.become_methods_supported=[] + def _winrm_connect(self): ''' Establish a WinRM connection over HTTP/HTTPS. @@ -143,7 +147,11 @@ def connect(self): self.protocol = self._winrm_connect() return self - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable=None, in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None): + + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) + cmd = cmd.encode('utf-8') cmd_parts = shlex.split(cmd, posix=False) if '-EncodedCommand' in cmd_parts: diff --git a/lib/ansible/runner/connection_plugins/zone.py b/lib/ansible/runner/connection_plugins/zone.py index 16bef1a2134cd7..211bd0fbcc63f8 100644 --- a/lib/ansible/runner/connection_plugins/zone.py +++ b/lib/ansible/runner/connection_plugins/zone.py @@ -26,6 +26,7 @@ from subprocess import Popen,PIPE from ansible import errors from ansible.callbacks import vvv +import ansible.constants as C class Connection(object): ''' Local zone based connections ''' @@ -68,6 +69,7 @@ def __init__(self, runner, host, port, *args, **kwargs): self.runner = runner self.host = host self.has_pipelining = False + self.become_methods_supported=C.BECOME_METHODS if os.geteuid() != 0: raise errors.AnsibleError("zone connection requires running as root") @@ -98,17 +100,16 @@ def _generate_cmd(self, executable, cmd): local_cmd = '%s "%s" %s' % (self.zlogin_cmd, self.zone, cmd) return local_cmd - #def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): - def exec_command(self, cmd, 
tmp_path, sudo_user=None, sudoable=False, executable=None, in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None): ''' run a command on the zone ''' - if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We enter zone as root so sudo stuff can be ignored + # We happily ignore privilege escalation if executable == '/bin/sh': executable = None local_cmd = self._generate_cmd(executable, cmd) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 433c30db6a0b62..3745f0d43089f8 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -992,14 +992,12 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, default=constants.DEFAULT_HOST_LIST) parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) + parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user', + help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER) parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', help='ask for SSH password') - parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', + parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', help='use this file to authenticate the connection') - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', 
action='store_true', - help='ask for sudo password') - parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', - help='ask for su password') parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE, @@ -1025,22 +1023,35 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, help='log output to this directory') if runas_opts: - parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", - dest='sudo', help="run operations with sudo (nopasswd)") + # priv user defaults to root later on to enable detecting when this option was given here + parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + help='ask for sudo password (deprecated, use become)') + parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', + help='ask for su password (deprecated, use become)') + parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo', + help="run operations with sudo (nopasswd) (deprecated, use become)") parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, - help='desired sudo user (default=root)') # Can't default to root because we need to detect when this option was given - parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, - dest='remote_user', help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER) + help='desired sudo user (default=root) (deprecated, use become)') + parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true', + help='run operations with su (deprecated, use become)') + parser.add_option('-R', '--su-user', default=None, + help='run operations with su as this user (default=%s) (deprecated, use become)' % 
constants.DEFAULT_SU_USER) + + # consolidated privilege escalation (become) + parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become', + help="run operations with become (nopasswd implied)") + parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string', + help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS))) + parser.add_option('--become-user', default=None, dest='become_user', type='string', + help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER) + parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', + help='ask for privilege escalation password') - parser.add_option('-S', '--su', default=constants.DEFAULT_SU, - action='store_true', help='run operations with su') - parser.add_option('-R', '--su-user', help='run operations with su as this ' - 'user (default=%s)' % constants.DEFAULT_SU_USER) if connect_opts: parser.add_option('-c', '--connection', dest='connection', - default=C.DEFAULT_TRANSPORT, - help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) + default=constants.DEFAULT_TRANSPORT, + help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT) if async_opts: parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int', @@ -1059,7 +1070,6 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, help="when changing (small) files and templates, show the differences in those files; works great with --check" ) - return parser def parse_extra_vars(extra_vars_opts, vault_pass): @@ -1106,41 +1116,58 @@ def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_ return vault_pass, new_vault_pass -def ask_passwords(ask_pass=False, ask_sudo_pass=False, ask_su_pass=False, ask_vault_pass=False): +def 
ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD): sshpass = None - sudopass = None - supass = None + becomepass = None vaultpass = None - sudo_prompt = "sudo password: " - su_prompt = "su password: " + become_prompt = '' if ask_pass: sshpass = getpass.getpass(prompt="SSH password: ") + become_prompt = "%s password[defaults to SSH password]: " % become_method.upper() if sshpass: sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') - sudo_prompt = "sudo password [defaults to SSH password]: " - su_prompt = "su password [defaults to SSH password]: " - - if ask_sudo_pass: - sudopass = getpass.getpass(prompt=sudo_prompt) - if ask_pass and sudopass == '': - sudopass = sshpass - if sudopass: - sudopass = to_bytes(sudopass, errors='strict', nonstring='simplerepr') - - if ask_su_pass: - supass = getpass.getpass(prompt=su_prompt) - if ask_pass and supass == '': - supass = sshpass - if supass: - supass = to_bytes(supass, errors='strict', nonstring='simplerepr') + else: + become_prompt = "%s password: " % become_method.upper() + + if become_ask_pass: + becomepass = getpass.getpass(prompt=become_prompt) + if ask_pass and becomepass == '': + becomepass = sshpass + if becomepass: + becomepass = to_bytes(becomepass) if ask_vault_pass: vaultpass = getpass.getpass(prompt="Vault password: ") if vaultpass: vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip() - return (sshpass, sudopass, supass, vaultpass) + return (sshpass, becomepass, vaultpass) + + +def choose_pass_prompt(options): + + if options.ask_su_pass: + return 'su' + elif options.ask_sudo_pass: + return 'sudo' + + return options.become_method + +def normalize_become_options(options): + + options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS + options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER 
+ + if options.become: + pass + elif options.sudo: + options.become = True + options.become_method = 'sudo' + elif options.su: + options.become = True + options.become_method = 'su' + def do_encrypt(result, encrypt, salt_size=None, salt=None): if PASSLIB_AVAILABLE: @@ -1194,38 +1221,63 @@ def boolean(value): else: return False +def make_become_cmd(cmd, user, shell, method, flags=None, exe=None): + """ + helper function for connection plugins to create privilege escalation commands + """ + + randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) + success_key = 'BECOME-SUCCESS-%s' % randbits + prompt = None + becomecmd = None + + shell = shell or '$SHELL' + + if method == 'sudo': + # Rather than detect if sudo wants a password this time, -k makes sudo always ask for + # a password if one is required. Passing a quoted compound command to sudo (or sudo -s) + # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted + # string to the user's shell. We loop reading output until we see the randomly-generated + # sudo prompt set with the -p option. 
+ prompt = '[sudo via ansible, key=%s] password: ' % randbits + exe = exe or C.DEFAULT_SUDO_EXE + becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \ + (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, 'echo %s; %s' % (success_key, cmd)) + + elif method == 'su': + exe = exe or C.DEFAULT_SU_EXE + flags = flags or C.DEFAULT_SU_FLAGS + becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd))) + + elif method == 'pbrun': + exe = exe or 'pbrun' + flags = flags or '' + becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, 'echo %s; %s' % (success_key,cmd)) + + elif method == 'pfexec': + exe = exe or 'pfexec' + flags = flags or '' + # No user as it uses its own exec_attr to figure it out + becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd)) + + if becomecmd is None: + raise errors.AnsibleError("Privilege escalation method not found: %s" % method) + + return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key) + + def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd): """ helper function for connection plugins to create sudo commands """ - # Rather than detect if sudo wants a password this time, -k makes - # sudo always ask for a password if one is required. - # Passing a quoted compound command to sudo (or sudo -s) - # directly doesn't work, so we shellquote it with pipes.quote() - # and pass the quoted string to the user's shell. We loop reading - # output until we see the randomly-generated sudo prompt set with - # the -p option. 
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) - prompt = '[sudo via ansible, key=%s] password: ' % randbits - success_key = 'SUDO-SUCCESS-%s' % randbits - sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % ( - sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, - prompt, sudo_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd))) - return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key) + return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe) def make_su_cmd(su_user, executable, cmd): """ Helper function for connection plugins to create direct su commands """ - # TODO: work on this function - randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) - success_key = 'SUDO-SUCCESS-%s' % randbits - sudocmd = '%s %s %s -c "%s -c %s"' % ( - C.DEFAULT_SU_EXE, C.DEFAULT_SU_FLAGS, su_user, executable or '$SHELL', - pipes.quote('echo %s; %s' % (success_key, cmd)) - ) - return ('/bin/sh -c ' + pipes.quote(sudocmd), None, success_key) + return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE) def get_diff(diff): # called by --diff usage in playbook and runner via callbacks @@ -1577,9 +1629,9 @@ def update_hash(hash, key, new_value): hash[key] = value def censor_unlogged_data(data): - ''' + ''' used when the no_log: True attribute is passed to a task to keep data from a callback. 
- NOT intended to prevent variable registration, but only things from showing up on + NOT intended to prevent variable registration, but only things from showing up on screen ''' new_data = {} @@ -1589,5 +1641,19 @@ def censor_unlogged_data(data): new_data['censored'] = 'results hidden due to no_log parameter' return new_data +def check_mutually_exclusive_privilege(options, parser): + + # privilege escalation command line arguments need to be mutually exclusive + if (options.su or options.su_user or options.ask_su_pass) and \ + (options.sudo or options.sudo_user or options.ask_sudo_pass) or \ + (options.su or options.su_user or options.ask_su_pass) and \ + (options.become or options.become_user or options.become_ask_pass) or \ + (options.sudo or options.sudo_user or options.ask_sudo_pass) and \ + (options.become or options.become_user or options.become_ask_pass): + + parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " + "and su arguments ('--su', '--su-user', and '--ask-su-pass') " + "and become arguments ('--become', '--become-user', and '--ask-become-pass')" + " are exclusive of each other") + - diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index 47203194821b7e..54c905bdf6e413 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -3,6 +3,8 @@ roles: # In destructive because it creates and removes a user - { role: test_sudo, tags: test_sudo} + #- { role: test_su, tags: test_su} # wait till su support is added to local connection, needs tty + - { role: test_become, tags: test_become} + - { role: test_service, tags: test_service } # Current pip unconditionally uses md5. 
We can re-enable if pip switches # to a different hash or allows us to not check md5 diff --git a/test/integration/roles/test_become/files/baz.txt b/test/integration/roles/test_become/files/baz.txt new file mode 100644 index 00000000000000..b8d834daa430c3 --- /dev/null +++ b/test/integration/roles/test_become/files/baz.txt @@ -0,0 +1 @@ +testing tilde expansion with become diff --git a/test/integration/roles/test_become/tasks/main.yml b/test/integration/roles/test_become/tasks/main.yml new file mode 100644 index 00000000000000..1b007596453085 --- /dev/null +++ b/test/integration/roles/test_become/tasks/main.yml @@ -0,0 +1,77 @@ +- include_vars: default.yml + +- name: Create test user + become: True + become_user: root + user: + name: "{{ become_test_user }}" + +- name: test becoming user + shell: whoami + become: True + become_user: "{{ become_test_user }}" + register: results + +- assert: + that: + - "results.stdout == '{{ become_test_user }}'" + +- name: tilde expansion honors become in file + become: True + become_user: "{{ become_test_user }}" + file: + path: "~/foo.txt" + state: touch + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ become_test_user }}/foo.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: tilde expansion honors become in template + become: True + become_user: "{{ become_test_user }}" + template: + src: "bar.j2" + dest: "~/bar.txt" + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ become_test_user }}/bar.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: tilde expansion honors become in copy + become: True + become_user: "{{ become_test_user }}" + copy: + src: baz.txt + dest: "~/baz.txt" + +- name: check that the path in the user's home dir was created + 
stat: + path: "~{{ become_test_user }}/baz.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ become_test_user }}'" + +- name: Remove test user and their home dir + become: True + become_user: root + user: + name: "{{ become_test_user }}" + state: "absent" + remove: "yes" + diff --git a/test/integration/roles/test_become/templates/bar.j2 b/test/integration/roles/test_become/templates/bar.j2 new file mode 100644 index 00000000000000..7c5fe0ab49cd4b --- /dev/null +++ b/test/integration/roles/test_become/templates/bar.j2 @@ -0,0 +1 @@ +{{ become_test_user }} diff --git a/test/integration/roles/test_become/vars/default.yml b/test/integration/roles/test_become/vars/default.yml new file mode 100644 index 00000000000000..223d44ed24ea1b --- /dev/null +++ b/test/integration/roles/test_become/vars/default.yml @@ -0,0 +1 @@ +become_test_user: ansibletest1 diff --git a/test/integration/roles/test_su/files/baz.txt b/test/integration/roles/test_su/files/baz.txt new file mode 100644 index 00000000000000..7e677748a26718 --- /dev/null +++ b/test/integration/roles/test_su/files/baz.txt @@ -0,0 +1 @@ +testing tilde expansion with su diff --git a/test/integration/roles/test_su/tasks/main.yml b/test/integration/roles/test_su/tasks/main.yml new file mode 100644 index 00000000000000..65e9b2306f7ddd --- /dev/null +++ b/test/integration/roles/test_su/tasks/main.yml @@ -0,0 +1,75 @@ +- include_vars: default.yml + +- name: Create test user + su: True + user: + name: "{{ su_test_user }}" + +- name: test becoming user + shell: whoami + su: True + su_user: "{{ su_test_user }}" + register: results + +- assert: + that: + - "results.stdout == '{{ su_test_user }}'" + +- name: tilde expansion honors su in file + su: True + su_user: "{{ su_test_user }}" + file: + path: "~/foo.txt" + state: touch + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ su_test_user }}/foo.txt" + register: results 
+ +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ su_test_user }}'" + +- name: tilde expansion honors su in template + su: True + su_user: "{{ su_test_user }}" + template: + src: "bar.j2" + dest: "~/bar.txt" + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ su_test_user }}/bar.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ su_test_user }}'" + +- name: tilde expansion honors su in copy + su: True + su_user: "{{ su_test_user }}" + copy: + src: baz.txt + dest: "~/baz.txt" + +- name: check that the path in the user's home dir was created + stat: + path: "~{{ su_test_user }}/baz.txt" + register: results + +- assert: + that: + - "results.stat.exists == True" + - "results.stat.path|dirname|basename == '{{ su_test_user }}'" + +- name: Remove test user and their home dir + su: True + user: + name: "{{ su_test_user }}" + state: "absent" + remove: "yes" + diff --git a/test/integration/roles/test_su/templates/bar.j2 b/test/integration/roles/test_su/templates/bar.j2 new file mode 100644 index 00000000000000..0f420227e06d7f --- /dev/null +++ b/test/integration/roles/test_su/templates/bar.j2 @@ -0,0 +1 @@ +{{ su_test_user }} diff --git a/test/integration/roles/test_su/vars/default.yml b/test/integration/roles/test_su/vars/default.yml new file mode 100644 index 00000000000000..bb0da6b25d68f5 --- /dev/null +++ b/test/integration/roles/test_su/vars/default.yml @@ -0,0 +1 @@ +su_test_user: ansibletest1 diff --git a/test/integration/roles/test_sudo/tasks/main.yml b/test/integration/roles/test_sudo/tasks/main.yml index 022e7d742280df..372f175d294b4a 100644 --- a/test/integration/roles/test_sudo/tasks/main.yml +++ b/test/integration/roles/test_sudo/tasks/main.yml @@ -1,9 +1,20 @@ - include_vars: default.yml - name: Create test user + sudo: true user: name: "{{ sudo_test_user }}" +- name: test becoming user + shell: 
whoami + sudo: True + sudo_user: "{{ sudo_test_user }}" + register: results + +- assert: + that: + - "results.stdout == '{{ sudo_test_user }}'" + - name: tilde expansion honors sudo in file sudo: True sudo_user: "{{ sudo_test_user }}" @@ -56,6 +67,7 @@ - "results.stat.path|dirname|basename == '{{ sudo_test_user }}'" - name: Remove test user and their home dir + sudo: true user: name: "{{ sudo_test_user }}" state: "absent" diff --git a/test/units/TestPlayVarsFiles.py b/test/units/TestPlayVarsFiles.py index f241936a12efed..497c3112ede0d4 100644 --- a/test/units/TestPlayVarsFiles.py +++ b/test/units/TestPlayVarsFiles.py @@ -41,6 +41,9 @@ def __init__(self): self.sudo_user = None self.su = None self.su_user = None + self.become = None + self.become_method = None + self.become_user = None self.transport = None self.only_tags = None self.skip_tags = None diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py index be8a8af1293add..d8a85e20e76e95 100644 --- a/test/units/TestSynchronize.py +++ b/test/units/TestSynchronize.py @@ -18,6 +18,9 @@ def __init__(self): self.remote_user = None self.private_key_file = None self.check = False + self.become = False + self.become_method = False + self.become_user = False def _execute_module(self, conn, tmp, module_name, args, async_jid=None, async_module=None, async_limit=None, inject=None, @@ -76,7 +79,7 @@ def test_synchronize_action_sudo(self): """ verify the synchronize action plugin unsets and then sets sudo """ runner = FakeRunner() - runner.sudo = True + runner.become = True runner.remote_user = "root" runner.transport = "ssh" conn = FakeConn() @@ -97,7 +100,7 @@ def test_synchronize_action_sudo(self): assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar', 'src':'/tmp/foo', 'rsync_path':'"sudo rsync"'}, "wrong args used" - assert runner.sudo == True, "sudo was not reset to True" + assert runner.become == True, "sudo was not reset to True" def test_synchronize_action_local(self): diff 
--git a/test/units/TestUtils.py b/test/units/TestUtils.py index 0ba1586cda6476..c0ca9ba5388ce8 100644 --- a/test/units/TestUtils.py +++ b/test/units/TestUtils.py @@ -498,7 +498,7 @@ def test_make_sudo_cmd(self): self.assertEqual(len(cmd), 3) self.assertTrue('-u root' in cmd[0]) self.assertTrue('-p "[sudo via ansible, key=' in cmd[0] and cmd[1].startswith('[sudo via ansible, key')) - self.assertTrue('echo SUDO-SUCCESS-' in cmd[0] and cmd[2].startswith('SUDO-SUCCESS-')) + self.assertTrue('echo BECOME-SUCCESS-' in cmd[0] and cmd[2].startswith('BECOME-SUCCESS-')) self.assertTrue('sudo -k' in cmd[0]) def test_make_su_cmd(self): @@ -506,7 +506,7 @@ def test_make_su_cmd(self): self.assertTrue(isinstance(cmd, tuple)) self.assertEqual(len(cmd), 3) self.assertTrue('root -c "/bin/sh' in cmd[0] or ' root -c /bin/sh' in cmd[0]) - self.assertTrue('echo SUDO-SUCCESS-' in cmd[0] and cmd[2].startswith('SUDO-SUCCESS-')) + self.assertTrue('echo BECOME-SUCCESS-' in cmd[0] and cmd[2].startswith('BECOME-SUCCESS-')) def test_to_unicode(self): uni = ansible.utils.unicode.to_unicode(u'ansible') diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 1c2bc092b23cbc..78eeaf8c20c5d2 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -141,16 +141,16 @@ def shell_expand_path(path): DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() - -DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '/usr/share/ansible_plugins/action_plugins') -DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '/usr/share/ansible_plugins/cache_plugins') -DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', 
'/usr/share/ansible_plugins/callback_plugins') -DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '/usr/share/ansible_plugins/connection_plugins') -DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '/usr/share/ansible_plugins/lookup_plugins') -DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '/usr/share/ansible_plugins/vars_plugins') -DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '/usr/share/ansible_plugins/filter_plugins') DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) +DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins') +DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins') +DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins') +DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins') +DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') +DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') +DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') + CACHE_PLUGIN = 
get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts') From 2d73892acfd1a895854fef37eaf798beeeefcdbb Mon Sep 17 00:00:00 2001 From: Shirou WAKAYAMA Date: Wed, 11 Mar 2015 14:50:27 +0900 Subject: [PATCH 0050/3617] use to_unicode() in _jinja2_vars if type is str. --- lib/ansible/utils/template.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 722e33e4c8fa2d..919436895a9a3a 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -33,7 +33,7 @@ import traceback from ansible.utils.string_functions import count_newlines_from_end -from ansible.utils import to_bytes +from ansible.utils import to_bytes, to_unicode class Globals(object): @@ -184,6 +184,8 @@ def __getitem__(self, varname): var = self.vars[varname] # HostVars is special, return it as-is, as is the special variable # 'vars', which contains the vars structure + if type(var) == str: + var = to_unicode(var) if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars): return var else: From f6d8e457abd78b760b04a2e07e9772b8040df616 Mon Sep 17 00:00:00 2001 From: Jeff Widman Date: Wed, 11 Mar 2015 01:20:17 -0700 Subject: [PATCH 0051/3617] Typo: lead --> led --- docsite/rst/intro_dynamic_inventory.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index ddb452e7756779..6734efca1905d4 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -24,7 +24,7 @@ For information about writing your own dynamic inventory source, see :doc:`devel Example: The Cobbler External Inventory Script 
`````````````````````````````````````````````` -It is expected that many Ansible users with a reasonable amount of physical hardware may also be `Cobbler `_ users. (note: Cobbler was originally written by Michael DeHaan and is now lead by James Cammarata, who also works for Ansible, Inc). +It is expected that many Ansible users with a reasonable amount of physical hardware may also be `Cobbler `_ users. (note: Cobbler was originally written by Michael DeHaan and is now led by James Cammarata, who also works for Ansible, Inc). While primarily used to kickoff OS installations and manage DHCP and DNS, Cobbler has a generic layer that allows it to represent data for multiple configuration management systems (even at the same time), and has From a5f533e25d986380f9b0bf661fc580f80d866167 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 11 Mar 2015 09:30:07 -0400 Subject: [PATCH 0052/3617] fixed bad paren in connection plugin --- lib/ansible/runner/connection_plugins/paramiko_ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/lib/ansible/runner/connection_plugins/paramiko_ssh.py index 2ba3d76d26a7aa..8eaf97c3f6d2ec 100644 --- a/lib/ansible/runner/connection_plugins/paramiko_ssh.py +++ b/lib/ansible/runner/connection_plugins/paramiko_ssh.py @@ -246,7 +246,7 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab if success_key in become_output or \ (prompt and become_output.endswith(prompt)) or \ - utils.su_prompts.check_su_prompt(become_output)): + utils.su_prompts.check_su_prompt(become_output): break chunk = chan.recv(bufsize) From 1fd0a78b0e376d6ea4c8d3f1ad8ed68b9470cdfa Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 11 Mar 2015 10:28:10 -0400 Subject: [PATCH 0053/3617] fix issue with ask pass signature --- bin/ansible-playbook | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible-playbook b/bin/ansible-playbook index 
79cbc43d80a4ec..118a0198e4293f 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -121,7 +121,7 @@ def main(args): options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS if options.listhosts or options.syntax or options.listtasks or options.listtags: - (_, _, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass) + (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass) else: options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS # Never ask for an SSH password when we run with local connection From de5eae2007d4138730582e876770d3b24c863753 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 11 Mar 2015 12:18:53 -0400 Subject: [PATCH 0054/3617] fixed traceback when x_user implicitly sets the become method Fixes #10430 Also removed redundant resolution of sudo/su for backwards compatibility which confused the conflict detection code. --- lib/ansible/playbook/play.py | 23 ----------------------- lib/ansible/playbook/task.py | 21 ++++++++++++++++++--- 2 files changed, 18 insertions(+), 26 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 74c6998b22f823..babc059e65f154 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -583,29 +583,6 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, bec included_become_vars[k] = become_vars[k] x[k] = become_vars[k] - ## backwards compat with old sudo/su directives - if 'sudo' in x or 'sudo_user' in x: - included_become_vars['become'] = x['sudo'] - x['become'] = x['sudo'] - x['become_method'] = 'sudo' - del x['sudo'] - - if x.get('sudo_user', False): - included_become_vars['become_user'] = x['sudo_user'] - x['become_user'] = x['sudo_user'] - del x['sudo_user'] - - elif 'su' in x or 'su_user' in x: - included_become_vars['become'] = x['su'] - x['become'] = x['su'] - x['become_method'] = 'su' - del x['su'] - - if x.get('su_user', False): - 
included_become_vars['become_user'] = x['su_user'] - x['become_user'] = x['su_user'] - del x['su_user'] - if 'meta' in x: if x['meta'] == 'flush_handlers': results.append(Task(self, x)) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index a43c2ab89d5872..77cb97e5c0fda1 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -173,19 +173,34 @@ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=No # set only if passed in current task data if 'sudo' in ds or 'sudo_user' in ds: - self.become=ds['sudo'] self.become_method='sudo' + + if 'sudo' in ds: + self.become=ds['sudo'] + del ds['sudo'] + else: + self.become=True if 'sudo_user' in ds: self.become_user = ds['sudo_user'] + del ds['sudo_user'] if 'sudo_pass' in ds: self.become_pass = ds['sudo_pass'] - if 'su' in ds or 'su_user' in ds: - self.become=ds['su'] + del ds['sudo_pass'] + + elif 'su' in ds or 'su_user' in ds: self.become_method='su' + + if 'su' in ds: + self.become=ds['su'] + else: + self.become=True + del ds['su'] if 'su_user' in ds: self.become_user = ds['su_user'] + del ds['su_user'] if 'su_pass' in ds: self.become_pass = ds['su_pass'] + del ds['su_pass'] # Both are defined if ('action' in ds) and ('local_action' in ds): From 747c7aaffa365a397435a05481719148b5ab772f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 11 Mar 2015 12:33:05 -0400 Subject: [PATCH 0055/3617] removed uneeded reference to su_user --- lib/ansible/runner/action_plugins/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index e6e33d354f6bd8..a824a6e4b8e152 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -133,7 +133,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** xfered = self.runner._transfer_str(conn, tmp, 'source', resultant) # fix 
file permissions when the copy is done as a different user - if self.runner.become and self.runner.become_user != 'root' or self.runner.su and self.runner.su_user != 'root': + if self.runner.become and self.runner.become_user != 'root': self.runner._remote_chmod(conn, 'a+r', xfered, tmp) # run the copy module From 587ab17f10cbb1a24782ec404eccfe91cb3ae852 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 11 Mar 2015 15:55:37 -0400 Subject: [PATCH 0056/3617] fixes password error detection for ssh connection plugin removes sycnronize test that does not work with current sudo setup Fixes #10434 --- lib/ansible/constants.py | 3 ++- lib/ansible/runner/connection_plugins/ssh.py | 13 ++++++------- test/units/TestSynchronize.py | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 1779b792fb3c42..20079863e7d636 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -137,7 +137,8 @@ def shell_expand_path(path): DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) #TODO: get rid of ternary chain mess -BECOME_METHODS = ['sudo','su','pbrun','runas','pfexec'] +BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] +BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True) DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root') diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index 
02b7f0b4072619..25a330dcef51a4 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -163,18 +163,17 @@ def _communicate(self, p, stdin, indata, sudoable=False, prompt=None): # fail early if the become password is wrong if self.runner.become and sudoable: - if self.runner.become_pass: - incorrect_password = gettext.dgettext( - "Privilege Escalation", "Sorry, try again.") - if stdout.endswith("%s\r\n%s" % (incorrect_password, - prompt)): - raise errors.AnsibleError('Incorrect become password') + incorrect_password = gettext.dgettext(self.runner.become_method, C.BECOME_ERROR_STRINGS[self.runner.become_method]) if prompt: + if self.runner.become_pass: + if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): + raise errors.AnsibleError('Incorrect become password') + if stdout.endswith(prompt): raise errors.AnsibleError('Missing become password') elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): - raise errors.AnsibleError('Incorrect becom password') + raise errors.AnsibleError('Incorrect become password') if p.stdout in rfd: dat = os.read(p.stdout.fileno(), 9000) diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py index d8a85e20e76e95..991f272001c7e2 100644 --- a/test/units/TestSynchronize.py +++ b/test/units/TestSynchronize.py @@ -97,9 +97,9 @@ def test_synchronize_action_sudo(self): x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject) assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" - assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar', - 'src':'/tmp/foo', - 'rsync_path':'"sudo rsync"'}, "wrong args used" + #assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar', + # 'src':'/tmp/foo', + # 'rsync_path':'"sudo rsync"'}, "wrong args used" assert runner.become == True, "sudo was not reset to True" From f803c1e1f824041bba2d1706e86d76a1551e2cf3 Mon Sep 17 00:00:00 
2001 From: Brian Coca Date: Wed, 11 Mar 2015 16:28:37 -0400 Subject: [PATCH 0057/3617] fix tag test that broke with new tag info displayed in list tasks --- test/integration/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 4f2d4d9338dffc..ac526cf752ecbc 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -84,11 +84,11 @@ test_winrm: test_tags: # Run everything by default - [ "$$(ansible-playbook --list-tasks test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag Task_with_always_tag Task_without_tag" ] + [ "$$(ansible-playbook --list-tasks test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ] # Run the exact tags, and always - [ "$$(ansible-playbook --list-tasks --tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag Task_with_always_tag" ] + [ "$$(ansible-playbook --list-tasks --tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_tag TAGS: [tag] Task_with_always_tag TAGS: [always]" ] # Skip one tag - [ "$$(ansible-playbook --list-tasks --skip-tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_always_tag Task_without_tag" ] + [ "$$(ansible-playbook --list-tasks --skip-tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ] cloud: amazon rackspace From f229b770b2f016b4fc3acb1a6f6c620d96ba8e1c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 11 Mar 2015 
19:23:02 -0400 Subject: [PATCH 0058/3617] fixed missed su to become conversion --- lib/ansible/runner/action_plugins/script.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/script.py b/lib/ansible/runner/action_plugins/script.py index e4c5ec075f30ab..1b1aadc7aadefb 100644 --- a/lib/ansible/runner/action_plugins/script.py +++ b/lib/ansible/runner/action_plugins/script.py @@ -118,7 +118,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** sudoable = False else: chmod_mode = '+rx' - self.runner._remote_chmod(conn, chmod_mode, tmp_src, tmp, sudoable=sudoable, su=self.runner.su) + self.runner._remote_chmod(conn, chmod_mode, tmp_src, tmp, sudoable=sudoable, become=self.runner.become) # add preparation steps to one ssh roundtrip executing the script env_string = self.runner._compute_environment_string(conn, inject) From 597c0f48f53067fa6bce785b86d934d903dd4d1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=BCrgen=20Hermann?= Date: Thu, 12 Mar 2015 02:28:33 +0100 Subject: [PATCH 0059/3617] Generic package_dir mapping in setup.py (closes #10437) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e855ea3bfaf84a..37527414067c4f 100644 --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ url='http://ansible.com/', license='GPLv3', install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'], - package_dir={ 'ansible': 'lib/ansible' }, + package_dir={ '': 'lib' }, packages=find_packages('lib'), package_data={ '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'], From e413dba3a64b27efaec2fd1b173104c65f406358 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 11 Mar 2015 19:10:38 -0700 Subject: [PATCH 0060/3617] Update the module pointers --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/lib/ansible/modules/core b/lib/ansible/modules/core index bd997b1066e1e9..31cc5f543f4166 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit bd997b1066e1e98a66cf98643c78adf8e080e4b4 +Subproject commit 31cc5f543f4166eddb334340fd559765dc6c3940 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index e60b2167f5ebfd..8baba98ebe5053 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit e60b2167f5ebfd642fe04cb22805203764959f7c +Subproject commit 8baba98ebe5053e0c1e71881975ce8a1788f171c From d92e8edf6e7f7b9eff503f268d1c0d11c2ac44a8 Mon Sep 17 00:00:00 2001 From: Shirou WAKAYAMA Date: Thu, 12 Mar 2015 12:36:50 +0900 Subject: [PATCH 0061/3617] set 'nonstring' arg to passthru. --- lib/ansible/utils/template.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 919436895a9a3a..a58b93997157f6 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -184,8 +184,7 @@ def __getitem__(self, varname): var = self.vars[varname] # HostVars is special, return it as-is, as is the special variable # 'vars', which contains the vars structure - if type(var) == str: - var = to_unicode(var) + var = to_unicode(var, nonstring="passthru") if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars): return var else: From 4710a07fb0fad509dbdd546852961c6473276a61 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 11 Mar 2015 20:58:21 -0700 Subject: [PATCH 0062/3617] Test case for #10426 --- test/integration/inventory | 1 + test/integration/unicode.yml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/test/integration/inventory b/test/integration/inventory index 72d80aabebd06a..bee36ce022eaa2 100644 --- a/test/integration/inventory +++ b/test/integration/inventory @@ -15,6 +15,7 @@ invenoverride ansible_ssh_host=127.0.0.1 ansible_connection=local 
[all:vars] extra_var_override=FROM_INVENTORY inven_var=inventory_var +unicode_host_var=CaféEñyei [inven_overridehosts:vars] foo=foo diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index 4e7fe635f452cc..2889155055d647 100644 --- a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -38,6 +38,9 @@ - name: 'A task with unicode extra vars' debug: var=extra_var + - name: 'A task with unicode host vars' + debug: var=unicode_host_var + - name: 'A play for hosts in group: ĪīĬĭ' hosts: 'ĪīĬĭ' From ee831e10712c41511b3d7a3d849a99a0e819773e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 11 Mar 2015 21:28:45 -0700 Subject: [PATCH 0063/3617] Fix v2 for #10426 Note: In v1 we fix this by transforming into unicode just before we use it (when we send it to jinja2) because jinja2 cannot handle non-ascii characters in str. In v2 our model is that all text values need to be stored as unicode type internally. So we transform this to unicode when we read it from the inventory file and save it into the internal dict instead. --- v2/ansible/inventory/ini.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/v2/ansible/inventory/ini.py b/v2/ansible/inventory/ini.py index 075701c056c635..4236140ac88486 100644 --- a/v2/ansible/inventory/ini.py +++ b/v2/ansible/inventory/ini.py @@ -27,6 +27,7 @@ from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import expand_hostname_range +from ansible.utils.unicode import to_unicode class InventoryParser(object): """ @@ -53,7 +54,7 @@ def _parse(self): def _parse_value(v): if "#" not in v: try: - return ast.literal_eval(v) + v = ast.literal_eval(v) # Using explicit exceptions. # Likely a string that literal_eval does not like. We wil then just set it. except ValueError: @@ -62,7 +63,7 @@ def _parse_value(v): except SyntaxError: # Is this a hash with an equals at the end? 
pass - return v + return to_unicode(v, nonstring='passthru', errors='strict') # [webservers] # alpha From ac1493faae40c5d7fd91bf7cde0ac058d9f5c66f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Mar 2015 10:01:00 -0400 Subject: [PATCH 0064/3617] fixed missed conversion of su to become --- lib/ansible/runner/action_plugins/raw.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/raw.py b/lib/ansible/runner/action_plugins/raw.py index 548eafbf7069ae..b1ba2c99d94749 100644 --- a/lib/ansible/runner/action_plugins/raw.py +++ b/lib/ansible/runner/action_plugins/raw.py @@ -44,7 +44,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** module_args = r.sub("", module_args) result = self.runner._low_level_exec_command(conn, module_args, tmp, sudoable=True, executable=executable, - su=self.runner.su) + become=self.runner.become) # for some modules (script, raw), the sudo success key # may leak into the stdout due to the way the sudo/su # command is constructed, so we filter that out here From eb850bf81a99d1c5d695459ea25bfbf2fd9806e7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 12 Mar 2015 10:22:06 -0500 Subject: [PATCH 0065/3617] Fix issue with unarchive disabling pipelining mode Was using persist_files=True when specifying the create paramater, which breaks pipelining. Switched to use delete_remote_tmp=False instead, which is the proper way to preserve the remove tmp dir when running other modules from the action plugin. 
--- lib/ansible/runner/action_plugins/unarchive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py index db94ac26e7d707..312a2265c0f55a 100644 --- a/lib/ansible/runner/action_plugins/unarchive.py +++ b/lib/ansible/runner/action_plugins/unarchive.py @@ -62,7 +62,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** module_args_tmp = "" complex_args_tmp = dict(path=creates, get_md5=False, get_checksum=False) module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject, - complex_args=complex_args_tmp, persist_files=True) + complex_args=complex_args_tmp, delete_remote_tmp=False) stat = module_return.result.get('stat', None) if stat and stat.get('exists', False): return ReturnData( From b1d78a61fca18b95dbf1dfd6a32382ce546c0980 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 12 Mar 2015 12:14:57 -0500 Subject: [PATCH 0066/3617] Initial support for vault in v2 TODO: * password prompting needs to be implemented, but is being worked on as part of the become privilege escalation changes --- v2/ansible/plugins/strategies/linear.py | 13 +++++- v2/ansible/utils/vault.py | 56 +++++++++++++++++++++++++ v2/bin/ansible-playbook | 54 +++++------------------- 3 files changed, 79 insertions(+), 44 deletions(-) create mode 100644 v2/ansible/utils/vault.py diff --git a/v2/ansible/plugins/strategies/linear.py b/v2/ansible/plugins/strategies/linear.py index f8c0e3bee8e25f..c6b9445b2e673b 100644 --- a/v2/ansible/plugins/strategies/linear.py +++ b/v2/ansible/plugins/strategies/linear.py @@ -224,6 +224,7 @@ def __eq__(self, other): def __repr__(self): return "%s (%s): %s" % (self._filename, self._args, self._hosts) + # FIXME: this should also be moved to the base class in a method included_files = [] for res in host_results: if res._task.action == 'include': @@ -253,6 +254,9 @@ def __repr__(self): 
inc_file.add_host(res._host) + # FIXME: should this be moved into the iterator class? Main downside would be + # that accessing the TQM's callback member would be more difficult, if + # we do want to send callbacks from here if len(included_files) > 0: noop_task = Task() noop_task.action = 'meta' @@ -263,7 +267,14 @@ def __repr__(self): for included_file in included_files: # included hosts get the task list while those excluded get an equal-length # list of noop tasks, to make sure that they continue running in lock-step - new_tasks = self._load_included_file(included_file) + try: + new_tasks = self._load_included_file(included_file) + except AnsibleError, e: + for host in included_file._hosts: + iterator.mark_host_failed(host) + # FIXME: callback here? + print(e) + noop_tasks = [noop_task for t in new_tasks] for host in hosts_left: if host in included_file._hosts: diff --git a/v2/ansible/utils/vault.py b/v2/ansible/utils/vault.py new file mode 100644 index 00000000000000..04634aa377b498 --- /dev/null +++ b/v2/ansible/utils/vault.py @@ -0,0 +1,56 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import subprocess + +from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.utils.path import is_executable + +def read_vault_file(vault_password_file): + """ + Read a vault password from a file or if executable, execute the script and + retrieve password from STDOUT + """ + + this_path = os.path.realpath(os.path.expanduser(vault_password_file)) + if not os.path.exists(this_path): + raise AnsibleError("The vault password file %s was not found" % this_path) + + if is_executable(this_path): + try: + # STDERR not captured to make it easier for users to prompt for input in their scripts + p = subprocess.Popen(this_path, stdout=subprocess.PIPE) + except OSError, e: + raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e)) + stdout, stderr = p.communicate() + vault_pass = stdout.strip('\r\n') + else: + try: + f = open(this_path, "rb") + vault_pass=f.read().strip() + f.close() + except (OSError, IOError), e: + raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e)) + + return vault_pass + diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index eafccedcba52c6..bdd9598ec82174 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -15,6 +15,7 @@ from ansible.playbook.task import Task from ansible.utils.cli import base_parser from ansible.utils.unicode import to_unicode from ansible.utils.vars import combine_vars +from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager # Implement an ansible.utils.warning() function later @@ -34,8 +35,8 @@ def main(args): check_opts=True, diff_opts=True ) - #parser.add_option('--vault-password', dest="vault_password", - # help="password for vault encrypted files") + 
parser.add_option('--vault-password', dest="vault_password", + help="password for vault encrypted files") parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) parser.add_option('-t', '--tags', dest='tags', default='all', @@ -61,47 +62,14 @@ def main(args): parser.print_help(file=sys.stderr) return 1 - #--------------------------------------------------------------------------------------------------- - # FIXME: su/sudo stuff needs to be generalized - # su and sudo command line arguments need to be mutually exclusive - #if (options.su or options.su_user or options.ask_su_pass) and \ - # (options.sudo or options.sudo_user or options.ask_sudo_pass): - # parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " - # "and su arguments ('-su', '--su-user', and '--ask-su-pass') are " - # "mutually exclusive") - # - #if (options.ask_vault_pass and options.vault_password_file): - # parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") - # - #sshpass = None - #sudopass = None - #su_pass = None - #vault_pass = None - # - #options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS - # - #if options.listhosts or options.syntax or options.listtasks: - # (_, _, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass) - #else: - # options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS - # # Never ask for an SSH password when we run with local connection - # if options.connection == "local": - # options.ask_pass = False - # options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS - # options.ask_su_pass = options.ask_su_pass or C.DEFAULT_ASK_SU_PASS - # (sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass) - # options.sudo_user = 
options.sudo_user or C.DEFAULT_SUDO_USER - # options.su_user = options.su_user or C.DEFAULT_SU_USER - # - ## read vault_pass from a file - #if not options.ask_vault_pass and options.vault_password_file: - # vault_pass = utils.read_vault_file(options.vault_password_file) - # END FIXME - #--------------------------------------------------------------------------------------------------- - - # FIXME: this hard-coded value will be removed after fixing the removed block - # above, which dealt wtih asking for passwords during runtime - vault_pass = 'testing' + vault_pass = None + if options.ask_vault_pass: + # FIXME: prompt here + pass + elif options.vault_password_file: + # read vault_pass from a file + vault_pass = read_vault_file(options.vault_password_file) + loader = DataLoader(vault_password=vault_pass) extra_vars = {} From b5d23543f0e71ad16dd7926a37acf0c661fe7144 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Mar 2015 14:22:24 -0400 Subject: [PATCH 0067/3617] fixed and reintroduced syncronize test, fakerunner object needed become_method to be it's default 'sudo' --- test/units/TestSynchronize.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py index 991f272001c7e2..cf28ea5d809083 100644 --- a/test/units/TestSynchronize.py +++ b/test/units/TestSynchronize.py @@ -19,7 +19,7 @@ def __init__(self): self.private_key_file = None self.check = False self.become = False - self.become_method = False + self.become_method = 'sudo' self.become_user = False def _execute_module(self, conn, tmp, module_name, args, @@ -97,9 +97,9 @@ def test_synchronize_action_sudo(self): x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject) assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" - #assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar', - # 'src':'/tmp/foo', - # 'rsync_path':'"sudo rsync"'}, "wrong args used" 
+ assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar', + 'src':'/tmp/foo', + 'rsync_path':'"sudo rsync"'}, "wrong args used" assert runner.become == True, "sudo was not reset to True" From 644e50fe34cc89e381b6edb12fb65130709bcfff Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 12 Mar 2015 11:37:57 -0700 Subject: [PATCH 0068/3617] Hash randomization makes one of the heuristic_log_sanitize checks not work. Nothing we can do, when it sanitizes ssh_urls it's simply overzealous. --- test/units/TestModuleUtilsBasic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py index 2ac77764d746a9..5fd3d6b1db462e 100644 --- a/test/units/TestModuleUtilsBasic.py +++ b/test/units/TestModuleUtilsBasic.py @@ -321,7 +321,7 @@ def test_log_sanitize_correctness(self): # the password we can tell some things about the beginning and end of # the data, though: self.assertTrue(ssh_output.startswith("{'")) - self.assertTrue(ssh_output.endswith("'}}}}")) + self.assertTrue(ssh_output.endswith("}")) try: self.assertIn(":********@foo.com/data',", ssh_output) except AttributeError: From 74bf59082df50dbf216caf0de633d63eee1bdcc7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Mar 2015 14:22:24 -0400 Subject: [PATCH 0069/3617] fixed and reintroduced syncronize test, fakerunner object needed become_method to be it's default 'sudo' --- test/units/TestSynchronize.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/units/TestSynchronize.py b/test/units/TestSynchronize.py index 991f272001c7e2..cf28ea5d809083 100644 --- a/test/units/TestSynchronize.py +++ b/test/units/TestSynchronize.py @@ -19,7 +19,7 @@ def __init__(self): self.private_key_file = None self.check = False self.become = False - self.become_method = False + self.become_method = 'sudo' self.become_user = False def _execute_module(self, conn, tmp, module_name, args, @@ -97,9 +97,9 @@ def 
test_synchronize_action_sudo(self): x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject) assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1" - #assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar', - # 'src':'/tmp/foo', - # 'rsync_path':'"sudo rsync"'}, "wrong args used" + assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar', + 'src':'/tmp/foo', + 'rsync_path':'"sudo rsync"'}, "wrong args used" assert runner.become == True, "sudo was not reset to True" From 90886594faccc4a2bed6221172c1e7a74eaa55e5 Mon Sep 17 00:00:00 2001 From: jhermann Date: Thu, 12 Mar 2015 03:07:41 +0100 Subject: [PATCH 0070/3617] added test requirements for pip --- test-requirements.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 test-requirements.txt diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 00000000000000..714b65b7646146 --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,7 @@ +# +# Test requirements +# + +nose +mock +passlib From f05cda6ffc214072afe3e54f280a7ead3ce5623e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 12 Mar 2015 13:20:20 -0700 Subject: [PATCH 0071/3617] Comma is also dependent on position within the hash --- test/units/TestModuleUtilsBasic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/units/TestModuleUtilsBasic.py b/test/units/TestModuleUtilsBasic.py index 5fd3d6b1db462e..5b8be283071951 100644 --- a/test/units/TestModuleUtilsBasic.py +++ b/test/units/TestModuleUtilsBasic.py @@ -323,10 +323,10 @@ def test_log_sanitize_correctness(self): self.assertTrue(ssh_output.startswith("{'")) self.assertTrue(ssh_output.endswith("}")) try: - self.assertIn(":********@foo.com/data',", ssh_output) + self.assertIn(":********@foo.com/data'", ssh_output) except AttributeError: # python2.6 or less's unittest - self.assertTrue(":********@foo.com/data'," in ssh_output, '%s is not present 
in %s' % (":********@foo.com/data',", ssh_output)) + self.assertTrue(":********@foo.com/data'" in ssh_output, '%s is not present in %s' % (":********@foo.com/data'", ssh_output)) # The overzealous-ness here may lead to us changing the algorithm in # the future. We could make it consume less of the data (with the From 3d67e9e0c0df18e5c82e62fdb79820724dbe2577 Mon Sep 17 00:00:00 2001 From: James Laska Date: Tue, 10 Mar 2015 19:38:37 -0400 Subject: [PATCH 0072/3617] Add tox and travis-ci support Add tox integration to run unittests in supported python releases. Travis-CI is used for test execution. Additionally, the unittest TestQuotePgIdentifier was updated to support using assert_raises_regexp on python-2.6. Sample travis-ci output available at https://travis-ci.org/ansible/ansible/builds/54189977 --- .coveragerc | 4 ++++ .gitignore | 1 + .travis.yml | 11 +++++++++++ Makefile | 2 +- README.md | 4 +++- test-requirements.txt | 2 ++ tox.ini | 7 +++++++ 7 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 .coveragerc create mode 100644 .travis.yml create mode 100644 tox.ini diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000000000..812fc3b139483c --- /dev/null +++ b/.coveragerc @@ -0,0 +1,4 @@ +[report] +omit = + */python?.?/* + */site-packages/nose/* diff --git a/.gitignore b/.gitignore index 5fe1d994e3c43e..5d3970a168353d 100644 --- a/.gitignore +++ b/.gitignore @@ -42,6 +42,7 @@ deb-build credentials.yml # test output .coverage +.tox results.xml coverage.xml /test/units/cover-html diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000000000..6e18e06050cd88 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,11 @@ +sudo: false +language: python +env: + - TOXENV=py26 + - TOXENV=py27 +install: + - pip install tox +script: + - tox +after_success: + - coveralls diff --git a/Makefile b/Makefile index f688bd73bf607b..81e24efab367d5 100644 --- a/Makefile +++ b/Makefile @@ -93,7 +93,7 @@ NOSETESTS3 ?= nosetests-3.3 all: clean 
python tests: - PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v # Could do: --with-coverage --cover-package=ansible + PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v --with-coverage --cover-package=ansible --cover-branches newtests: PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w v2/test -v --with-coverage --cover-package=ansible --cover-branches diff --git a/README.md b/README.md index 8bfe58a5433377..e052e78dcde29e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -[![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible) [![PyPI downloads](https://pypip.in/d/ansible/badge.png)](https://pypi.python.org/pypi/ansible) +[![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible) +[![PyPI downloads](https://pypip.in/d/ansible/badge.png)](https://pypi.python.org/pypi/ansible) +[![Build Status](https://travis-ci.org/ansible/ansible.svg?branch=tox_and_travis)](https://travis-ci.org/ansible/ansible) Ansible diff --git a/test-requirements.txt b/test-requirements.txt index 714b65b7646146..abb61ed1e97c1d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,3 +5,5 @@ nose mock passlib +coverage +coveralls diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000000000..7c86e7e08f1ff4 --- /dev/null +++ b/tox.ini @@ -0,0 +1,7 @@ +[tox] +envlist = py26,py27 + +[testenv] +deps = -r{toxinidir}/test-requirements.txt +whitelist_externals = make +commands = make tests From 60acdee0dc95a77f3f322689c75d9e5f965e71a1 Mon Sep 17 00:00:00 2001 From: James Laska Date: Thu, 12 Mar 2015 21:18:29 -0400 Subject: [PATCH 0073/3617] Enable assert_raises_regexp on py26 --- test/units/TestModuleUtilsDatabase.py | 49 +++++++++++++++++---------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/test/units/TestModuleUtilsDatabase.py b/test/units/TestModuleUtilsDatabase.py index 5278d6db5aab12..67da0b60e0bd03 100644 --- a/test/units/TestModuleUtilsDatabase.py +++ b/test/units/TestModuleUtilsDatabase.py 
@@ -1,8 +1,26 @@ import collections import mock import os - -from nose import tools +import re + +from nose.tools import eq_ +try: + from nose.tools import assert_raises_regexp +except ImportError: + # Python < 2.7 + def assert_raises_regexp(expected, regexp, callable, *a, **kw): + try: + callable(*a, **kw) + except expected as e: + if isinstance(regexp, basestring): + regexp = re.compile(regexp) + if not regexp.search(str(e)): + raise Exception('"%s" does not match "%s"' % + (regexp.pattern, str(e))) + else: + if hasattr(expected,'__name__'): excName = expected.__name__ + else: excName = str(expected) + raise AssertionError("%s not raised" % excName) from ansible.module_utils.database import ( pg_quote_identifier, @@ -70,34 +88,31 @@ class TestQuotePgIdentifier(object): } def check_valid_quotes(self, identifier, quoted_identifier): - tools.eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier) + eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier) def test_valid_quotes(self): for identifier in self.valid: yield self.check_valid_quotes, identifier, self.valid[identifier] def check_invalid_quotes(self, identifier, id_type, msg): - if hasattr(tools, 'assert_raises_regexp'): - tools.assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type)) - else: - tools.assert_raises(SQLParseError, pg_quote_identifier, *(identifier, id_type)) + assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type)) def test_invalid_quotes(self): for test in self.invalid: yield self.check_invalid_quotes, test[0], test[1], self.invalid[test] def test_how_many_dots(self): - tools.eq_(pg_quote_identifier('role', 'role'), '"role"') - tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role')) + eq_(pg_quote_identifier('role', 'role'), '"role"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", 
pg_quote_identifier, *('role.more', 'role')) - tools.eq_(pg_quote_identifier('db', 'database'), '"db"') - tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database')) + eq_(pg_quote_identifier('db', 'database'), '"db"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database')) - tools.eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"') - tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema')) + eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema')) - tools.eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"') - tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table')) + eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table')) - tools.eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"') - tools.assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column')) + eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column')) From f451974efe1bc462a21652887c873eaf0c7c335c Mon Sep 17 00:00:00 2001 From: James 
Laska Date: Fri, 13 Mar 2015 10:56:30 -0400 Subject: [PATCH 0074/3617] Use correct URL for travis status badge This uses the `devel` branch when displaying the travis-ci status badge. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index e052e78dcde29e..2a7d8e03af7181 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ [![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible) [![PyPI downloads](https://pypip.in/d/ansible/badge.png)](https://pypi.python.org/pypi/ansible) -[![Build Status](https://travis-ci.org/ansible/ansible.svg?branch=tox_and_travis)](https://travis-ci.org/ansible/ansible) +[![Build Status](https://travis-ci.org/ansible/ansible.svg?branch=devel)](https://travis-ci.org/ansible/ansible) Ansible From 070c7c319ff6c2246c8df402a80370e656e99135 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 13 Mar 2015 11:57:27 -0500 Subject: [PATCH 0075/3617] Started implementing become in v2 --- v2/ansible/constants.py | 20 ++++- v2/ansible/executor/connection_info.py | 102 +++++++++++++++--------- v2/ansible/executor/play_iterator.py | 2 +- v2/ansible/executor/task_executor.py | 1 + v2/ansible/playbook/base.py | 5 ++ v2/ansible/playbook/become.py | 88 ++++++++++++++++++++ v2/ansible/playbook/block.py | 4 +- v2/ansible/playbook/play.py | 20 ++--- v2/ansible/playbook/playbook_include.py | 2 +- v2/ansible/playbook/role/definition.py | 2 +- v2/ansible/playbook/task.py | 12 +-- v2/ansible/plugins/action/__init__.py | 25 +++--- v2/ansible/plugins/connections/local.py | 6 +- v2/ansible/plugins/connections/ssh.py | 28 ++++--- v2/ansible/utils/cli.py | 9 +++ v2/samples/test_become.yml | 7 ++ 16 files changed, 238 insertions(+), 95 deletions(-) create mode 100644 v2/ansible/playbook/become.py create mode 100644 v2/samples/test_become.yml diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 78eeaf8c20c5d2..f2da07ffb02059 100644 --- a/v2/ansible/constants.py +++ 
b/v2/ansible/constants.py @@ -24,7 +24,6 @@ import sys from . compat import configparser - from string import ascii_letters, digits # copied from utils, avoid circular reference fun :) @@ -143,6 +142,19 @@ def shell_expand_path(path): DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) +#TODO: get rid of ternary chain mess +BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] +BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} +DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True) +DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() +DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root') +DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS',True if DEFAULT_ASK_SUDO_PASS else False, boolean=True) +# need to rethink impementing these 2 +DEFAULT_BECOME_EXE = None +#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo') +#DEFAULT_BECOME_FLAGS = get_config(p, DEFAULTS, 'become_flags', 'ANSIBLE_BECOME_FLAGS',DEFAULT_SUDO_FLAGS if DEFAULT_SUDO else DEFAULT_SU_FLAGS if DEFAULT_SU else '-H') + + DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins') DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', 
'~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins') DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins') @@ -168,12 +180,15 @@ def shell_expand_path(path): COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) +RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) +RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') + # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) -# obsolete -- will be formally removed in 1.6 +# obsolete -- will be formally removed ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True) @@ -189,6 +204,7 @@ def shell_expand_path(path): DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" # non-configurable things +DEFAULT_BECOME_PASS = None DEFAULT_SUDO_PASS = None DEFAULT_REMOTE_PASS = None DEFAULT_SUBSET = None diff --git 
a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index 7522ac210c2858..f2eaec630d440b 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -48,16 +48,16 @@ def __init__(self, play=None, options=None): self.password = '' self.port = 22 self.private_key_file = None - self.su = False - self.su_user = '' - self.su_pass = '' - self.sudo = False - self.sudo_user = '' - self.sudo_pass = '' self.verbosity = 0 self.only_tags = set() self.skip_tags = set() + # privilege escalation + self.become = False + self.become_method = C.DEFAULT_BECOME_METHOD + self.become_user = '' + self.become_pass = '' + self.no_log = False self.check_mode = False @@ -84,15 +84,13 @@ def set_play(self, play): if play.connection: self.connection = play.connection - self.remote_user = play.remote_user - self.password = '' - self.port = int(play.port) if play.port else 22 - self.su = play.su - self.su_user = play.su_user - self.su_pass = play.su_pass - self.sudo = play.sudo - self.sudo_user = play.sudo_user - self.sudo_pass = play.sudo_pass + self.remote_user = play.remote_user + self.password = '' + self.port = int(play.port) if play.port else 22 + self.become = play.become + self.become_method = play.become_method + self.become_user = play.become_user + self.become_pass = play.become_pass # non connection related self.no_log = play.no_log @@ -158,7 +156,7 @@ def set_task_override(self, task): new_info = ConnectionInformation() new_info.copy(self) - for attr in ('connection', 'remote_user', 'su', 'su_user', 'su_pass', 'sudo', 'sudo_user', 'sudo_pass', 'environment', 'no_log'): + for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'): if hasattr(task, attr): attr_val = getattr(task, attr) if attr_val: @@ -166,31 +164,58 @@ def set_task_override(self, task): return new_info - def make_sudo_cmd(self, sudo_exe, executable, cmd): + def make_become_cmd(self, 
cmd, shell, become_settings=None): + """ - Helper function for wrapping commands with sudo. - - Rather than detect if sudo wants a password this time, -k makes - sudo always ask for a password if one is required. Passing a quoted - compound command to sudo (or sudo -s) directly doesn't work, so we - shellquote it with pipes.quote() and pass the quoted string to the - user's shell. We loop reading output until we see the randomly- - generated sudo prompt set with the -p option. + helper function to create privilege escalation commands """ - randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) - prompt = '[sudo via ansible, key=%s] password: ' % randbits - success_key = 'SUDO-SUCCESS-%s' % randbits - - sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % ( - sudo_exe, sudo_exe, C.DEFAULT_SUDO_FLAGS, prompt, - self.sudo_user, executable or '$SHELL', - pipes.quote('echo %s; %s' % (success_key, cmd)) - ) - - # FIXME: old code, can probably be removed as it's been commented out for a while - #return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key) - return (sudocmd, prompt, success_key) + # FIXME: become settings should probably be stored in the connection info itself + if become_settings is None: + become_settings = {} + + randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) + success_key = 'BECOME-SUCCESS-%s' % randbits + prompt = None + becomecmd = None + + shell = shell or '$SHELL' + + if self.become_method == 'sudo': + # Rather than detect if sudo wants a password this time, -k makes sudo always ask for + # a password if one is required. Passing a quoted compound command to sudo (or sudo -s) + # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted + # string to the user's shell. We loop reading output until we see the randomly-generated + # sudo prompt set with the -p option. 
+ prompt = '[sudo via ansible, key=%s] password: ' % randbits + exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE) + flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS) + becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \ + (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, shell, 'echo %s; %s' % (success_key, cmd)) + + elif self.become_method == 'su': + exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE) + flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS) + becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, shell, pipes.quote('echo %s; %s' % (success_key, cmd))) + + elif self.become_method == 'pbrun': + exe = become_settings.get('pbrun_exe', 'pbrun') + flags = become_settings.get('pbrun_flags', '') + becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, 'echo %s; %s' % (success_key,cmd)) + + elif self.become_method == 'pfexec': + exe = become_settings.get('pfexec_exe', 'pbrun') + flags = become_settings.get('pfexec_flags', '') + # No user as it uses it's own exec_attr to figure it out + becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd)) + elif self.become: + raise errors.AnsibleError("Privilege escalation method not found: %s" % method) + + return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key) + + def check_become_success(self, output, become_settings): + #TODO: implement + pass def _get_fields(self): return [i for i in self.__dict__.keys() if i[:1] != '_'] @@ -204,4 +229,3 @@ def post_validate(self, variables, loader): for field in self._get_fields(): value = templar.template(getattr(self, field)) setattr(self, field, value) - diff --git a/v2/ansible/executor/play_iterator.py b/v2/ansible/executor/play_iterator.py index 0461fc87f2c594..4a149243d9118d 100644 --- a/v2/ansible/executor/play_iterator.py +++ b/v2/ansible/executor/play_iterator.py @@ -197,7 +197,7 @@ def mark_host_failed(self, host): self._host_states[host.name] = s 
def get_failed_hosts(self): - return dict((host, True) for (host, state) in self._host_states.iteritems() if state.run_state == self.ITERATING_COMPLETE and state.failed_state != self.FAILED_NONE) + return dict((host, True) for (host, state) in self._host_states.iteritems() if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE) def get_original_task(self, host, task): ''' diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index 012fb991949b54..bad47279a5e8f3 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -33,6 +33,7 @@ import json import time +import pipes class TaskExecutor: diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 691de0c9f0f85b..949e6a09fdc652 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -72,6 +72,11 @@ def _get_base_attributes(self): def munge(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' + for base_class in self.__class__.__bases__: + method = getattr(self, ("_munge_%s" % base_class.__name__).lower(), None) + if method: + ds = method(ds) + return ds def load_data(self, ds, variable_manager=None, loader=None): diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py new file mode 100644 index 00000000000000..6ac1d2bad986ca --- /dev/null +++ b/v2/ansible/playbook/become.py @@ -0,0 +1,88 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.playbook.attribute import Attribute, FieldAttribute +#from ansible.utils.display import deprecated + +class Become: + + # Privlege escalation + _become = FieldAttribute(isa='bool', default=False) + _become_method = FieldAttribute(isa='string') + _become_user = FieldAttribute(isa='string') + _become_pass = FieldAttribute(isa='string') + + def __init__(self): + return super(Become, self).__init__() + + def _detect_privilege_escalation_conflict(self, ds): + + # Fail out if user specifies conflicting privelege escalations + has_become = 'become' in ds or 'become_user'in ds + has_sudo = 'sudo' in ds or 'sudo_user' in ds + has_su = 'su' in ds or 'su_user' in ds + + if has_become: + msg = 'The become params ("become", "become_user") and' + if has_sudo: + raise errors.AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg) + elif has_su: + raise errors.AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg) + elif has_sudo and has_su: + raise errors.AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together') + + def _munge_become(self, ds): + + self._detect_privilege_escalation_conflict(ds) + + # Setting user implies setting become/sudo/su to true + if 'become_user' in ds and not ds.get('become', False): + ds['become'] = True + + # Privilege escalation, backwards compatibility for sudo/su + if 'sudo' in ds or 'sudo_user' in ds: + ds['become_method'] = 'sudo' + if 'sudo' in ds: + ds['become'] = ds['sudo'] + del ds['sudo'] + else: + ds['become'] = True + if 'sudo_user' in ds: + ds['become_user'] = 
ds['sudo_user'] + del ds['sudo_user'] + + #deprecated("Instead of sudo/sudo_user, use become/become_user and set become_method to 'sudo' (default)") + + elif 'su' in ds or 'su_user' in ds: + ds['become_method'] = 'su' + if 'su' in ds: + ds['become'] = ds['su'] + del ds['su'] + else: + ds['become'] = True + if 'su_user' in ds: + ds['become_user'] = ds['su_user'] + del ds['su_user'] + + #deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)") + + return ds diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 533b552f22e0a9..49f65a15349452 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -21,6 +21,7 @@ from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base +#from ansible.playbook.become import Become from ansible.playbook.conditional import Conditional from ansible.playbook.helpers import load_list_of_tasks from ansible.playbook.role import Role @@ -80,7 +81,8 @@ def munge(self, ds): return dict(block=ds) else: return dict(block=[ds]) - return ds + + return super(Block, self).munge(ds) def _load_block(self, attr, ds): return load_list_of_tasks( diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index 29c9c04cc8e098..e9847fccd90640 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -23,6 +23,7 @@ from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base +from ansible.playbook.become import Become from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles, compile_block_list from ansible.playbook.role import Role from ansible.playbook.taggable import Taggable @@ -33,7 +34,7 @@ __all__ = ['Play'] -class Play(Base, Taggable): +class Play(Base, Taggable, Become): """ A play is a language feature that represents a list of roles and/or @@ -47,21 +48,19 @@ class Play(Base, Taggable): # 
================================================================================= # Connection-Related Attributes + + # TODO: generalize connection _accelerate = FieldAttribute(isa='bool', default=False) _accelerate_ipv6 = FieldAttribute(isa='bool', default=False) - _accelerate_port = FieldAttribute(isa='int', default=5099) + _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port + + # Connection _connection = FieldAttribute(isa='string', default='smart') _gather_facts = FieldAttribute(isa='string', default='smart') _hosts = FieldAttribute(isa='list', default=[], required=True) _name = FieldAttribute(isa='string', default='') _port = FieldAttribute(isa='int', default=22) _remote_user = FieldAttribute(isa='string', default='root') - _su = FieldAttribute(isa='bool', default=False) - _su_user = FieldAttribute(isa='string', default='root') - _su_pass = FieldAttribute(isa='string') - _sudo = FieldAttribute(isa='bool', default=False) - _sudo_user = FieldAttribute(isa='string', default='root') - _sudo_pass = FieldAttribute(isa='string') # Variable Attributes _vars = FieldAttribute(isa='dict', default=dict()) @@ -101,6 +100,7 @@ def get_name(self): @staticmethod def load(data, variable_manager=None, loader=None): p = Play() + print("in play load, become is: %s" % getattr(p, 'become')) return p.load_data(data, variable_manager=variable_manager, loader=loader) def munge(self, ds): @@ -122,7 +122,7 @@ def munge(self, ds): ds['remote_user'] = ds['user'] del ds['user'] - return ds + return super(Play, self).munge(ds) def _load_vars(self, attr, ds): ''' @@ -187,7 +187,7 @@ def _load_roles(self, attr, ds): roles.append(Role.load(ri)) return roles - # FIXME: post_validation needs to ensure that su/sudo are not both set + # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set def _compile_roles(self): ''' diff --git a/v2/ansible/playbook/playbook_include.py b/v2/ansible/playbook/playbook_include.py index 
159c3d25da8e58..e1d7f6be34f24b 100644 --- a/v2/ansible/playbook/playbook_include.py +++ b/v2/ansible/playbook/playbook_include.py @@ -98,7 +98,7 @@ def munge(self, ds): raise AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds) new_ds[k] = v - return new_ds + return super(PlaybookInclude, self).munge(new_ds) def _munge_include(self, ds, new_ds, k, v): ''' diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py index c9ec4259c17582..d52c6795fb92d4 100644 --- a/v2/ansible/playbook/role/definition.py +++ b/v2/ansible/playbook/role/definition.py @@ -88,7 +88,7 @@ def munge(self, ds): self._ds = ds # and return the cleaned-up data structure - return new_ds + return super(RoleDefinition, self).munge(new_ds) def _load_role_name(self, ds): ''' diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index e6fcc13d2591d5..79ec2df3401ad5 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -29,6 +29,7 @@ from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base +from ansible.playbook.become import Become from ansible.playbook.block import Block from ansible.playbook.conditional import Conditional from ansible.playbook.role import Role @@ -36,7 +37,7 @@ __all__ = ['Task'] -class Task(Base, Conditional, Taggable): +class Task(Base, Conditional, Taggable, Become): """ A task is a language feature that represents a call to a module, with given arguments and other parameters. 
@@ -86,12 +87,6 @@ class Task(Base, Conditional, Taggable): _remote_user = FieldAttribute(isa='string') _retries = FieldAttribute(isa='int', default=1) _run_once = FieldAttribute(isa='bool') - _su = FieldAttribute(isa='bool') - _su_pass = FieldAttribute(isa='string') - _su_user = FieldAttribute(isa='string') - _sudo = FieldAttribute(isa='bool') - _sudo_user = FieldAttribute(isa='string') - _sudo_pass = FieldAttribute(isa='string') _transport = FieldAttribute(isa='string') _until = FieldAttribute(isa='list') # ? _vars = FieldAttribute(isa='dict', default=dict()) @@ -172,6 +167,7 @@ def munge(self, ds): args_parser = ModuleArgsParser(task_ds=ds) (action, args, delegate_to) = args_parser.parse() + new_ds['action'] = action new_ds['args'] = args new_ds['delegate_to'] = delegate_to @@ -186,7 +182,7 @@ def munge(self, ds): else: new_ds[k] = v - return new_ds + return super(Task, self).munge(new_ds) def post_validate(self, all_vars=dict(), fail_on_undefined=True): ''' diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index 1dc9d59aa0841f..46f25ec503c3e3 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -130,7 +130,7 @@ def _late_needs_tmp_path(self, tmp, module_style): if tmp and "tmp" in tmp: # tmp has already been created return False - if not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.su: + if not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.become: # tmp is necessary to store module source code return True if not self._connection._has_pipelining: @@ -152,12 +152,11 @@ def _make_tmp_path(self): basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) use_system_tmp = False - if (self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user 
!= 'root'): + if self._connection_info.become and self._connection_info.become_user != 'root': use_system_tmp = True tmp_mode = None - if self._connection_info.remote_user != 'root' or \ - ((self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root')): + if self._connection_info.remote_user != 'root' or self._connection_info.become and self._connection_info.become_user != 'root': tmp_mode = 'a+rx' cmd = self._shell.mkdtemp(basefile, use_system_tmp, tmp_mode) @@ -291,10 +290,8 @@ def _remote_expand_user(self, path, tmp): split_path = path.split(os.path.sep, 1) expand_path = split_path[0] if expand_path == '~': - if self._connection_info.sudo and self._connection_info.sudo_user: - expand_path = '~%s' % self._connection_info.sudo_user - elif self._connection_info.su and self._connection_info.su_user: - expand_path = '~%s' % self._connection_info.su_user + if self._connection_info.become and self._connection_info.become_user: + expand_path = '~%s' % self._connection_info.become_user cmd = self._shell.expand_user(expand_path) debug("calling _low_level_execute_command to expand the remote user path") @@ -373,7 +370,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_ environment_string = self._compute_environment_string() - if tmp and "tmp" in tmp and ((self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root')): + if tmp and "tmp" in tmp and self._connection_info.become and self._connection_info.become_user != 'root': # deal with possible umask issues once sudo'ed to other user self._remote_chmod(tmp, 'a+r', remote_module_path) @@ -391,7 +388,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_ rm_tmp = None if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: - if not 
self._connection_info.sudo or self._connection_info.su or self._connection_info.sudo_user == 'root' or self._connection_info.su_user == 'root': + if not self._connection_info.become or self._connection_info.become_user == 'root': # not sudoing or sudoing to root, so can cleanup files in the same step rm_tmp = tmp @@ -409,7 +406,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_ debug("_low_level_execute_command returned ok") if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp: - if (self._connection_info.sudo and self._connection_info.sudo_user != 'root') or (self._connection_info.su and self._connection_info.su_user != 'root'): + if self._connection_info.become and self._connection_info.become_user != 'root': # not sudoing to root, so maybe can't delete files as that other user # have to clean up temp files as original user in a second step cmd2 = self._shell.remove(tmp, recurse=True) @@ -457,11 +454,7 @@ def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, i success_key = None if sudoable: - if self._connection_info.su and self._connection_info.su_user: - cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd) - elif self._connection_info.sudo and self._connection_info.sudo_user: - # FIXME: hard-coded sudo_exe here - cmd, prompt, success_key = self._connection_info.make_sudo_cmd('/usr/bin/sudo', executable, cmd) + cmd, prompt, success_key = self._connection_info.make_become_cmd(executable, cmd) debug("executing the command %s through the connection" % cmd) rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, executable=executable, in_data=in_data) diff --git a/v2/ansible/plugins/connections/local.py b/v2/ansible/plugins/connections/local.py index 963c8c2d4ece57..d75ee70159eab3 100644 --- a/v2/ansible/plugins/connections/local.py +++ b/v2/ansible/plugins/connections/local.py @@ -44,8 +44,8 @@ def exec_command(self, 
cmd, tmp_path, executable='/bin/sh', in_data=None): debug("in local.exec_command()") # su requires to be run from a terminal, and therefore isn't supported here (yet?) - if self._connection_info.su: - raise AnsibleError("Internal Error: this module does not support running commands via su") + #if self._connection_info.su: + # raise AnsibleError("Internal Error: this module does not support running commands via su") if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") @@ -57,7 +57,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): # else: # local_cmd = cmd #else: - # local_cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) + # local_cmd, prompt, success_key = utils.make_become_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) if executable: local_cmd = executable.split() + ['-c', cmd] else: diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py index 6c0ab9917c1aff..e5b397f5659fc1 100644 --- a/v2/ansible/plugins/connections/ssh.py +++ b/v2/ansible/plugins/connections/ssh.py @@ -281,19 +281,19 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): # ssh_cmd += ['-6'] ssh_cmd += [self._connection_info.remote_addr] - if not (self._connection_info.sudo or self._connection_info.su): - prompt = None - if executable: - ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd)) - else: - ssh_cmd.append(cmd) - elif self._connection_info.su and self._connection_info.su_user: - su_cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd) - ssh_cmd.append(su_cmd) - else: - # FIXME: hard-coded sudo_exe here - sudo_cmd, prompt, success_key = self._connection_info.make_sudo_cmd('/usr/bin/sudo', executable, cmd) - ssh_cmd.append(sudo_cmd) + #if not (self._connection_info.sudo or self._connection_info.su): + # prompt = None + # if executable: + # ssh_cmd.append(executable + ' -c 
' + pipes.quote(cmd)) + # else: + # ssh_cmd.append(cmd) + #elif self._connection_info.su and self._connection_info.su_user: + # su_cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd) + # ssh_cmd.append(su_cmd) + #else: + # # FIXME: hard-coded sudo_exe here + # sudo_cmd, prompt, success_key = self._connection_info.make_become_cmd('/usr/bin/sudo', executable, cmd) + # ssh_cmd.append(sudo_cmd) self._display.vvv("EXEC %s" % ' '.join(ssh_cmd), host=self._connection_info.remote_addr) @@ -369,6 +369,8 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): # no_prompt_err += sudo_errput #(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt) + # FIXME: the prompt won't be here anymore + prompt="" (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, prompt=prompt) #if C.HOST_KEY_CHECKING and not_in_host_file: diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py index 43aa21470d3c44..f846d6f73ca336 100644 --- a/v2/ansible/utils/cli.py +++ b/v2/ansible/utils/cli.py @@ -59,6 +59,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False, help='ask for sudo password') parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', help='ask for su password') + parser.add_option('--ask-become-pass', default=False, dest='ask_become_pass', action='store_true', + help='ask for privlege escalation password') parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, @@ -84,6 +86,10 @@ def base_parser(usage="", output_opts=False, runas_opts=False, help='log output to this directory') if runas_opts: + parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", + dest='become', help="run operations with become (nopasswd implied)") + 
parser.add_option('-B', '--become-user', help='run operations with as this ' + 'user (default=%s)' % C.DEFAULT_BECOME_USER) parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd)") parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, @@ -100,6 +106,9 @@ def base_parser(usage="", output_opts=False, runas_opts=False, parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) + parser.add_option('--become-method', dest='become_method', + default=C.DEFAULT_BECOME_METHOD, + help="privlege escalation method to use (default=%s)" % C.DEFAULT_BECOME_METHOD) if async_opts: parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml new file mode 100644 index 00000000000000..7e229af5de2639 --- /dev/null +++ b/v2/samples/test_become.yml @@ -0,0 +1,7 @@ +- hosts: all + gather_facts: no + tasks: + - command: whoami + become: yes + become_user: jamesc + become_method: su From 8e346186b2df34b976e7e268cc7446da3f6fac5b Mon Sep 17 00:00:00 2001 From: Michael Crilly Date: Fri, 13 Mar 2015 18:07:18 +0000 Subject: [PATCH 0076/3617] Correct version number. 1.8.4 is the latest stable now, I believe. --- docsite/rst/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index c8d263d01aecf3..1afa47db87d82c 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -16,7 +16,7 @@ We believe simplicity is relevant to all sizes of environments and design for bu Ansible manages machines in an agentless manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. 
As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems. -This documentation covers the current released version of Ansible (1.8.2) and also some development version features (1.9). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. +This documentation covers the current released version of Ansible (1.8.4) and also some development version features (1.9). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. .. 
_an_introduction: From 70f56c135cbb14e4b7206594695e1623dcc1d6c8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 13 Mar 2015 11:44:58 -0700 Subject: [PATCH 0077/3617] Port #10357 to v2 --- v2/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/module_utils/facts.py b/v2/ansible/module_utils/facts.py index 6d602af7366eca..d18615857cc665 100644 --- a/v2/ansible/module_utils/facts.py +++ b/v2/ansible/module_utils/facts.py @@ -2288,7 +2288,7 @@ def get_virtual_facts(self): if os.path.exists('/proc/1/cgroup'): for line in get_file_lines('/proc/1/cgroup'): - if re.search('/docker/', line): + if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line): self.facts['virtualization_type'] = 'docker' self.facts['virtualization_role'] = 'guest' return From 22304afd1db399cfb94ae485486384ca3c9c0e33 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 13 Mar 2015 15:31:20 -0500 Subject: [PATCH 0078/3617] More fixing of become stuff in v2 --- v2/ansible/executor/connection_info.py | 72 +++++++++++++------------ v2/ansible/playbook/play.py | 1 - v2/ansible/plugins/action/__init__.py | 2 +- v2/ansible/plugins/connections/local.py | 19 ++----- v2/ansible/plugins/connections/ssh.py | 15 +----- v2/samples/test_become.yml | 3 +- 6 files changed, 44 insertions(+), 68 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index f2eaec630d440b..b918dc6b2144cf 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -164,7 +164,7 @@ def set_task_override(self, task): return new_info - def make_become_cmd(self, cmd, shell, become_settings=None): + def make_become_cmd(self, cmd, executable, become_settings=None): """ helper function to create privilege escalation commands @@ -179,39 +179,43 @@ def make_become_cmd(self, cmd, shell, become_settings=None): prompt = None becomecmd = None - shell = shell or '$SHELL' - - if self.become_method == 
'sudo': - # Rather than detect if sudo wants a password this time, -k makes sudo always ask for - # a password if one is required. Passing a quoted compound command to sudo (or sudo -s) - # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted - # string to the user's shell. We loop reading output until we see the randomly-generated - # sudo prompt set with the -p option. - prompt = '[sudo via ansible, key=%s] password: ' % randbits - exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE) - flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS) - becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \ - (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, shell, 'echo %s; %s' % (success_key, cmd)) - - elif self.become_method == 'su': - exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE) - flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS) - becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, shell, pipes.quote('echo %s; %s' % (success_key, cmd))) - - elif self.become_method == 'pbrun': - exe = become_settings.get('pbrun_exe', 'pbrun') - flags = become_settings.get('pbrun_flags', '') - becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, 'echo %s; %s' % (success_key,cmd)) - - elif self.become_method == 'pfexec': - exe = become_settings.get('pfexec_exe', 'pbrun') - flags = become_settings.get('pfexec_flags', '') - # No user as it uses it's own exec_attr to figure it out - becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd)) - elif self.become: - raise errors.AnsibleError("Privilege escalation method not found: %s" % method) - - return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key) + executable = executable or '$SHELL' + + if self.become: + if self.become_method == 'sudo': + # Rather than detect if sudo wants a password this time, -k makes sudo always ask for + # a password if one is required. 
Passing a quoted compound command to sudo (or sudo -s) + # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted + # string to the user's shell. We loop reading output until we see the randomly-generated + # sudo prompt set with the -p option. + prompt = '[sudo via ansible, key=%s] password: ' % randbits + exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE) + flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS) + becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \ + (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, 'echo %s; %s' % (success_key, cmd)) + + elif self.become_method == 'su': + exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE) + flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS) + becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, pipes.quote('echo %s; %s' % (success_key, cmd))) + + elif self.become_method == 'pbrun': + exe = become_settings.get('pbrun_exe', 'pbrun') + flags = become_settings.get('pbrun_flags', '') + becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, 'echo %s; %s' % (success_key,cmd)) + + elif self.become_method == 'pfexec': + exe = become_settings.get('pfexec_exe', 'pbrun') + flags = become_settings.get('pfexec_flags', '') + # No user as it uses it's own exec_attr to figure it out + becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd)) + + else: + raise errors.AnsibleError("Privilege escalation method not found: %s" % method) + + return (('%s -c ' % executable) + pipes.quote(becomecmd), prompt, success_key) + + return (cmd, "", "") def check_become_success(self, output, become_settings): #TODO: implement diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index e9847fccd90640..cbe4e038617a82 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -100,7 +100,6 @@ def get_name(self): @staticmethod def load(data, 
variable_manager=None, loader=None): p = Play() - print("in play load, become is: %s" % getattr(p, 'become')) return p.load_data(data, variable_manager=variable_manager, loader=loader) def munge(self, ds): diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index 46f25ec503c3e3..d430bd748beb1f 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -454,7 +454,7 @@ def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, i success_key = None if sudoable: - cmd, prompt, success_key = self._connection_info.make_become_cmd(executable, cmd) + cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd, executable) debug("executing the command %s through the connection" % cmd) rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, executable=executable, in_data=in_data) diff --git a/v2/ansible/plugins/connections/local.py b/v2/ansible/plugins/connections/local.py index d75ee70159eab3..c847ee79d5d0ef 100644 --- a/v2/ansible/plugins/connections/local.py +++ b/v2/ansible/plugins/connections/local.py @@ -50,27 +50,14 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") - # FIXME: su/sudo stuff needs to be generalized - #if not self.runner.sudo or not sudoable: - # if executable: - # local_cmd = executable.split() + ['-c', cmd] - # else: - # local_cmd = cmd - #else: - # local_cmd, prompt, success_key = utils.make_become_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) - if executable: - local_cmd = executable.split() + ['-c', cmd] - else: - local_cmd = cmd - executable = executable.split()[0] if executable else None - self._display.vvv("%s EXEC %s" % (self._connection_info.remote_addr, local_cmd)) + self._display.vvv("%s EXEC %s" % (self._connection_info.remote_addr, cmd)) # FIXME: cwd= needs to be set to the basedir 
of the playbook debug("opening command with Popen()") p = subprocess.Popen( - local_cmd, - shell=isinstance(local_cmd, basestring), + cmd, + shell=isinstance(cmd, basestring), executable=executable, #cwd=... stdin=subprocess.PIPE, stdout=subprocess.PIPE, diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py index e5b397f5659fc1..e233a704f987a4 100644 --- a/v2/ansible/plugins/connections/ssh.py +++ b/v2/ansible/plugins/connections/ssh.py @@ -281,20 +281,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): # ssh_cmd += ['-6'] ssh_cmd += [self._connection_info.remote_addr] - #if not (self._connection_info.sudo or self._connection_info.su): - # prompt = None - # if executable: - # ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd)) - # else: - # ssh_cmd.append(cmd) - #elif self._connection_info.su and self._connection_info.su_user: - # su_cmd, prompt, success_key = self._connection_info.make_su_cmd(executable, cmd) - # ssh_cmd.append(su_cmd) - #else: - # # FIXME: hard-coded sudo_exe here - # sudo_cmd, prompt, success_key = self._connection_info.make_become_cmd('/usr/bin/sudo', executable, cmd) - # ssh_cmd.append(sudo_cmd) - + ssh_cmd.append(cmd) self._display.vvv("EXEC %s" % ' '.join(ssh_cmd), host=self._connection_info.remote_addr) not_in_host_file = self.not_in_host_file(self._connection_info.remote_addr) diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml index 7e229af5de2639..8e753beade313b 100644 --- a/v2/samples/test_become.yml +++ b/v2/samples/test_become.yml @@ -2,6 +2,5 @@ gather_facts: no tasks: - command: whoami - become: yes - become_user: jamesc + become_user: testing become_method: su From 7813ffd719e73670e715e09f0e4256facf453002 Mon Sep 17 00:00:00 2001 From: Chris Blumentritt Date: Fri, 13 Mar 2015 15:35:31 -0500 Subject: [PATCH 0079/3617] Adding uptime_seconds fact for linux and darwin platforms Adds ansible_uptime_seconds facts for linux and darwin platforms. 
BSD platforms may also work. --- lib/ansible/module_utils/facts.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 424c388fb6ee2e..c1951925e4ba71 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -652,6 +652,7 @@ def populate(self): self.get_memory_facts() self.get_dmi_facts() self.get_device_facts() + self.get_uptime_facts() try: self.get_mount_facts() except TimeoutError: @@ -990,6 +991,9 @@ def get_device_facts(self): self.facts['devices'][diskname] = d + def get_uptime_facts(self): + uptime_seconds_string = get_file_content('/proc/uptime').split(' ')[0] + self.facts['uptime_seconds'] = int(float(uptime_seconds_string)) class SunOSHardware(Hardware): """ @@ -1588,6 +1592,7 @@ def populate(self): self.get_mac_facts() self.get_cpu_facts() self.get_memory_facts() + self.get_uptime_facts() return self.facts def get_sysctl(self): @@ -1635,6 +1640,12 @@ def get_memory_facts(self): if rc == 0: self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024 + def get_uptime_facts(self): + kern_boottime = self.sysctl['kern.boottime'] + boottime = datetime.datetime.strptime(kern_boottime, "%a %b %d %H:%M:%S %Y") + delta = datetime.datetime.now() - boottime + self.facts['uptime_seconds'] = int(delta.total_seconds()) + class Network(Facts): """ This is a generic Network subclass of Facts. This should be further From 731b268cd6c76572cd5d66c27be254ccdb2952c6 Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Sun, 15 Mar 2015 07:59:54 +0100 Subject: [PATCH 0080/3617] env-setup: Don't use ${.sh.file} if shell is pdksh The default ksh in OpenBSD throws the following error: === $ . hacking/env-setup ksh: hacking/env-setup[23]: ${.sh.file}": bad substitution [...] === The same error can be seen on Linux if pdksh is used. 
--- hacking/env-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/env-setup b/hacking/env-setup index 9b9a529d13a4e8..16baa9b1b75d25 100644 --- a/hacking/env-setup +++ b/hacking/env-setup @@ -16,7 +16,7 @@ if [ -n "$BASH_SOURCE" ] ; then HACKING_DIR=$(dirname "$BASH_SOURCE") elif [ $(basename -- "$0") = "env-setup" ]; then HACKING_DIR=$(dirname "$0") -elif [ -n "$KSH_VERSION" ]; then +elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then HACKING_DIR=$(dirname "${.sh.file}") else HACKING_DIR="$PWD/hacking" From fbff0449ce08fe2a57724e66938886e203173611 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Sun, 15 Mar 2015 12:20:34 +0000 Subject: [PATCH 0081/3617] fix for issue #10422. outputs informative error message when AWS credentials are not available --- plugins/inventory/ec2.py | 57 ++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 22 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 0f7c19857520d1..617463355f064f 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -334,23 +334,24 @@ def do_api_calls_update_cache(self): self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) + def connect(self, region): + ''' create connection to api server''' + if self.eucalyptus: + conn = boto.connect_euca(host=self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = ec2.connect_to_region(region) + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + raise Exception("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) + return conn def get_instances_by_region(self, region): ''' Makes an AWS EC2 API call to the list of instances in a particular region ''' try: - if self.eucalyptus: - conn = boto.connect_euca(host=self.eucalyptus_host) - conn.APIVersion = '2010-08-31' - else: - conn = ec2.connect_to_region(region) - - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) - sys.exit(1) - + conn = self.connect(region) reservations = [] if self.ec2_instance_filters: for filter_key, filter_values in self.ec2_instance_filters.iteritems(): @@ -363,6 +364,9 @@ def get_instances_by_region(self, region): self.add_instance(instance, region) except boto.exception.BotoServerError, e: + if e.error_code == 'AuthFailure': + self.display_auth_error() + if not self.eucalyptus: print "Looks like AWS is down again:" print e @@ -379,23 +383,33 @@ def get_rds_instances_by_region(self, region): for instance in instances: self.add_rds_instance(instance, region) except boto.exception.BotoServerError, e: + if e.error_code == 'AuthFailure': + self.display_auth_error() + if not e.reason == "Forbidden": print "Looks like AWS RDS is down: " print e sys.exit(1) - def get_instance(self, region, instance_id): - ''' Gets details about a specific instance ''' - if self.eucalyptus: - conn = boto.connect_euca(self.eucalyptus_host) - conn.APIVersion = '2010-08-31' + def display_auth_error(self): + ''' Raise an error with an informative message if there is an issue authenticating''' + errors = ["Authentication error retrieving ec2 inventory."] + if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: + errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') else: - conn = ec2.connect_to_region(region) + errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY 
environment vars found but may not be correct') - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) - sys.exit(1) + boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] + boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) + if len(boto_config_found) > 0: + errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) + else: + errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) + + raise Exception('\n'.join(errors)) + + def get_instance(self, region, instance_id): + conn = self.connect(region) reservations = conn.get_all_instances([instance_id]) for reservation in reservations: @@ -785,4 +799,3 @@ def json_format_dict(self, data, pretty=False): # Run the script Ec2Inventory() - From caf2a96ef9808436f00522b7792a3541301d90eb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 15 Mar 2015 13:22:07 -0700 Subject: [PATCH 0082/3617] Merge pdksh fix to v2 --- hacking/env-setup | 1 + v2/hacking/env-setup | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hacking/env-setup b/hacking/env-setup index 16baa9b1b75d25..f52c91a8b9cf8c 100644 --- a/hacking/env-setup +++ b/hacking/env-setup @@ -16,6 +16,7 @@ if [ -n "$BASH_SOURCE" ] ; then HACKING_DIR=$(dirname "$BASH_SOURCE") elif [ $(basename -- "$0") = "env-setup" ]; then HACKING_DIR=$(dirname "$0") +# Works with ksh93 but not pdksh elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then HACKING_DIR=$(dirname "${.sh.file}") else diff --git a/v2/hacking/env-setup b/v2/hacking/env-setup index fed8e892fdad52..c03fa0874e1ef7 100644 --- a/v2/hacking/env-setup +++ b/v2/hacking/env-setup @@ -16,7 +16,8 @@ if [ -n "$BASH_SOURCE" ] ; then HACKING_DIR=$(dirname 
"$BASH_SOURCE") elif [ $(basename -- "$0") = "env-setup" ]; then HACKING_DIR=$(dirname "$0") -elif [ -n "$KSH_VERSION" ]; then +# Works with ksh93 but not pdksh +elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then HACKING_DIR=$(dirname "${.sh.file}") else HACKING_DIR="$PWD/hacking" From 5eae4353573b35710980cf082c15251e765884ce Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 16 Mar 2015 13:41:36 -0400 Subject: [PATCH 0083/3617] removed Darwin get_uptime_facts as it seems to crash on OS X, will waiy for a patch tested by someone that has access to the platform --- lib/ansible/module_utils/facts.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index c1951925e4ba71..93fe68786d80cf 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1592,7 +1592,6 @@ def populate(self): self.get_mac_facts() self.get_cpu_facts() self.get_memory_facts() - self.get_uptime_facts() return self.facts def get_sysctl(self): @@ -1640,11 +1639,6 @@ def get_memory_facts(self): if rc == 0: self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024 - def get_uptime_facts(self): - kern_boottime = self.sysctl['kern.boottime'] - boottime = datetime.datetime.strptime(kern_boottime, "%a %b %d %H:%M:%S %Y") - delta = datetime.datetime.now() - boottime - self.facts['uptime_seconds'] = int(delta.total_seconds()) class Network(Facts): """ From b783ea94bb83ab62d3351e68320b6b05a95ccb34 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 16 Mar 2015 14:00:07 -0400 Subject: [PATCH 0084/3617] fixed raw return check for privilege escalation --- lib/ansible/runner/action_plugins/raw.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/action_plugins/raw.py b/lib/ansible/runner/action_plugins/raw.py index b1ba2c99d94749..e52296b2e78391 100644 --- a/lib/ansible/runner/action_plugins/raw.py +++ 
b/lib/ansible/runner/action_plugins/raw.py @@ -48,7 +48,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** # for some modules (script, raw), the sudo success key # may leak into the stdout due to the way the sudo/su # command is constructed, so we filter that out here - if result.get('stdout','').strip().startswith('SUDO-SUCCESS-'): - result['stdout'] = re.sub(r'^((\r)?\n)?SUDO-SUCCESS.*(\r)?\n', '', result['stdout']) + if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'): + result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout']) return ReturnData(conn=conn, result=result) From 1bf0e606466b158b539a4229906d3d4c9dcdfc5a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 16 Mar 2015 11:34:55 -0700 Subject: [PATCH 0085/3617] Update core module pointer --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 31cc5f543f4166..ceda82603a5c1d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 31cc5f543f4166eddb334340fd559765dc6c3940 +Subproject commit ceda82603a5c1d2c911a952440d0545fa011edf9 From 37ab61c542dd5758ef2668bbdfa163cadbcc6f24 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 16 Mar 2015 11:46:44 -0700 Subject: [PATCH 0086/3617] Update core pointer to make use of DOCKER_TLS_VERIFY env var: https://github.com/ansible/ansible-modules-core/issues/946 --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index ceda82603a5c1d..34c4e0d4959eea 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit ceda82603a5c1d2c911a952440d0545fa011edf9 +Subproject commit 34c4e0d4959eeaf5dc4d2b69d2bd435267e8ff91 From ada2567dfb912428b4b23f2de9e91ac6b2cbb4b3 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Mon, 16 Mar 
2015 20:00:18 +0000 Subject: [PATCH 0087/3617] log errors and explicitly exit rather than raising exceptions --- plugins/inventory/ec2.py | 43 ++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 617463355f064f..5f7bd061d7210d 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -343,7 +343,7 @@ def connect(self, region): conn = ec2.connect_to_region(region) # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported if conn is None: - raise Exception("region name: %s likely not supported, or AWS is down. connection to region failed." % region) + self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region) return conn def get_instances_by_region(self, region): @@ -365,12 +365,11 @@ def get_instances_by_region(self, region): except boto.exception.BotoServerError, e: if e.error_code == 'AuthFailure': - self.display_auth_error() - - if not self.eucalyptus: - print "Looks like AWS is down again:" - print e - sys.exit(1) + error = self.get_auth_error_message() + else: + backend = 'Eucalyptus' if self.eucalyptus else 'AWS' + error = "Error connecting to %s backend.\n%s" % (backend, e.message) + self.fail_with_error(error) def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular @@ -384,15 +383,13 @@ def get_rds_instances_by_region(self, region): self.add_rds_instance(instance, region) except boto.exception.BotoServerError, e: if e.error_code == 'AuthFailure': - self.display_auth_error() - + error = self.get_auth_error_message() if not e.reason == "Forbidden": - print "Looks like AWS RDS is down: " - print e - sys.exit(1) + error = "Looks like AWS RDS is down:\n%s" % e.message + self.fail_with_error(error) - def display_auth_error(self): - ''' Raise an error with an informative message if 
there is an issue authenticating''' + def get_auth_error_message(self): + ''' create an informative error message if there is an issue authenticating''' errors = ["Authentication error retrieving ec2 inventory."] if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') @@ -406,7 +403,12 @@ def display_auth_error(self): else: errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) - raise Exception('\n'.join(errors)) + return '\n'.join(errors) + + def fail_with_error(self, err_msg): + '''log an error to std err for ansible-playbook to consume and exit''' + sys.stderr.write(err_msg) + sys.exit(1) def get_instance(self, region, instance_id): conn = self.connect(region) @@ -506,9 +508,8 @@ def add_instance(self, instance, region): if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' - sys.exit(1) + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by tag keys if self.group_by_tag_keys: @@ -601,9 +602,9 @@ def add_rds_instance(self, instance, region): self.push_group(self.inventory, 'security_groups', key) except AttributeError: - print 'Package boto seems a bit older.' - print 'Please upgrade boto >= 2.3.0.' 
- sys.exit(1) + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + # Inventory: Group by engine if self.group_by_rds_engine: From a47c1326953c443f1dea723eee18d4ca83518237 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 16 Mar 2015 19:08:34 -0400 Subject: [PATCH 0088/3617] slight changes to allow for checksum and other commands to work correctly with quoting --- lib/ansible/runner/connection_plugins/ssh.py | 18 ++++++++---------- lib/ansible/utils/__init__.py | 8 ++++---- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index 25a330dcef51a4..a7a57a01cf25f1 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -306,7 +306,7 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab no_prompt_out = '' no_prompt_err = '' - if self.runner.become and sudoable and self.runner.become_pass: + if sudoable and self.runner.become and self.runner.become_pass: # several cases are handled for escalated privileges with password # * NOPASSWD (tty & no-tty): detect success_key on stdout # * without NOPASSWD: @@ -319,11 +319,10 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab become_output = '' become_errput = '' - while success_key not in become_output: - - if prompt and become_output.endswith(prompt): - break - if utils.su_prompts.check_su_prompt(become_output): + while True: + if success_key in become_output or \ + (prompt and become_output.endswith(prompt)) or \ + utils.su_prompts.check_su_prompt(become_output): break rfd, wfd, efd = select.select([p.stdout, p.stderr], [], @@ -351,12 +350,11 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab stdout = p.communicate() raise errors.AnsibleError('ssh connection error while waiting for %s password prompt' % 
self.runner.become_method) - if success_key not in become_output: - if sudoable: - stdin.write(self.runner.become_pass + '\n') - else: + if success_key in become_output: no_prompt_out += become_output no_prompt_err += become_errput + elif sudoable: + stdin.write(self.runner.become_pass + '\n') (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 3745f0d43089f8..f164b25bd47cba 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1241,8 +1241,8 @@ def make_become_cmd(cmd, user, shell, method, flags=None, exe=None): # sudo prompt set with the -p option. prompt = '[sudo via ansible, key=%s] password: ' % randbits exe = exe or C.DEFAULT_SUDO_EXE - becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \ - (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, 'echo %s; %s' % (success_key, cmd)) + becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \ + (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd))) elif method == 'su': exe = exe or C.DEFAULT_SU_EXE @@ -1252,13 +1252,13 @@ def make_become_cmd(cmd, user, shell, method, flags=None, exe=None): elif method == 'pbrun': exe = exe or 'pbrun' flags = flags or '' - becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, 'echo %s; %s' % (success_key,cmd)) + becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd))) elif method == 'pfexec': exe = exe or 'pfexec' flags = flags or '' # No user as it uses it's own exec_attr to figure it out - becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd)) + becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd))) if becomecmd is None: raise errors.AnsibleError("Privilege escalation method not found: %s" % method) From bbdcba53da302d10effc57a8232188028060cd44 
Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 16 Mar 2015 19:37:03 -0400 Subject: [PATCH 0089/3617] fixed bug on using su on play level not setting become method correctly --- lib/ansible/modules/core | 2 +- lib/ansible/playbook/play.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 34c4e0d4959eea..31cc5f543f4166 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 34c4e0d4959eeaf5dc4d2b69d2bd435267e8ff91 +Subproject commit 31cc5f543f4166eddb334340fd559765dc6c3940 diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ef097d04813093..edec30df758651 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -172,6 +172,7 @@ def __init__(self, playbook, ds, basedir, vault_password=None): elif 'su' in ds: self.become=True self.become=ds['su'] + self.become_method='su' if 'su_user' in ds: self.become_user=ds['su_user'] From b11be68249eec50a602c401ca31578598ea9dd1a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 16 Mar 2015 19:40:37 -0400 Subject: [PATCH 0090/3617] updated module ref --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 31cc5f543f4166..8658b82de7d279 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 31cc5f543f4166eddb334340fd559765dc6c3940 +Subproject commit 8658b82de7d279ea935c5d04db239fc300003090 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8baba98ebe5053..696bc60caad2ea 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8baba98ebe5053e0c1e71881975ce8a1788f171c +Subproject commit 696bc60caad2ea96c0a70c8091e24b2da060f35c From 316284c56b2f5eab18563a13694e98fc86b68894 Mon Sep 17 00:00:00 2001 From: James 
Cammarata Date: Tue, 17 Mar 2015 10:35:24 -0500 Subject: [PATCH 0091/3617] Making blocks support become, and cleaning up sudo/su references --- v2/ansible/executor/connection_info.py | 7 +++++-- v2/ansible/executor/task_executor.py | 2 +- v2/ansible/playbook/become.py | 9 +++++++++ v2/ansible/playbook/block.py | 24 +++++++++++++++++------- v2/ansible/plugins/action/assemble.py | 2 +- v2/ansible/plugins/action/copy.py | 2 +- v2/ansible/plugins/action/fetch.py | 2 +- v2/ansible/plugins/action/script.py | 3 +-- v2/ansible/plugins/action/template.py | 8 ++++---- v2/ansible/plugins/action/unarchive.py | 2 +- v2/samples/test_become.yml | 4 +++- 11 files changed, 44 insertions(+), 21 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index b918dc6b2144cf..0ae51e6b612e9f 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -157,10 +157,13 @@ def set_task_override(self, task): new_info.copy(self) for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'): + attr_val = None if hasattr(task, attr): attr_val = getattr(task, attr) - if attr_val: - setattr(new_info, attr, attr_val) + if task._block and hasattr(task._block, attr) and not attr_val: + attr_val = getattr(task._block, attr) + if attr_val: + setattr(new_info, attr, attr_val) return new_info diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index bad47279a5e8f3..7eaba0061ef29e 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -382,7 +382,7 @@ def _compute_delegate(self, variables): self._connection_info.password = this_info.get('ansible_ssh_pass', self._connection_info.password) self._connection_info.private_key_file = this_info.get('ansible_ssh_private_key_file', self._connection_info.private_key_file) self._connection_info.connection = 
this_info.get('ansible_connection', self._connection_info.connection) - self._connection_info.sudo_pass = this_info.get('ansible_sudo_pass', self._connection_info.sudo_pass) + self._connection_info.become_pass = this_info.get('ansible_sudo_pass', self._connection_info.become_pass) if self._connection_info.remote_addr in ('127.0.0.1', 'localhost'): self._connection_info.connection = 'local' diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py index 6ac1d2bad986ca..0b0ad10176002e 100644 --- a/v2/ansible/playbook/become.py +++ b/v2/ansible/playbook/become.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError from ansible.playbook.attribute import Attribute, FieldAttribute #from ansible.utils.display import deprecated @@ -85,4 +86,12 @@ def _munge_become(self, ds): #deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)") + # if we are becoming someone else, but some fields are unset, + # make sure they're initialized to the default config values + if ds.get('become', False): + if ds.get('become_method', None) is None: + ds['become_method'] = C.DEFAULT_BECOME_METHOD + if ds.get('become_user', None) is None: + ds['become_user'] = C.DEFAULT_BECOME_USER + return ds diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 49f65a15349452..fa67b6ae1b99d8 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -21,13 +21,13 @@ from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base -#from ansible.playbook.become import Become +from ansible.playbook.become import Become from ansible.playbook.conditional import Conditional from ansible.playbook.helpers import load_list_of_tasks from ansible.playbook.role import Role from ansible.playbook.taggable import Taggable 
-class Block(Base, Conditional, Taggable): +class Block(Base, Become, Conditional, Taggable): _block = FieldAttribute(isa='list', default=[]) _rescue = FieldAttribute(isa='list', default=[]) @@ -71,16 +71,18 @@ def munge(self, ds): If a simple task is given, an implicit block for that single task is created, which goes in the main portion of the block ''' + is_block = False for attr in ('block', 'rescue', 'always'): if attr in ds: is_block = True break + if not is_block: if isinstance(ds, list): - return dict(block=ds) + return super(Block, self).munge(dict(block=ds)) else: - return dict(block=[ds]) + return super(Block, self).munge(dict(block=[ds])) return super(Block, self).munge(ds) @@ -166,7 +168,11 @@ def serialize(self): a task we don't want to include the attribute list of tasks. ''' - data = dict(when=self.when) + data = dict() + for attr in self._get_base_attributes(): + if attr not in ('block', 'rescue', 'always'): + data[attr] = getattr(self, attr) + data['dep_chain'] = self._dep_chain if self._role is not None: @@ -184,8 +190,12 @@ def deserialize(self, data): from ansible.playbook.task import Task - # unpack the when attribute, which is the only one we want - self.when = data.get('when') + # we don't want the full set of attributes (the task lists), as that + # would lead to a serialize/deserialize loop + for attr in self._get_base_attributes(): + if attr in data and attr not in ('block', 'rescue', 'always'): + setattr(self, attr, data.get(attr)) + self._dep_chain = data.get('dep_chain', []) # if there was a serialized role, unpack it too diff --git a/v2/ansible/plugins/action/assemble.py b/v2/ansible/plugins/action/assemble.py index 1ae8be02039141..b1bdc06c6d3873 100644 --- a/v2/ansible/plugins/action/assemble.py +++ b/v2/ansible/plugins/action/assemble.py @@ -117,7 +117,7 @@ def run(self, tmp=None, task_vars=dict()): xfered = self._transfer_data('src', resultant) # fix file permissions when the copy is done as a different user - if 
self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root': + if self._connection_info.become and self._connection_info.become_user != 'root': self._remote_chmod('a+r', xfered, tmp) # run the copy module diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py index 46cb89550265f8..088a806b61b0ae 100644 --- a/v2/ansible/plugins/action/copy.py +++ b/v2/ansible/plugins/action/copy.py @@ -231,7 +231,7 @@ def run(self, tmp=None, task_vars=dict()): self._remove_tempfile_if_content_defined(content, content_tempfile) # fix file permissions when the copy is done as a different user - if (self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root') and not raw: + if (self._connection_info.become and self._connection_info.become_user != 'root': self._remote_chmod('a+r', tmp_src, tmp) if raw: diff --git a/v2/ansible/plugins/action/fetch.py b/v2/ansible/plugins/action/fetch.py index 9bd73136b48d3a..e63fd88ea5c3c4 100644 --- a/v2/ansible/plugins/action/fetch.py +++ b/v2/ansible/plugins/action/fetch.py @@ -57,7 +57,7 @@ def run(self, tmp=None, task_vars=dict()): # use slurp if sudo and permissions are lacking remote_data = None - if remote_checksum in ('1', '2') or self._connection_info.sudo: + if remote_checksum in ('1', '2') or self._connection_info.become: slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), tmp=tmp) if slurpres.get('rc') == 0: if slurpres['encoding'] == 'base64': diff --git a/v2/ansible/plugins/action/script.py b/v2/ansible/plugins/action/script.py index 6e8c1e1b9a4b45..21a9f41c59bfd1 100644 --- a/v2/ansible/plugins/action/script.py +++ b/v2/ansible/plugins/action/script.py @@ -74,8 +74,7 @@ def run(self, tmp=None, task_vars=None): sudoable = True # set file permissions, more permissive when the copy is done as a different user - if 
((self._connection_info.sudo and self._connection_info.sudo_user != 'root') or - (self._connection_info.su and self._connection_info.su_user != 'root')): + if self._connection_info.become and self._connection_info.become_user != 'root': chmod_mode = 'a+rx' sudoable = False else: diff --git a/v2/ansible/plugins/action/template.py b/v2/ansible/plugins/action/template.py index 372c07544d357b..1f7a6955a3220b 100644 --- a/v2/ansible/plugins/action/template.py +++ b/v2/ansible/plugins/action/template.py @@ -26,8 +26,6 @@ class ActionModule(ActionBase): TRANSFERS_FILES = True - - def get_checksum(self, tmp, dest, try_directory=False, source=None): remote_checksum = self._remote_checksum(tmp, dest) @@ -92,7 +90,9 @@ def run(self, tmp=None, task_vars=dict()): # Expand any user home dir specification dest = self._remote_expand_user(dest, tmp) + directory_prepended = False if dest.endswith("/"): # CCTODO: Fix path for Windows hosts. + directory_prepended = True base = os.path.basename(source) dest = os.path.join(dest, base) @@ -105,7 +105,7 @@ def run(self, tmp=None, task_vars=dict()): except Exception, e: return dict(failed=True, msg=type(e).__name__ + ": " + str(e)) - local_checksum = utils.checksum_s(resultant) + local_checksum = checksum_s(resultant) remote_checksum = self.get_checksum(tmp, dest, not directory_prepended, source=source) if isinstance(remote_checksum, dict): # Error from remote_checksum is a dict. 
Valid return is a str @@ -129,7 +129,7 @@ def run(self, tmp=None, task_vars=dict()): xfered = self._transfer_data(self._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user - if self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root': + if self._connection_info.become and self._connection_info.become_user != 'root': self._remote_chmod('a+r', xfered, tmp) # run the copy module diff --git a/v2/ansible/plugins/action/unarchive.py b/v2/ansible/plugins/action/unarchive.py index fab0843e9fe751..f99d7e28e64e08 100644 --- a/v2/ansible/plugins/action/unarchive.py +++ b/v2/ansible/plugins/action/unarchive.py @@ -81,7 +81,7 @@ def run(self, tmp=None, task_vars=dict()): # handle check mode client side # fix file permissions when the copy is done as a different user if copy: - if self._connection_info.sudo and self._connection_info.sudo_user != 'root' or self._connection_info.su and self._connection_info.su_user != 'root': + if self._connection_info.become and self._connection_info.become_user != 'root': # FIXME: noop stuff needs to be reworked #if not self.runner.noop_on_check(task_vars): # self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml index 8e753beade313b..4b02563ca79257 100644 --- a/v2/samples/test_become.yml +++ b/v2/samples/test_become.yml @@ -3,4 +3,6 @@ tasks: - command: whoami become_user: testing - become_method: su + - block: + - command: whoami + become_user: testing From 3473a3bbece56e15a957fd5252d14e5775becb6b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 17 Mar 2015 10:50:41 -0500 Subject: [PATCH 0092/3617] Changes to become cmd formatting, per a47c132 --- v2/ansible/executor/connection_info.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/v2/ansible/executor/connection_info.py 
b/v2/ansible/executor/connection_info.py index 0ae51e6b612e9f..26a14a23f9d1d4 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -194,8 +194,8 @@ def make_become_cmd(self, cmd, executable, become_settings=None): prompt = '[sudo via ansible, key=%s] password: ' % randbits exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE) flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS) - becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c "%s"' % \ - (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, 'echo %s; %s' % (success_key, cmd)) + becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \ + (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, pipes.quote('echo %s; %s' % (success_key, cmd))) elif self.become_method == 'su': exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE) @@ -205,13 +205,13 @@ def make_become_cmd(self, cmd, executable, become_settings=None): elif self.become_method == 'pbrun': exe = become_settings.get('pbrun_exe', 'pbrun') flags = become_settings.get('pbrun_flags', '') - becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, 'echo %s; %s' % (success_key,cmd)) + becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key, cmd))) elif self.become_method == 'pfexec': exe = become_settings.get('pfexec_exe', 'pbrun') flags = become_settings.get('pfexec_flags', '') # No user as it uses it's own exec_attr to figure it out - becomecmd = '%s %s "%s"' % (exe, flags, 'echo %s; %s' % (success_key,cmd)) + becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key, cmd))) else: raise errors.AnsibleError("Privilege escalation method not found: %s" % method) From e42848e0fee906967d36e6606153a1cd0f920b2d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 16 Mar 2015 18:01:59 -0700 Subject: [PATCH 0093/3617] Better comment for why we have get_checksum call itself 
sometimes --- lib/ansible/runner/action_plugins/template.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py index a824a6e4b8e152..5c9be9e079d280 100644 --- a/lib/ansible/runner/action_plugins/template.py +++ b/lib/ansible/runner/action_plugins/template.py @@ -36,7 +36,12 @@ def get_checksum(self, conn, tmp, dest, inject, try_directory=False, source=None if remote_checksum in ('0', '2', '3', '4'): # Note: 1 means the file is not present which is fine; template # will create it. 3 means directory was specified instead of file + # which requires special handling if try_directory and remote_checksum == '3' and source: + # If the user specified a directory name as their dest then we + # have to check the checksum of dest/basename(src). This is + # the same behaviour as cp foo.txt /var/tmp/ so users expect + # it to work. base = os.path.basename(source) dest = os.path.join(dest, base) remote_checksum = self.get_checksum(conn, tmp, dest, inject, try_directory=False) From f9a66a7ff7836274e025b9681f526918b028736d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 17 Mar 2015 11:03:15 -0700 Subject: [PATCH 0094/3617] Update core module pointer --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 8658b82de7d279..ae253593e3a0e3 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8658b82de7d279ea935c5d04db239fc300003090 +Subproject commit ae253593e3a0e3339a136bf57e0a54e62229e8e6 From a64de2e000d9732a5689545c20b527a8ee950c1f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 17 Mar 2015 12:32:07 -0700 Subject: [PATCH 0095/3617] Initial test of the docker module --- test/integration/destructive.yml | 1 + .../roles/test_docker/meta/main.yml | 20 +++++++ .../roles/test_docker/tasks/main.yml | 54 +++++++++++++++++++ 3 files 
changed, 75 insertions(+) create mode 100644 test/integration/roles/test_docker/meta/main.yml create mode 100644 test/integration/roles/test_docker/tasks/main.yml diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index 54c905bdf6e413..b8f56d113bfb36 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -17,3 +17,4 @@ - { role: test_mysql_db, tags: test_mysql_db} - { role: test_mysql_user, tags: test_mysql_user} - { role: test_mysql_variables, tags: test_mysql_variables} + - { role: test_docker, tags: test_docker} diff --git a/test/integration/roles/test_docker/meta/main.yml b/test/integration/roles/test_docker/meta/main.yml new file mode 100644 index 00000000000000..399f3fb6e77f51 --- /dev/null +++ b/test/integration/roles/test_docker/meta/main.yml @@ -0,0 +1,20 @@ +# test code for the service module +# (c) 2014, James Cammarata + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +dependencies: + - prepare_tests diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml new file mode 100644 index 00000000000000..6141fe05348444 --- /dev/null +++ b/test/integration/roles/test_docker/tasks/main.yml @@ -0,0 +1,54 @@ +- name: Install docker packages (yum) + yum: + state: present + name: docker,docker-registry,python-docker-py + when: ansible_distribution in ['RedHat', 'CentOS', 'Fedora'] + +- name: Install docker packages (apt) + apt: + state: present + # Note: add docker-registry when available + name: docker.io,python-docker + when: ansible_distribution in ['Ubuntu', 'Debian'] + +- name: Start docker daemon + service: + name: docker + state: started + +- name: Download busybox image + docker: + image: busybox + state: present + pull: missing + +- name: Run a small script in busybox + docker: + image: busybox + state: reloaded + pull: always + command: "nc -l -p 2000 -e xargs -n1 echo hello" + detach: True + +- name: Get the docker container id + shell: "docker ps | grep busybox | awk '{ print $1 }'" + register: container_id + +- debug: var=container_id + +- name: Get the docker container ip + shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" + register: container_ip + +- debug: var=container_ip + +- name: Try to access the server + shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + register: docker_output + +- debug: var=docker_output + +- name: check that the script ran + assert: + that: + - "'hello world' in docker_output.stdout_lines" From 23291e8d8c0bf5f06303a62a4ba7a8c801bb53a8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 17 Mar 2015 13:18:42 -0700 Subject: [PATCH 0096/3617] Ugh, looks like very few distros have the proper packages to run the docker module. 
break up the tests so that we can maybe run this on at least one platform --- .../test_docker/tasks/docker-setup-rht.yml | 4 ++ .../roles/test_docker/tasks/main.yml | 62 ++++--------------- 2 files changed, 15 insertions(+), 51 deletions(-) create mode 100644 test/integration/roles/test_docker/tasks/docker-setup-rht.yml diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml new file mode 100644 index 00000000000000..26373e4d3c7be8 --- /dev/null +++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml @@ -0,0 +1,4 @@ +- name: Install docker packages (yum) + yum: + state: present + name: docker,docker-registry,python-docker-py,nmap-ncat diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml index 6141fe05348444..d1cd7f4e593e8f 100644 --- a/test/integration/roles/test_docker/tasks/main.yml +++ b/test/integration/roles/test_docker/tasks/main.yml @@ -1,54 +1,14 @@ -- name: Install docker packages (yum) - yum: - state: present - name: docker,docker-registry,python-docker-py - when: ansible_distribution in ['RedHat', 'CentOS', 'Fedora'] +- include: docker-setup-rht.yml + when: ansible_distribution in ['Fedora'] + # Packages on RHEL and CentOS are broken, broken, broken. Revisit when + # they've got that sorted out + #when: ansible_distribution in ['Fedora', 'RedHat', 'CentOS'] -- name: Install docker packages (apt) - apt: - state: present - # Note: add docker-registry when available - name: docker.io,python-docker - when: ansible_distribution in ['Ubuntu', 'Debian'] +# python-docker isn't available until 14.10. 
Revist at the next Ubuntu LTS +#- include: docker-setup-debian.yml +# when: ansible_distribution in ['Ubuntu'] -- name: Start docker daemon - service: - name: docker - state: started +- include: docker-tests.yml + # Add other distributions as the proper packages become available + when: ansible_distribution in ['Fedora'] -- name: Download busybox image - docker: - image: busybox - state: present - pull: missing - -- name: Run a small script in busybox - docker: - image: busybox - state: reloaded - pull: always - command: "nc -l -p 2000 -e xargs -n1 echo hello" - detach: True - -- name: Get the docker container id - shell: "docker ps | grep busybox | awk '{ print $1 }'" - register: container_id - -- debug: var=container_id - -- name: Get the docker container ip - shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" - register: container_ip - -- debug: var=container_ip - -- name: Try to access the server - shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" - register: docker_output - -- debug: var=docker_output - -- name: check that the script ran - assert: - that: - - "'hello world' in docker_output.stdout_lines" From f8ec1451eae9a0bf3003a5d047144a43d3dee9e0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 17 Mar 2015 13:31:54 -0700 Subject: [PATCH 0097/3617] Would help if I added these files in the right directory --- .../test_docker/tasks/docker-setup-debian.yml | 6 +++ .../roles/test_docker/tasks/docker-tests.yml | 41 +++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 test/integration/roles/test_docker/tasks/docker-setup-debian.yml create mode 100644 test/integration/roles/test_docker/tasks/docker-tests.yml diff --git a/test/integration/roles/test_docker/tasks/docker-setup-debian.yml b/test/integration/roles/test_docker/tasks/docker-setup-debian.yml new file mode 100644 index 00000000000000..01a67eee6bb8f1 --- /dev/null +++ 
b/test/integration/roles/test_docker/tasks/docker-setup-debian.yml @@ -0,0 +1,6 @@ +- name: Install docker packages (apt) + apt: + state: present + # Note: add docker-registry when available + name: docker.io,python-docker,netcat-openbsd + diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml new file mode 100644 index 00000000000000..e3ce04a56c44cb --- /dev/null +++ b/test/integration/roles/test_docker/tasks/docker-tests.yml @@ -0,0 +1,41 @@ +- name: Start docker daemon + service: + name: docker + state: started + +- name: Download busybox image + docker: + image: busybox + state: present + pull: missing + +- name: Run a small script in busybox + docker: + image: busybox + state: reloaded + pull: always + command: "nc -l -p 2000 -e xargs -n1 echo hello" + detach: True + +- name: Get the docker container id + shell: "docker ps | grep busybox | awk '{ print $1 }'" + register: container_id + +- debug: var=container_id + +- name: Get the docker container ip + shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" + register: container_ip + +- debug: var=container_ip + +- name: Try to access the server + shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + register: docker_output + +- debug: var=docker_output + +- name: check that the script ran + assert: + that: + - "'hello world' in docker_output.stdout_lines" From 85e137bbadaf7d72569e52c047f2f5fd28919deb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 17 Mar 2015 13:52:44 -0700 Subject: [PATCH 0098/3617] Attempt to enable docker tests for rhel/centos6 as well --- .../roles/test_docker/tasks/docker-setup-rht.yml | 16 +++++++++++++++- .../integration/roles/test_docker/tasks/main.yml | 7 +++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml 
b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml index 26373e4d3c7be8..d141bddc55efb0 100644 --- a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml +++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml @@ -1,4 +1,18 @@ - name: Install docker packages (yum) yum: state: present - name: docker,docker-registry,python-docker-py,nmap-ncat + name: docker-io,docker-registry,python-docker-py + +- name: Install netcat + yum: + state: present + name: nmap-ncat + # RHEL7 as well... + when: ansible_distribution == 'Fedora' + +- name: Install netcat + yum: + state: present + name: nc + when: ansible_distribution != 'Fedora' + diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml index d1cd7f4e593e8f..d0abc5a9c6eac0 100644 --- a/test/integration/roles/test_docker/tasks/main.yml +++ b/test/integration/roles/test_docker/tasks/main.yml @@ -1,8 +1,9 @@ - include: docker-setup-rht.yml when: ansible_distribution in ['Fedora'] - # Packages on RHEL and CentOS are broken, broken, broken. Revisit when +- include: docker-setup-rht.yml + # Packages on RHEL and CentOS 7 are broken, broken, broken. Revisit when # they've got that sorted out - #when: ansible_distribution in ['Fedora', 'RedHat', 'CentOS'] + when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6 # python-docker isn't available until 14.10. Revist at the next Ubuntu LTS #- include: docker-setup-debian.yml @@ -12,3 +13,5 @@ # Add other distributions as the proper packages become available when: ansible_distribution in ['Fedora'] +- include: docker-tests.yml + when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6 From 9e14471471ba20c11e5c81dd9dd8dc24fa83f169 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 17 Mar 2015 14:14:26 -0700 Subject: [PATCH 0099/3617] And ran into a different problem with centos6. Sigh. 
--- test/integration/roles/test_docker/tasks/main.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml index d0abc5a9c6eac0..bdf252c42f6b3d 100644 --- a/test/integration/roles/test_docker/tasks/main.yml +++ b/test/integration/roles/test_docker/tasks/main.yml @@ -1,9 +1,10 @@ - include: docker-setup-rht.yml when: ansible_distribution in ['Fedora'] -- include: docker-setup-rht.yml +#- include: docker-setup-rht.yml # Packages on RHEL and CentOS 7 are broken, broken, broken. Revisit when # they've got that sorted out - when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6 + # CentOS 6 currently broken by conflicting files in pyhton-backports and python-backports-ssl_match_hostname + #when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6 # python-docker isn't available until 14.10. Revist at the next Ubuntu LTS #- include: docker-setup-debian.yml From 4ce791fe84e1ba800ad57fb790455181f08a0687 Mon Sep 17 00:00:00 2001 From: Steve Gargan Date: Tue, 17 Mar 2015 21:25:45 +0000 Subject: [PATCH 0100/3617] avoid path issues by determining the path of ansible-pull and using its path to run ansible and ansible-playbook --- bin/ansible-pull | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/bin/ansible-pull b/bin/ansible-pull index a9a0897fbff821..d4887631e0fdfb 100755 --- a/bin/ansible-pull +++ b/bin/ansible-pull @@ -186,9 +186,12 @@ def main(args): if path is None: sys.stderr.write("module '%s' not found.\n" % options.module_name) return 1 - cmd = 'ansible localhost -i "%s" %s -m %s -a "%s"' % ( - inv_opts, base_opts, options.module_name, repo_opts + + bin_path = os.path.dirname(os.path.abspath(__file__)) + cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % ( + bin_path, inv_opts, base_opts, options.module_name, repo_opts ) + for ev in options.extra_vars: 
cmd += ' -e "%s"' % ev @@ -221,7 +224,7 @@ def main(args): print >>sys.stderr, "Could not find a playbook to run." return 1 - cmd = 'ansible-playbook %s %s' % (base_opts, playbook) + cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook) if options.vault_password_file: cmd += " --vault-password-file=%s" % options.vault_password_file if options.inventory: From 2cfeec3683a3e6387c126b9975bf63eb5d5ce69a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 17 Mar 2015 14:40:10 -0700 Subject: [PATCH 0101/3617] Add tests using a docker private registry --- .../roles/test_docker/tasks/main.yml | 9 ++- .../test_docker/tasks/registry-tests.yml | 62 +++++++++++++++++++ 2 files changed, 69 insertions(+), 2 deletions(-) create mode 100644 test/integration/roles/test_docker/tasks/registry-tests.yml diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml index bdf252c42f6b3d..2ea15644d5f847 100644 --- a/test/integration/roles/test_docker/tasks/main.yml +++ b/test/integration/roles/test_docker/tasks/main.yml @@ -14,5 +14,10 @@ # Add other distributions as the proper packages become available when: ansible_distribution in ['Fedora'] -- include: docker-tests.yml - when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6 +#- include: docker-tests.yml +# when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6 + +- include: registry-tests.yml + # Add other distributions as the proper packages become available + when: ansible_distribution in ['Fedora'] + diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml new file mode 100644 index 00000000000000..52d840601975db --- /dev/null +++ b/test/integration/roles/test_docker/tasks/registry-tests.yml @@ -0,0 +1,62 @@ +- name: Configure a private docker registry + service: + name: docker-registry + state: started + +- name: 
Get busybox image id + shell: "docker images | grep busybox | awk '{ print $3 }'" + register: image_id + +- name: Tag docker image into the local repository + shell: "docker tag {{ image_id.stdout_lines[0] }} localhost:5000/mine" + +- name: Push docker image into the local repository + shell: "docker push localhost:5000/mine" + +- name: Remove the busybox image from the local docker + shell: "docker rmi -f {{ image_id.stdout_lines[0] }}" + +- name: Remove the new image from the local docker + shell: "docker rmi -f localhost:5000/mine" + +- name: Get number of images in docker + shell: "docker images |wc -l" + register: docker_output + +- name: Check that there are no images in docker + assert: + that: + - "'1' in docker_output.stdout_lines" + +- name: Retrieve the image from private docker server + docker: + image: "localhost:5000/mine" + state: present + pull: missing + insecure_registry: True + +- name: Run a small script in the new image + docker: + image: "localhost:5000/mine" + state: reloaded + pull: always + command: "nc -l -p 2000 -e xargs -n1 echo hello" + detach: True + insecure_registry: True + +- name: Get the docker container id + shell: "docker ps | grep mine | awk '{ print $1 }'" + register: container_id + +- name: Get the docker container ip + shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" + register: container_ip + +- name: Try to access the server + shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + register: docker_output + +- name: check that the script ran + assert: + that: + - "'hello world' in docker_output.stdout_lines" From 259744d5f43f7bb36b9f707f02d074c03364740d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 17 Mar 2015 14:40:43 -0700 Subject: [PATCH 0102/3617] Remove debug statements --- test/integration/roles/test_docker/tasks/docker-tests.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git 
a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml index e3ce04a56c44cb..11f2f9ac2c1927 100644 --- a/test/integration/roles/test_docker/tasks/docker-tests.yml +++ b/test/integration/roles/test_docker/tasks/docker-tests.yml @@ -21,20 +21,14 @@ shell: "docker ps | grep busybox | awk '{ print $1 }'" register: container_id -- debug: var=container_id - - name: Get the docker container ip shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" register: container_ip -- debug: var=container_ip - - name: Try to access the server shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" register: docker_output -- debug: var=docker_output - - name: check that the script ran assert: that: From ba4e9a4c82e5543f2333f2eab1917c4c7ff3d8d4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 17 Mar 2015 18:23:40 -0400 Subject: [PATCH 0103/3617] added missing become method inventory override --- lib/ansible/runner/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 59e4d96924ffc4..5c5554816179f8 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -883,10 +883,12 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port, actual_transport = inject.get('ansible_connection', self.transport) actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file) actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True) + self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become)))) self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user))) self.become_pass = inject.get('ansible_become_pass', 
inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass))) self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe)) + self.become_method = inject.get('ansible_become_method', self.become_method) # select default root user in case self.become requested # but no user specified; happens e.g. in host vars when From f4c1260d0359e5b5ad43477f36afabfd1c8c87e4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 12:15:42 -0700 Subject: [PATCH 0104/3617] Add more tests for private docker registries --- .../roles/test_docker/files/devdockerCA.crt | 23 ++++ .../roles/test_docker/files/devdockerCA.key | 27 +++++ .../roles/test_docker/files/devdockerCA.srl | 1 + .../files/docker-registry.htpasswd | 1 + .../files/dockertest.ansible.com.crt | 21 ++++ .../files/dockertest.ansible.com.csr | 17 +++ .../files/dockertest.ansible.com.key | 27 +++++ .../files/nginx-docker-registry.conf | 40 +++++++ .../test_docker/tasks/docker-setup-debian.yml | 2 +- .../test_docker/tasks/docker-setup-rht.yml | 2 +- .../roles/test_docker/tasks/docker-tests.yml | 31 +++++ .../test_docker/tasks/registry-tests.yml | 108 +++++++++++++++++- 12 files changed, 294 insertions(+), 6 deletions(-) create mode 100644 test/integration/roles/test_docker/files/devdockerCA.crt create mode 100644 test/integration/roles/test_docker/files/devdockerCA.key create mode 100644 test/integration/roles/test_docker/files/devdockerCA.srl create mode 100644 test/integration/roles/test_docker/files/docker-registry.htpasswd create mode 100644 test/integration/roles/test_docker/files/dockertest.ansible.com.crt create mode 100644 test/integration/roles/test_docker/files/dockertest.ansible.com.csr create mode 100644 test/integration/roles/test_docker/files/dockertest.ansible.com.key create mode 100644 test/integration/roles/test_docker/files/nginx-docker-registry.conf diff --git a/test/integration/roles/test_docker/files/devdockerCA.crt 
b/test/integration/roles/test_docker/files/devdockerCA.crt new file mode 100644 index 00000000000000..14f1b2f7ee6180 --- /dev/null +++ b/test/integration/roles/test_docker/files/devdockerCA.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIJAPczDjnFOjH/MA0GCSqGSIb3DQEBCwUAMIGEMQswCQYD +VQQGEwJVUzELMAkGA1UECAwCTkMxDzANBgNVBAcMBkR1cmhhbTEQMA4GA1UECgwH +QW5zaWJsZTEfMB0GA1UEAwwWZG9ja2VydGVzdC5hbnNpYmxlLmNvbTEkMCIGCSqG +SIb3DQEJARYVdGt1cmF0b21pQGFuc2libGUuY29tMB4XDTE1MDMxNzIyMjc1OVoX +DTQyMDgwMjIyMjc1OVowgYQxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOQzEPMA0G +A1UEBwwGRHVyaGFtMRAwDgYDVQQKDAdBbnNpYmxlMR8wHQYDVQQDDBZkb2NrZXJ0 +ZXN0LmFuc2libGUuY29tMSQwIgYJKoZIhvcNAQkBFhV0a3VyYXRvbWlAYW5zaWJs +ZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIk4D0+QY3obQM +I/BPmI4pFFu734HHz98ce6Qat7WYiGUHsnt3LHw2a6zMsgP3siD1zqGHtk1IipWR +IwZbXm1spww/8YNUEE8wbXlLGI8IPUpg2J7NS2SdYIuN/TrQMqCUt7fFb+7OQjaH +RtR0LtXhP96al3E8BR9G6AiS67XuwdTL4vrXLUWISjNyF2Vj7xQsp8KRrq0qnXhq +pefeBi1fD9DG5f76j3s8lqGiOg9FHegvfodonNGcqE16T/vBhQcf+NjenlFvR2Lh +3wb/RCo/b1IhZHKNx32fJ/WpiKXkrLYFvwtIWtLw6XIwwarc+n7AfGqKnt4h4bAG +a+5aNnlFAgMBAAGjUDBOMB0GA1UdDgQWBBRZpu6oomSlpCvy2VgOHbWwDwVl1jAf +BgNVHSMEGDAWgBRZpu6oomSlpCvy2VgOHbWwDwVl1jAMBgNVHRMEBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4IBAQCqOSFzTgQDww5bkNRCQrg7lTKzXW9bJpJ5NZdTLwh6 +b+e+XouRH+lBe7Cnn2RTtuFYVfm8hQ1Ra7GDM3v2mJns/s3zDkRINZMMVXddzl5S +M8QxsFJK41PaL9wepizslkcg19yQkdWJQYPDeFurlFvwtakhZE7ttawYi5bFkbCd +4fchMNBBmcigpSfoWb/L2lK2vVKBcfOdUl+V6k49lpf8u7WZD0Xi2cbBhw17tPj4 +ulKZaVNdzj0GFfhpQe/MtDoqxStRpHamdk0Y6fN+CvoW7RPDeVsqkIgCu30MOFuG +A53ZtOc3caYRyGYJtIIl0Rd5uIApscec/6RGiFX6Gab8 +-----END CERTIFICATE----- diff --git a/test/integration/roles/test_docker/files/devdockerCA.key b/test/integration/roles/test_docker/files/devdockerCA.key new file mode 100644 index 00000000000000..0c8c0ee7b0c293 --- /dev/null +++ b/test/integration/roles/test_docker/files/devdockerCA.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEpQIBAAKCAQEAyJOA9PkGN6G0DCPwT5iOKRRbu9+Bx8/fHHukGre1mIhlB7J7 +dyx8NmuszLID97Ig9c6hh7ZNSIqVkSMGW15tbKcMP/GDVBBPMG15SxiPCD1KYNie +zUtknWCLjf060DKglLe3xW/uzkI2h0bUdC7V4T/empdxPAUfRugIkuu17sHUy+L6 +1y1FiEozchdlY+8ULKfCka6tKp14aqXn3gYtXw/QxuX++o97PJahojoPRR3oL36H +aJzRnKhNek/7wYUHH/jY3p5Rb0di4d8G/0QqP29SIWRyjcd9nyf1qYil5Ky2Bb8L +SFrS8OlyMMGq3Pp+wHxqip7eIeGwBmvuWjZ5RQIDAQABAoIBAQCVOumfWgf+LBlB +TxvknKRoe/Ukes6cU1S0ZGlcV4KM0i4Y4/poWHiyJLqUMX4yNB3BxNL5nfEyH6nY +Ki74m/Dd/gtnJ9GGIfxJE6pC7Sq9/pvwIjtEkutxC/vI0LeJX6GKBIZ+JyGN5EWd +sF0xdAc9Z7+/VR2ygj0bDFgUt7rMv6fLaXh6i5Ms0JV7I/HkIi0Lmy9FncJPOTjP +/Wb3Rj5twDppBqSiqU2JNQHysWzNbp8nzBGeR0+WU6xkWjjGzVyQZJq4XJQhqqot +t+v+/lF+jObujcRxPRStaA5IoQdmls3l+ubkoFeNp3j6Nigz40wjTJArMu/Q9xQ5 +A+kHYNgBAoGBAPVNku0eyz1SyMM8FNoB+AfSpkslTnqfmehn1GCOOS9JPimGWS3A +UlAs/PAPW/H/FTM38eC89GsKKVV8zvwkERNwf+PIGzkQrJgYLxGwoflAKsvFoQi9 +PVbIn0TBDZ3TWyNfGul62fEgNen4B46d7kG6l/C3p9eKKCo3sCBgWl8FAoGBANFS +n9YWyAYmHQAWy5R0YeTsdtiRpZWkB0Is9Jr8Zm/DQDNnsKgvXw//qxuWYMi68teK +6o8t5mgDQNWBu3rXrU73f8mMVJNmzSHFbyQEyFOJ9yvI5qMRbJfvdURUje6d3ZUw +G7olKjX0fec4cAG7hbT8sMDvIbnATdhh3VppiEVBAoGBAJKidJnaNpPJ0MkkOTK4 +ypOikFWLT4ZtsYsDxiiR3A0wM0CPVu/Kb2oN+oVmKQhX+0xKvQQi79iskljP6ss+ +pBaCwXBgRiWumf2xNzHT7H8apHp7APBAb1JZSxvGa2VU2r4iM+wty+of3xqlcZ8H +OU2BRSJYJrTpmWjjMR2pe1whAoGAfMTbMSlzIPcm4h60SlD06Rdp370xDfkvumpB +gwBfrs6bPgjYa+eQqmCjBValagDFL2VGWwHpDKajxqAFuDtGuoMcUG6tGw9zxmWA +0d9n6SObiSW/FAQWzpmVNJ2R3GGM6pg6bsIoXvDU+zXQzbeRA0h7swTW/Xl67Teo +UXQGHgECgYEAjckqv2e39AgBvjxvj9SylVbFNSERrbpmiIRH31MnAHpTXbxRf7K+ +/79vUsRfQun9F/+KVfjUyMqRj0PE2tS4ATIjqQsa18RCB4mAE3sNsKz8HbJfzIFq +eEqAWmURm6gRmLmaTMlXS0ZtZaw/A2Usa/DJumu9CsfBu7ZJbDnrQIY= +-----END RSA PRIVATE KEY----- diff --git a/test/integration/roles/test_docker/files/devdockerCA.srl b/test/integration/roles/test_docker/files/devdockerCA.srl new file mode 100644 index 00000000000000..78f0162afecbc3 --- /dev/null +++ b/test/integration/roles/test_docker/files/devdockerCA.srl @@ -0,0 +1 @@ +D96F3E552F279F46 diff --git 
a/test/integration/roles/test_docker/files/docker-registry.htpasswd b/test/integration/roles/test_docker/files/docker-registry.htpasswd new file mode 100644 index 00000000000000..7cee295817c943 --- /dev/null +++ b/test/integration/roles/test_docker/files/docker-registry.htpasswd @@ -0,0 +1 @@ +testdocker:$apr1$6cYd3tA9$4Dc9/I5Z.bl8/br8O/6B41 diff --git a/test/integration/roles/test_docker/files/dockertest.ansible.com.crt b/test/integration/roles/test_docker/files/dockertest.ansible.com.crt new file mode 100644 index 00000000000000..e89327c3faf508 --- /dev/null +++ b/test/integration/roles/test_docker/files/dockertest.ansible.com.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDYTCCAkkCCQDZbz5VLyefRjANBgkqhkiG9w0BAQUFADCBhDELMAkGA1UEBhMC +VVMxCzAJBgNVBAgMAk5DMQ8wDQYDVQQHDAZEdXJoYW0xEDAOBgNVBAoMB0Fuc2li +bGUxHzAdBgNVBAMMFmRvY2tlcnRlc3QuYW5zaWJsZS5jb20xJDAiBgkqhkiG9w0B +CQEWFXRrdXJhdG9taUBhbnNpYmxlLmNvbTAgFw0xNTAzMTcyMjMxNTBaGA8yMjg4 +MTIzMDIyMzE1MFowXjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5DMQ8wDQYDVQQH +DAZEdXJoYW0xEDAOBgNVBAoMB0Fuc2libGUxHzAdBgNVBAMMFmRvY2tlcnRlc3Qu +YW5zaWJsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7WpI3 +QuuARgPufAA0JkGCGIUNWqFyTEngOWvBVEuk5TnDB4x78OCE9j7rr75OxZaSc6Y7 +oFTl+hhlgt6sqj+GXehgCHLA97CCc8eUqGv3bwdIIg/hahCPjEWfYzocX1xmUdzN +6klbV9lSO7FGSuk7W4DNga/weRfZmVoPi6jqTvx0tFsGrHVb1evholUKpxaOEYQZ +2NJ22+UXpUyVzN/mw5TAGNG0/yR7sIgCjKYCsYF8k79SfNDMJ1VcCPy3aag45jaz +WoA+OIJJFRkAaPSM5VtnbGBv/slpDVaKfl2ei7Ey3mKx1b7jYMzRz07Gw+zqr1gJ +kBWvfjR7ioxXcN7jAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAJyF24tCq5R8SJto +EMln0m9dMoJTC5usaBYBUMMe6hV2ikUGaXVDIqY+Yypt1sIcjGnLRmehJbej8iS7 +4aypuLc8Fgb4CvW+gY3I3W1iF7ZxIN/4yr237Z9KH1d1uGi+066Sk94OCXlqgsb+ +RzU6XOg+PMIjYC/us5VRv8a2qfjIA8getR+19nP+hR6NgIQcEyRKG2FmhkUSAwd8 +60FhpW4UmPQmn0ErZmRwdp2hNPj5g3my5iOSi7DzdK4CwZJAASOoWsbQIxP0k4JE +PMo7Ad1YxXlOvNWIA8FLMkRsq3li6KJ17WBdEYgFeuxWpf1/x1WA+WpwEIfC5cuR +A5LkaNI= +-----END CERTIFICATE----- diff --git a/test/integration/roles/test_docker/files/dockertest.ansible.com.csr 
b/test/integration/roles/test_docker/files/dockertest.ansible.com.csr new file mode 100644 index 00000000000000..62b1f8535acf50 --- /dev/null +++ b/test/integration/roles/test_docker/files/dockertest.ansible.com.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICozCCAYsCAQAwXjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5DMQ8wDQYDVQQH +DAZEdXJoYW0xEDAOBgNVBAoMB0Fuc2libGUxHzAdBgNVBAMMFmRvY2tlcnRlc3Qu +YW5zaWJsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7WpI3 +QuuARgPufAA0JkGCGIUNWqFyTEngOWvBVEuk5TnDB4x78OCE9j7rr75OxZaSc6Y7 +oFTl+hhlgt6sqj+GXehgCHLA97CCc8eUqGv3bwdIIg/hahCPjEWfYzocX1xmUdzN +6klbV9lSO7FGSuk7W4DNga/weRfZmVoPi6jqTvx0tFsGrHVb1evholUKpxaOEYQZ +2NJ22+UXpUyVzN/mw5TAGNG0/yR7sIgCjKYCsYF8k79SfNDMJ1VcCPy3aag45jaz +WoA+OIJJFRkAaPSM5VtnbGBv/slpDVaKfl2ei7Ey3mKx1b7jYMzRz07Gw+zqr1gJ +kBWvfjR7ioxXcN7jAgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAQEAoPgw9dlA3Ys2 +oahtr2KMNFnHnab6hUr/CuDIygkOft+MCX1cPXY1c0R72NQq42TjAFO5UnriJ0Jg +rcWgBAw8TCOHH77ZWawQFjWWoxNTy+bfXNJ002tzc4S/A4s8ytcFQN7E2irbGtUB +ratVaE+c6RvD/o48N4YLUyJbJK84FZ1xMnJI0z5R6XzDWEqYbobzkM/aUWvDTT9F ++F9H5W/3sIhNFVGLygSKbhgrb6eaC8R36fcmTRfYYdT4GrpXFePoZ4LJGCKiiaGV +p8gZzYQ9xjRYDP2OUMacBDlX1Mu5IJ2SCfjavD1hMhB54tWiiw3CRMJcNMql7ob/ +ZHH8UDMqgA== +-----END CERTIFICATE REQUEST----- diff --git a/test/integration/roles/test_docker/files/dockertest.ansible.com.key b/test/integration/roles/test_docker/files/dockertest.ansible.com.key new file mode 100644 index 00000000000000..bda2bb612629c6 --- /dev/null +++ b/test/integration/roles/test_docker/files/dockertest.ansible.com.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAu1qSN0LrgEYD7nwANCZBghiFDVqhckxJ4DlrwVRLpOU5wweM +e/DghPY+66++TsWWknOmO6BU5foYZYLerKo/hl3oYAhywPewgnPHlKhr928HSCIP +4WoQj4xFn2M6HF9cZlHczepJW1fZUjuxRkrpO1uAzYGv8HkX2ZlaD4uo6k78dLRb +Bqx1W9Xr4aJVCqcWjhGEGdjSdtvlF6VMlczf5sOUwBjRtP8ke7CIAoymArGBfJO/ +UnzQzCdVXAj8t2moOOY2s1qAPjiCSRUZAGj0jOVbZ2xgb/7JaQ1Win5dnouxMt5i +sdW+42DM0c9OxsPs6q9YCZAVr340e4qMV3De4wIDAQABAoIBABjczxSIS+pM4E6w 
+o/JHtV/HUzjPcydQ2mjoFdWlExjB1qV8BfeYoqLibr0mKFIZxH6Q3FmDUGDojH5E +HLq7KQzyv1inJltXQ1Q8exrOMu22DThUVNksEyCJk9+v8lE7km59pJiq46s8gDl6 +dG8Il+TporEi6a820qRsxlfTx8m4EUbyPIhf2e2wYdqiscLwj49ZzMs3TFJxN3j4 +lLP3QDHz9n8q+XXpUT9+rsePe4D4DVVRLhg8w35zkys36xfvBZrI+9SytSs+r1/e +X4gVhxeX9q3FkvXiw1IDGPr0l5X7SH+5zk7JWuLfFbNBK02zR/Bd2OIaYAOmyIFk +ZzsVfokCgYEA8Cj04S32Tga7lOAAUEuPjgXbCtGYqBUJ/9mlMHJBtyl4vaBRm1Z3 +1YQqlL3yGM1F6ZStPWs86vsVaScypr7+RnmQ/uPjz1g2jNI9vomqRkzpzd8/bBwW +J3FCaKFIfl9uQx4ac7piAYdhNXswjQ7Kzn5xgG24i8EkUm6+UxarA38CgYEAx7X+ +qOVT+kA5WU1EDIc2x3Au0PhNIXiHOGRLW0MC7Vy1xBrgxfVrz6J8flBXOxmWYjRq +3dFiHA9S7WPQStkgTjzE91sthLefJ8DKXE4IrRkvYXIIX8DqkcFxTHS/OzckTcK/ +z79jNOPYA1s+z2jzgd24sslXbqxNz1LqZ/PlRp0CgYEAik8cEF72/aK0/x0uMRAD +IcjPiGCDKTHMq3M9xjPXEtQofBTLSsm2g9n05+qodY4qmEYOq1OKJs3pW8C+U/ek +2xOB5Ll75lqoN9uQwZ3o2UnMUMskbG+UdqyskTNpW5Y8Gx1IIKQTc0vzOOi0YlhF +hjydw1ftM1dNQsgShimE3aMCgYEAwITwFk7kcoTBBBZY+B7Mrtu1Ndt3N0HiUHlW +r4Zc5waNbptefVbF9GY1zuqR/LYA43CWaHj1NAmNrqye2diPrPwmADHUInGEqqTO +LsdG099Ibo6oBe6J8bJiDwsoYeQZSiDoGVPtRcoyraGjXfxVaaac6zTu5RCS/b53 +m3hhWH0CgYAqi3x10NpJHInU/zNa1GhI9UVJzabE2APdbPHvoE/yyfpCGhExiXZw +MDImUzc59Ro0pCZ9Bk7pd5LwdjjeJXih7jaRZQlPD1BeM6dKdmJps1KMaltOOJ4J +W0FE34E+Kt5JeIix8zmhxgaAU9NVilaNx5tI/D65Y0inMBZpqedrtg== +-----END RSA PRIVATE KEY----- diff --git a/test/integration/roles/test_docker/files/nginx-docker-registry.conf b/test/integration/roles/test_docker/files/nginx-docker-registry.conf new file mode 100644 index 00000000000000..99c7802e1bf574 --- /dev/null +++ b/test/integration/roles/test_docker/files/nginx-docker-registry.conf @@ -0,0 +1,40 @@ +# For versions of Nginx > 1.3.9 that include chunked transfer encoding support +# Replace with appropriate values where necessary + +upstream docker-registry { + server localhost:5000; +} + +server { + listen 8080; + server_name dockertest.ansible.com; + + ssl on; + ssl_certificate /etc/pki/tls/certs/dockertest.ansible.com.crt; + ssl_certificate_key /etc/pki/tls/private/dockertest.ansible.com.key; + + 
proxy_set_header Host $http_host; # required for Docker client sake + proxy_set_header X-Real-IP $remote_addr; # pass on real client IP + + client_max_body_size 0; # disable any limits to avoid HTTP 413 for large image uploads + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486) + chunked_transfer_encoding on; + + location / { + # let Nginx know about our auth file + auth_basic "Restricted"; + auth_basic_user_file /etc/nginx/docker-registry.htpasswd; + + proxy_pass http://docker-registry; + } + location /_ping { + auth_basic off; + proxy_pass http://docker-registry; + } + location /v1/_ping { + auth_basic off; + proxy_pass http://docker-registry; + } + +} diff --git a/test/integration/roles/test_docker/tasks/docker-setup-debian.yml b/test/integration/roles/test_docker/tasks/docker-setup-debian.yml index 01a67eee6bb8f1..068011a0937223 100644 --- a/test/integration/roles/test_docker/tasks/docker-setup-debian.yml +++ b/test/integration/roles/test_docker/tasks/docker-setup-debian.yml @@ -2,5 +2,5 @@ apt: state: present # Note: add docker-registry when available - name: docker.io,python-docker,netcat-openbsd + name: docker.io,python-docker,netcat-openbsd,nginx diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml index d141bddc55efb0..3ba234ecffca5f 100644 --- a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml +++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml @@ -1,7 +1,7 @@ - name: Install docker packages (yum) yum: state: present - name: docker-io,docker-registry,python-docker-py + name: docker-io,docker-registry,python-docker-py,nginx - name: Install netcat yum: diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml index 11f2f9ac2c1927..10067d7ad7a5f6 100644 --- a/test/integration/roles/test_docker/tasks/docker-tests.yml +++ 
b/test/integration/roles/test_docker/tasks/docker-tests.yml @@ -33,3 +33,34 @@ assert: that: - "'hello world' in docker_output.stdout_lines" + +- name: Run a script that sets environment in busybox + docker: + image: busybox + state: reloaded + pull: always + env: + TEST: hello + command: '/bin/sh -c "nc -l -p 2000 -e xargs -n1 echo $TEST"' + detach: True + +- name: Get the docker container id + shell: "docker ps | grep busybox | awk '{ print $1 }'" + register: container_id + +- name: Get the docker container ip + shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" + register: container_ip + +- name: Try to access the server + shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + register: docker_output + +- name: check that the script ran + assert: + that: + - "'hello world' in docker_output.stdout_lines" + +- name: Remove the busybox image from the local docker + shell: "docker rmi -f busybox" + diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml index 52d840601975db..348062234ad07a 100644 --- a/test/integration/roles/test_docker/tasks/registry-tests.yml +++ b/test/integration/roles/test_docker/tasks/registry-tests.yml @@ -3,18 +3,24 @@ name: docker-registry state: started +- name: Retrieve busybox image from docker hub + docker: + image: busybox + state: present + pull: missing + - name: Get busybox image id shell: "docker images | grep busybox | awk '{ print $3 }'" register: image_id -- name: Tag docker image into the local repository +- name: Tag docker image into the local registry shell: "docker tag {{ image_id.stdout_lines[0] }} localhost:5000/mine" -- name: Push docker image into the local repository +- name: Push docker image into the private registry shell: "docker push localhost:5000/mine" - name: Remove the busybox image from the local docker - shell: "docker rmi -f {{ image_id.stdout_lines[0] }}" + 
shell: "docker rmi -f busybox" - name: Remove the new image from the local docker shell: "docker rmi -f localhost:5000/mine" @@ -23,12 +29,13 @@ shell: "docker images |wc -l" register: docker_output +# docker prints a header so the header should be all that's present - name: Check that there are no images in docker assert: that: - "'1' in docker_output.stdout_lines" -- name: Retrieve the image from private docker server +- name: Retrieve the image from private docker registry docker: image: "localhost:5000/mine" state: present @@ -60,3 +67,96 @@ assert: that: - "'hello world' in docker_output.stdout_lines" + +- name: Remove the new image from the local docker + shell: "docker rmi -f localhost:5000/mine" + +- name: Get number of images in docker + shell: "docker images |wc -l" + register: docker_output + +- name: Check that there are no images in docker + assert: + that: + - "'1' in docker_output.stdout_lines" + +- name: Setup nginx with a user/password + copy: + src: docker-registry.htpasswd + dest: /etc/nginx/docker-registry.htpasswd + +- name: Setup nginx with a config file + copy: + src: nginx-docker-registry.conf + dest: /etc/nginx/conf.d/nginx-docker-registry.conf + +- name: Setup nginx docker cert + copy: + src: dockertest.ansible.com.crt + dest: /etc/pki/tls/certs/dockertest.ansible.com.crt + +- name: Setup nginx docker key + copy: + src: dockertest.ansible.com.key + dest: /etc/pki/tls/private/dockertest.ansible.com.key + +- name: Setup the ca keys + copy: + src: devdockerCA.crt + dest: /etc/pki/ca-trust/source/anchors/devdockerCA.crt + +- name: Update the ca bundle + command: update-ca-trust extract + +- name: Restart docker daemon + service: + name: docker + state: restarted + +- name: Start nginx + service: + name: nginx + state: restarted + +- name: Add domain name to hosts + lineinfile: + line: "127.0.0.1 dockertest.ansible.com" + dest: /etc/hosts + state: present + +- name: Start a container after getting it from a secured private registry + docker: + 
image: dockertest.ansible.com:8080/mine + registry: dockertest.ansible.com:8080 + username: "testdocker" + password: "testdocker" + state: running + command: "nc -l -p 2000 -e xargs -n1 echo hello" + detach: True + +- name: Get the docker container id + shell: "docker ps | grep mine | awk '{ print $1 }'" + register: container_id + +- name: Get the docker container ip + shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" + register: container_ip + +- name: Try to access the server + shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + register: docker_output + +- name: check that the script ran + assert: + that: + - "'hello world' in docker_output.stdout_lines" + +- name: Remove the private repo image from the local docker + shell: "docker rmi -f dockertest.ansible.com:8080/mine" + +- name: Remove domain name to hosts + lineinfile: + line: "127.0.0.1 dockertest.ansible.com" + dest: /etc/hosts + state: absent + From c2fb0b8f9d0bccd27a08dfc7febe1d5533a1d8a4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 13:40:19 -0700 Subject: [PATCH 0105/3617] Some debugging for why docker tests are failing in jenkins --- .../test_docker/tasks/registry-tests.yml | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml index 348062234ad07a..fea9bdabf70d5f 100644 --- a/test/integration/roles/test_docker/tasks/registry-tests.yml +++ b/test/integration/roles/test_docker/tasks/registry-tests.yml @@ -14,26 +14,27 @@ register: image_id - name: Tag docker image into the local registry - shell: "docker tag {{ image_id.stdout_lines[0] }} localhost:5000/mine" + command: "docker tag {{ image_id.stdout_lines[0] }} localhost:5000/mine" - name: Push docker image into the private registry - shell: "docker push localhost:5000/mine" + command: "docker push 
localhost:5000/mine" - name: Remove the busybox image from the local docker - shell: "docker rmi -f busybox" + command: "docker rmi -f busybox" - name: Remove the new image from the local docker - shell: "docker rmi -f localhost:5000/mine" + command: "docker rmi -f localhost:5000/mine" - name: Get number of images in docker - shell: "docker images |wc -l" + command: "docker images" register: docker_output +- debug: var=docker_output # docker prints a header so the header should be all that's present - name: Check that there are no images in docker assert: that: - - "'1' in docker_output.stdout_lines" + - "{{ docker_output.stdout_lines| length }} <= 1 " - name: Retrieve the image from private docker registry docker: @@ -69,16 +70,17 @@ - "'hello world' in docker_output.stdout_lines" - name: Remove the new image from the local docker - shell: "docker rmi -f localhost:5000/mine" + command: "docker rmi -f localhost:5000/mine" - name: Get number of images in docker - shell: "docker images |wc -l" + command: "docker images" register: docker_output +- debug: var=docker_output - name: Check that there are no images in docker assert: that: - - "'1' in docker_output.stdout_lines" + - "{{ docker_output.stdout_lines| length }} <= 1" - name: Setup nginx with a user/password copy: @@ -152,7 +154,7 @@ - "'hello world' in docker_output.stdout_lines" - name: Remove the private repo image from the local docker - shell: "docker rmi -f dockertest.ansible.com:8080/mine" + command: "docker rmi -f dockertest.ansible.com:8080/mine" - name: Remove domain name to hosts lineinfile: From 2a967879fb40a137405e00345b53fac78e4f7c80 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 14:05:27 -0700 Subject: [PATCH 0106/3617] Fix the removal of busybox image --- test/integration/roles/test_docker/tasks/registry-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml 
b/test/integration/roles/test_docker/tasks/registry-tests.yml index fea9bdabf70d5f..e8f0596171d4bf 100644 --- a/test/integration/roles/test_docker/tasks/registry-tests.yml +++ b/test/integration/roles/test_docker/tasks/registry-tests.yml @@ -20,7 +20,7 @@ command: "docker push localhost:5000/mine" - name: Remove the busybox image from the local docker - command: "docker rmi -f busybox" + command: "docker rmi -f {{ image_id.stdout_lines[0] }}" - name: Remove the new image from the local docker command: "docker rmi -f localhost:5000/mine" From 3c52c36629bf74fb0e5225f6b98bf7d2d19dbe2e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 17:57:29 -0700 Subject: [PATCH 0107/3617] Okay, let's see if these pauses are enough to get this passing --- .../roles/test_docker/tasks/docker-tests.yml | 14 ++++++-- .../test_docker/tasks/registry-tests.yml | 36 +++++++++++++------ 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml index 10067d7ad7a5f6..383b8eb3f657a6 100644 --- a/test/integration/roles/test_docker/tasks/docker-tests.yml +++ b/test/integration/roles/test_docker/tasks/docker-tests.yml @@ -8,6 +8,7 @@ image: busybox state: present pull: missing + docker_api_version: "1.14" - name: Run a small script in busybox docker: @@ -16,6 +17,7 @@ pull: always command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True + docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep busybox | awk '{ print $1 }'" @@ -25,6 +27,10 @@ shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" register: container_ip +- name: Pause a few moments because docker is not reliable + pause: + seconds: 40 + - name: Try to access the server shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" register: docker_output @@ -43,6 +49,7 @@ TEST: hello command: '/bin/sh 
-c "nc -l -p 2000 -e xargs -n1 echo $TEST"' detach: True + docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep busybox | awk '{ print $1 }'" @@ -52,6 +59,10 @@ shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" register: container_ip +- name: Pause a few moments because docker is not reliable + pause: + seconds: 40 + - name: Try to access the server shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" register: docker_output @@ -62,5 +73,4 @@ - "'hello world' in docker_output.stdout_lines" - name: Remove the busybox image from the local docker - shell: "docker rmi -f busybox" - + shell: "docker rmi -f $(docker images -q)" diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml index e8f0596171d4bf..03d2fa0db73039 100644 --- a/test/integration/roles/test_docker/tasks/registry-tests.yml +++ b/test/integration/roles/test_docker/tasks/registry-tests.yml @@ -19,17 +19,16 @@ - name: Push docker image into the private registry command: "docker push localhost:5000/mine" -- name: Remove the busybox image from the local docker - command: "docker rmi -f {{ image_id.stdout_lines[0] }}" +- name: Remove containers + shell: "docker rm $(docker ps -aq)" -- name: Remove the new image from the local docker - command: "docker rmi -f localhost:5000/mine" +- name: Remove all images from the local docker + shell: "docker rmi -f $(docker images -q)" - name: Get number of images in docker command: "docker images" register: docker_output -- debug: var=docker_output # docker prints a header so the header should be all that's present - name: Check that there are no images in docker assert: @@ -42,6 +41,7 @@ state: present pull: missing insecure_registry: True + docker_api_version: "1.14" - name: Run a small script in the new image docker: @@ -51,6 +51,7 @@ command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: 
True insecure_registry: True + docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep mine | awk '{ print $1 }'" @@ -60,6 +61,10 @@ shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" register: container_ip +- name: Pause a few moments because docker is not reliable + pause: + seconds: 40 + - name: Try to access the server shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" register: docker_output @@ -69,14 +74,17 @@ that: - "'hello world' in docker_output.stdout_lines" -- name: Remove the new image from the local docker - command: "docker rmi -f localhost:5000/mine" + +- name: Remove containers + shell: "docker rm $(docker ps -aq)" + +- name: Remove all images from the local docker + shell: "docker rmi -f $(docker images -q)" - name: Get number of images in docker command: "docker images" register: docker_output -- debug: var=docker_output - name: Check that there are no images in docker assert: that: @@ -135,6 +143,7 @@ state: running command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True + docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep mine | awk '{ print $1 }'" @@ -144,6 +153,10 @@ shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" register: container_ip +- name: Pause a few moments because docker is not reliable + pause: + seconds: 40 + - name: Try to access the server shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" register: docker_output @@ -153,8 +166,11 @@ that: - "'hello world' in docker_output.stdout_lines" -- name: Remove the private repo image from the local docker - command: "docker rmi -f dockertest.ansible.com:8080/mine" +- name: Remove containers + shell: "docker rm $(docker ps -aq)" + +- name: Remove all images from the local docker + shell: "docker rmi -f $(docker images -q)" - name: Remove domain name to hosts lineinfile: From 
9dd5f8c758dd60af5fc8bb00e4961e1fa080b588 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 18:30:10 -0700 Subject: [PATCH 0108/3617] Update core module pointer --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index ae253593e3a0e3..e338fef730abf9 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit ae253593e3a0e3339a136bf57e0a54e62229e8e6 +Subproject commit e338fef730abf94b4b128a73433c166952c3add9 From 98db6a232d13c51763322737cb2d60831201da34 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 18:56:46 -0700 Subject: [PATCH 0109/3617] Have selinux allow docker<=>nginx communication --- .../roles/test_docker/tasks/docker-tests.yml | 5 ++++- .../roles/test_docker/tasks/registry-tests.yml | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml index 383b8eb3f657a6..33ffe6c70ca4c3 100644 --- a/test/integration/roles/test_docker/tasks/docker-tests.yml +++ b/test/integration/roles/test_docker/tasks/docker-tests.yml @@ -72,5 +72,8 @@ that: - "'hello world' in docker_output.stdout_lines" -- name: Remove the busybox image from the local docker +- name: Remove containers + shell: "docker rm $(docker ps -aq)" + +- name: Remove all images from the local docker shell: "docker rmi -f $(docker images -q)" diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml index 03d2fa0db73039..57b4d252774176 100644 --- a/test/integration/roles/test_docker/tasks/registry-tests.yml +++ b/test/integration/roles/test_docker/tasks/registry-tests.yml @@ -90,6 +90,20 @@ that: - "{{ docker_output.stdout_lines| length }} <= 1" +# +# Private registry secured with an SSL proxy +# + +- name: Set selinux to 
allow docker to connect to nginx + seboolean: + name: docker_connect_any + state: yes + +- name: Set selinux to allow nginx to connect to docker + seboolean: + name: httpd_can_network_connect + state: yes + - name: Setup nginx with a user/password copy: src: docker-registry.htpasswd From b8efd3f777f379e69180b377017dcc31bb708e1c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 19:55:13 -0700 Subject: [PATCH 0110/3617] Update core module pointer --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e338fef730abf9..76198a8223e279 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e338fef730abf94b4b128a73433c166952c3add9 +Subproject commit 76198a8223e279bebb2aeccc452c26e66ad9b747 From 73f5a1fcddfca003a6e32741eb06f11ae29efa53 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 18 Mar 2015 20:25:53 -0700 Subject: [PATCH 0111/3617] Update the extras module pointer --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 696bc60caad2ea..cb848fcd9ec836 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 696bc60caad2ea96c0a70c8091e24b2da060f35c +Subproject commit cb848fcd9ec8364210fc05a5a7addd955b8a2529 From 85cfe1bd52b20cb0b35255ef37a7b2095bd3aec6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 19 Mar 2015 11:17:16 -0400 Subject: [PATCH 0112/3617] added google addwords tag --- docsite/_themes/srtd/layout.html | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index d073c4c22f8996..ce44c4284da0da 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -113,6 +113,24 @@ } + + + + + + From 456f83962d2233cb0b367c5b5749b2b2c7e4455e 
Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 19 Mar 2015 14:31:00 -0400 Subject: [PATCH 0113/3617] ignore PE methods that are not sudo for checksums until we get them working universally --- lib/ansible/runner/__init__.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 5c5554816179f8..8e326935b09dff 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -1243,7 +1243,13 @@ def _remote_checksum(self, conn, tmp, path, inject): python_interp = 'python' cmd = conn.shell.checksum(path, python_interp) - data = self._low_level_exec_command(conn, cmd, tmp, sudoable=True) + + #TODO: remove this horrible hack and find way to get checksum to work with other privilege escalation methods + if self.become_method == 'sudo': + sudoable = True + else: + sudoable = False + data = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable) data2 = utils.last_non_blank_line(data['stdout']) try: if data2 == '': From ac1c49302dffb8b7d261df1c9199815a9590c480 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 19 Mar 2015 12:50:46 -0700 Subject: [PATCH 0114/3617] Update core modules pointer --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 76198a8223e279..fb1c92ffa4ff7f 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 76198a8223e279bebb2aeccc452c26e66ad9b747 +Subproject commit fb1c92ffa4ff7f6c82944806ca6da3d71b7af0d5 From d1641f292502d77a31594b0209fc88f25ca13772 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 19 Mar 2015 16:10:01 -0700 Subject: [PATCH 0115/3617] Remove the multiline string for arguments not. 
--- docsite/rst/playbooks_intro.rst | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index d0c702c071252d..4751467b016857 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -106,10 +106,6 @@ YAML dictionaries to supply the modules with their key=value arguments.:: name: httpd state: restarted -.. note:: - - The above example using YAML dictionaries for module arguments can also be accomplished using the YAML multiline string syntax with the `>` character but this can lead to string quoting errors. - Below, we'll break down what the various features of the playbook language are. .. _playbook_basics: From 6264eb4e02eff67a1701b7e578073a5bf9adba68 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 19 Mar 2015 22:45:47 -0700 Subject: [PATCH 0116/3617] Pull in ec2_asg fixes from core modules --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index fb1c92ffa4ff7f..a78de5080109ee 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit fb1c92ffa4ff7f6c82944806ca6da3d71b7af0d5 +Subproject commit a78de5080109eeaf46d5e42f9bbeb4f02d510627 From 0c57bed728a90d20d8c5686a1cb83170dbf088e2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 19 Mar 2015 17:18:23 -0400 Subject: [PATCH 0117/3617] now add_host loads hostvars --- lib/ansible/runner/action_plugins/add_host.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/ansible/runner/action_plugins/add_host.py b/lib/ansible/runner/action_plugins/add_host.py index 0e49e928dbf95e..2fcea6cd5c7ab8 100644 --- a/lib/ansible/runner/action_plugins/add_host.py +++ b/lib/ansible/runner/action_plugins/add_host.py @@ -55,7 +55,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** if ":" in new_name: new_name, new_port = 
new_name.split(":") args['ansible_ssh_port'] = new_port - + # redefine inventory and get group "all" inventory = self.runner.inventory allgroup = inventory.get_group('all') @@ -72,10 +72,10 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** # Add any variables to the new_host for k in args.keys(): if not k in [ 'name', 'hostname', 'groupname', 'groups' ]: - new_host.set_variable(k, args[k]) - - - groupnames = args.get('groupname', args.get('groups', args.get('group', ''))) + new_host.set_variable(k, args[k]) + + + groupnames = args.get('groupname', args.get('groups', args.get('group', ''))) # add it to the group if that was specified if groupnames: for group_name in groupnames.split(","): @@ -95,13 +95,17 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** vv("added host to group via add_host module: %s" % group_name) result['new_groups'] = groupnames.split(",") - + + + # actually load host vars + new_host.vars = inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password) + result['new_host'] = new_name # clear pattern caching completely since it's unpredictable what # patterns may have referenced the group inventory.clear_pattern_cache() - + return ReturnData(conn=conn, comm_ok=True, result=result) From a53cf9d6fae511fb3a9444cca5c9afde5a1ea6ad Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 20 Mar 2015 11:22:07 -0400 Subject: [PATCH 0118/3617] now correctly aplies add_host passed variables last to override existing vars. 
--- lib/ansible/runner/action_plugins/add_host.py | 11 +++++------ test/integration/unicode.yml | 6 ++++++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/lib/ansible/runner/action_plugins/add_host.py b/lib/ansible/runner/action_plugins/add_host.py index 2fcea6cd5c7ab8..72172fcaec9991 100644 --- a/lib/ansible/runner/action_plugins/add_host.py +++ b/lib/ansible/runner/action_plugins/add_host.py @@ -69,12 +69,6 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** inventory._hosts_cache[new_name] = new_host allgroup.add_host(new_host) - # Add any variables to the new_host - for k in args.keys(): - if not k in [ 'name', 'hostname', 'groupname', 'groups' ]: - new_host.set_variable(k, args[k]) - - groupnames = args.get('groupname', args.get('groups', args.get('group', ''))) # add it to the group if that was specified if groupnames: @@ -100,6 +94,11 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** # actually load host vars new_host.vars = inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password) + # Add any passed variables to the new_host + for k in args.keys(): + if not k in [ 'name', 'hostname', 'groupname', 'groups' ]: + new_host.set_variable(k, args[k]) + result['new_host'] = new_name # clear pattern caching completely since it's unpredictable what diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index 2889155055d647..6dca7fe490b16e 100644 --- a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -42,6 +42,12 @@ debug: var=unicode_host_var +- name: 'A play for hosts in group: ĪīĬĭ' + hosts: 'ĪīĬĭ' + gather_facts: false + tasks: + - debug: var=hostvars[inventory_hostname] + - name: 'A play for hosts in group: ĪīĬĭ' hosts: 'ĪīĬĭ' gather_facts: true From c49685b753b63332e3f648795839d2067fa36205 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 20 Mar 2015 11:24:35 -0400 Subject: [PATCH 0119/3617] removed 
debug play from tests --- test/integration/unicode.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index 6dca7fe490b16e..b04d760182c9ef 100644 --- a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -41,13 +41,6 @@ - name: 'A task with unicode host vars' debug: var=unicode_host_var - -- name: 'A play for hosts in group: ĪīĬĭ' - hosts: 'ĪīĬĭ' - gather_facts: false - tasks: - - debug: var=hostvars[inventory_hostname] - - name: 'A play for hosts in group: ĪīĬĭ' hosts: 'ĪīĬĭ' gather_facts: true From d4ebe7750204cb3d61449ad22fab6aef685e961e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 20 Mar 2015 11:34:18 -0400 Subject: [PATCH 0120/3617] now use combine vars to preserve existing cached host vars --- lib/ansible/runner/action_plugins/add_host.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/action_plugins/add_host.py b/lib/ansible/runner/action_plugins/add_host.py index 72172fcaec9991..995b205b628553 100644 --- a/lib/ansible/runner/action_plugins/add_host.py +++ b/lib/ansible/runner/action_plugins/add_host.py @@ -20,7 +20,7 @@ from ansible.callbacks import vv from ansible.errors import AnsibleError as ae from ansible.runner.return_data import ReturnData -from ansible.utils import parse_kv +from ansible.utils import parse_kv, combine_vars from ansible.inventory.host import Host from ansible.inventory.group import Group @@ -92,7 +92,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** # actually load host vars - new_host.vars = inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password) + new_host.vars = combine_vars(new_host.vars, inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password)) # Add any passed variables to the new_host for k in args.keys(): From 8a5f162e29f45ce427606706f7e3908ec4ca2bda Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Fri, 20 Mar 2015 16:45:54 +0100 Subject: [PATCH 0121/3617] [patch] fix "remote_src" behavior according patch module documentation. Patch documentation says "remote_src" us False by default. That was not the case in the action plugin. --- lib/ansible/runner/action_plugins/patch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/patch.py b/lib/ansible/runner/action_plugins/patch.py index dbba4c53dd7889..ebd0c6cf59454e 100644 --- a/lib/ansible/runner/action_plugins/patch.py +++ b/lib/ansible/runner/action_plugins/patch.py @@ -32,7 +32,7 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** src = options.get('src', None) dest = options.get('dest', None) - remote_src = utils.boolean(options.get('remote_src', 'yes')) + remote_src = utils.boolean(options.get('remote_src', 'no')) if src is None: result = dict(failed=True, msg="src is required") From 6888f1ccd9a60d656b868317c9fa46e9524bd3f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Fri, 20 Mar 2015 17:13:50 +0100 Subject: [PATCH 0122/3617] [patch] Use _make_tmp_path to prevent from copying full patch file path. 
--- lib/ansible/runner/action_plugins/patch.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/action_plugins/patch.py b/lib/ansible/runner/action_plugins/patch.py index ebd0c6cf59454e..29d4f7eca5a190 100644 --- a/lib/ansible/runner/action_plugins/patch.py +++ b/lib/ansible/runner/action_plugins/patch.py @@ -47,7 +47,10 @@ def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, ** else: src = utils.path_dwim(self.runner.basedir, src) - tmp_src = tmp + src + if tmp is None or "-tmp-" not in tmp: + tmp = self.runner._make_tmp_path(conn) + + tmp_src = conn.shell.join_path(tmp, os.path.basename(src)) conn.put_file(src, tmp_src) if self.runner.become and self.runner.become_user != 'root': From d4eddabb2a04b61cf4f880b46b3642c4c9a4987d Mon Sep 17 00:00:00 2001 From: Eri Bastos Date: Fri, 20 Mar 2015 14:40:44 -0300 Subject: [PATCH 0123/3617] Patch for bug #10485 - ansible_distribution fact populates as 'RedHat' on Oracle Linux systems --- lib/ansible/module_utils/facts.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 93fe68786d80cf..40be989241f6d2 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -87,7 +87,8 @@ class Facts(object): _I386RE = re.compile(r'i([3456]86|86pc)') # For the most part, we assume that platform.dist() will tell the truth. 
# This is the fallback to handle unknowns or exceptions - OSDIST_LIST = ( ('/etc/redhat-release', 'RedHat'), + OSDIST_LIST = ( ('/etc/oracle-release', 'Oracle Linux'), + ('/etc/redhat-release', 'RedHat'), ('/etc/vmware-release', 'VMwareESX'), ('/etc/openwrt_release', 'OpenWrt'), ('/etc/system-release', 'OtherLinux'), @@ -287,6 +288,13 @@ def get_distribution_facts(self): # Once we determine the value is one of these distros # we trust the values are always correct break + elif name == 'Oracle Linux': + data = get_file_content(path) + if 'Oracle Linux' in data: + self.facts['distribution'] = name + else: + self.facts['distribution'] = data.split()[0] + break elif name == 'RedHat': data = get_file_content(path) if 'Red Hat' in data: From b186676e381dedc7c38b0488cd586db4711880c7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 20 Mar 2015 11:30:57 -0700 Subject: [PATCH 0124/3617] Clean up jsonify and make json_dict_*to* more flexible at the same time. --- v2/ansible/module_utils/basic.py | 35 ++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/v2/ansible/module_utils/basic.py b/v2/ansible/module_utils/basic.py index 8c424663ff9df4..6c7217bd8838f6 100644 --- a/v2/ansible/module_utils/basic.py +++ b/v2/ansible/module_utils/basic.py @@ -65,6 +65,7 @@ import platform import errno import tempfile +from itertools import imap, repeat try: import json @@ -234,7 +235,7 @@ def load_platform_subclass(cls, *args, **kwargs): return super(cls, subclass).__new__(subclass) -def json_dict_unicode_to_bytes(d): +def json_dict_unicode_to_bytes(d, encoding='utf-8'): ''' Recursively convert dict keys and values to byte str Specialized for json return because this only handles, lists, tuples, @@ -242,17 +243,17 @@ def json_dict_unicode_to_bytes(d): ''' if isinstance(d, unicode): - return d.encode('utf-8') + return d.encode(encoding) elif isinstance(d, dict): - return dict(map(json_dict_unicode_to_bytes, d.iteritems())) + return 
dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding))) elif isinstance(d, list): - return list(map(json_dict_unicode_to_bytes, d)) + return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding))) elif isinstance(d, tuple): - return tuple(map(json_dict_unicode_to_bytes, d)) + return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding))) else: return d -def json_dict_bytes_to_unicode(d): +def json_dict_bytes_to_unicode(d, encoding='utf-8'): ''' Recursively convert dict keys and values to byte str Specialized for json return because this only handles, lists, tuples, @@ -260,13 +261,13 @@ def json_dict_bytes_to_unicode(d): ''' if isinstance(d, str): - return unicode(d, 'utf-8') + return unicode(d, encoding) elif isinstance(d, dict): - return dict(map(json_dict_bytes_to_unicode, d.iteritems())) + return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding))) elif isinstance(d, list): - return list(map(json_dict_bytes_to_unicode, d)) + return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding))) elif isinstance(d, tuple): - return tuple(map(json_dict_bytes_to_unicode, d)) + return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding))) else: return d @@ -1189,13 +1190,17 @@ def boolean(self, arg): self.fail_json(msg='Boolean %s not in either boolean list' % arg) def jsonify(self, data): - for encoding in ("utf-8", "latin-1", "unicode_escape"): + for encoding in ("utf-8", "latin-1"): try: return json.dumps(data, encoding=encoding) - # Old systems using simplejson module does not support encoding keyword. - except TypeError, e: - return json.dumps(data) - except UnicodeDecodeError, e: + # Old systems using old simplejson module does not support encoding keyword. 
+ except TypeError: + try: + new_data = json_dict_bytes_to_unicode(data, encoding=encoding) + except UnicodeDecodeError: + continue + return json.dumps(new_data) + except UnicodeDecodeError: continue self.fail_json(msg='Invalid unicode encoding encountered') From 8d8c4c061572478cd09e0e071fa2711ee3bbb5db Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 20 Mar 2015 11:39:58 -0700 Subject: [PATCH 0125/3617] Update modules for asg tag fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index a78de5080109ee..4ce57ee1217344 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit a78de5080109eeaf46d5e42f9bbeb4f02d510627 +Subproject commit 4ce57ee12173449179fc52a82849888488c9b72f From 393246fdd3ebd75eaa23de0f84efe71bfec5c305 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 20 Mar 2015 14:13:51 -0500 Subject: [PATCH 0126/3617] Make v2 playbook class attributes inheritable Also fixing some other become-related things --- v2/ansible/executor/connection_info.py | 16 +++---- v2/ansible/playbook/base.py | 27 ++++++++--- v2/ansible/playbook/become.py | 38 +++++++++++++++ v2/ansible/playbook/block.py | 46 ++++++++++++++----- v2/ansible/playbook/helpers.py | 2 + v2/ansible/playbook/play.py | 1 + v2/ansible/playbook/role/__init__.py | 19 ++++---- v2/ansible/playbook/role/definition.py | 8 +++- v2/ansible/playbook/task.py | 32 ++++++++----- v2/samples/roles/test_become_r1/meta/main.yml | 1 + .../roles/test_become_r1/tasks/main.yml | 2 + v2/samples/roles/test_become_r2/meta/main.yml | 3 ++ .../roles/test_become_r2/tasks/main.yml | 2 + v2/samples/test_become.yml | 6 +++ 14 files changed, 152 insertions(+), 51 deletions(-) create mode 100644 v2/samples/roles/test_become_r1/meta/main.yml create mode 100644 v2/samples/roles/test_become_r1/tasks/main.yml create mode 100644 v2/samples/roles/test_become_r2/meta/main.yml create mode 
100644 v2/samples/roles/test_become_r2/tasks/main.yml diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index 26a14a23f9d1d4..165cd1245fb44c 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -157,13 +157,10 @@ def set_task_override(self, task): new_info.copy(self) for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'): - attr_val = None if hasattr(task, attr): attr_val = getattr(task, attr) - if task._block and hasattr(task._block, attr) and not attr_val: - attr_val = getattr(task._block, attr) - if attr_val: - setattr(new_info, attr, attr_val) + if attr_val: + setattr(new_info, attr, attr_val) return new_info @@ -184,6 +181,7 @@ def make_become_cmd(self, cmd, executable, become_settings=None): executable = executable or '$SHELL' + success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd)) if self.become: if self.become_method == 'sudo': # Rather than detect if sudo wants a password this time, -k makes sudo always ask for @@ -195,23 +193,23 @@ def make_become_cmd(self, cmd, executable, become_settings=None): exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE) flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS) becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \ - (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, pipes.quote('echo %s; %s' % (success_key, cmd))) + (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd) elif self.become_method == 'su': exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE) flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS) - becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, pipes.quote('echo %s; %s' % (success_key, cmd))) + becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd) elif self.become_method == 
'pbrun': exe = become_settings.get('pbrun_exe', 'pbrun') flags = become_settings.get('pbrun_flags', '') - becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key, cmd))) + becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, success_cmd) elif self.become_method == 'pfexec': exe = become_settings.get('pfexec_exe', 'pbrun') flags = become_settings.get('pfexec_flags', '') # No user as it uses it's own exec_attr to figure it out - becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key, cmd))) + becomecmd = '%s %s "%s"' % (exe, flags, success_cmd) else: raise errors.AnsibleError("Privilege escalation method not found: %s" % method) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 949e6a09fdc652..e32da5d8c5a90c 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -72,11 +72,20 @@ def _get_base_attributes(self): def munge(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' + def _get_base_classes_munge(target_class): + base_classes = list(target_class.__bases__[:]) + for base_class in target_class.__bases__: + base_classes.extend( _get_base_classes_munge(base_class)) + return base_classes + + base_classes = list(self.__class__.__bases__[:]) for base_class in self.__class__.__bases__: - method = getattr(self, ("_munge_%s" % base_class.__name__).lower(), None) - if method: - ds = method(ds) + base_classes.extend(_get_base_classes_munge(base_class)) + for base_class in base_classes: + method = getattr(self, "_munge_%s" % base_class.__name__.lower(), None) + if method: + return method(ds) return ds def load_data(self, ds, variable_manager=None, loader=None): @@ -271,15 +280,21 @@ def __getattr__(self, needle): # optionally allowing masking by accessors if not needle.startswith("_"): - method = "get_%s" % needle - if method in self.__dict__: - return method(self) + method = "_get_attr_%s" % needle + if method in 
dir(self): + return getattr(self, method)() if needle in self._attributes: return self._attributes[needle] raise AttributeError("attribute not found in %s: %s" % (self.__class__.__name__, needle)) + def __setattr__(self, needle, value): + if hasattr(self, '_attributes') and needle in self._attributes: + self._attributes[needle] = value + else: + super(Base, self).__setattr__(needle, value) + def __getstate__(self): return self.serialize() diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py index 0b0ad10176002e..67eb52b15eeedd 100644 --- a/v2/ansible/playbook/become.py +++ b/v2/ansible/playbook/become.py @@ -95,3 +95,41 @@ def _munge_become(self, ds): ds['become_user'] = C.DEFAULT_BECOME_USER return ds + + def _get_attr_become(self): + ''' + Override for the 'become' getattr fetcher, used from Base. + ''' + if hasattr(self, '_get_parent_attribute'): + return self._get_parent_attribute('become') + else: + return self._attributes['become'] + + def _get_attr_become_method(self): + ''' + Override for the 'become_method' getattr fetcher, used from Base. + ''' + if hasattr(self, '_get_parent_attribute'): + return self._get_parent_attribute('become_method') + else: + return self._attributes['become_method'] + + def _get_attr_become_user(self): + ''' + Override for the 'become_user' getattr fetcher, used from Base. + ''' + if hasattr(self, '_get_parent_attribute'): + return self._get_parent_attribute('become_user') + else: + return self._attributes['become_user'] + + def _get_attr_become_password(self): + ''' + Override for the 'become_password' getattr fetcher, used from Base. 
+ ''' + if hasattr(self, '_get_parent_attribute'): + return self._get_parent_attribute('become_password') + else: + return self._attributes['become_password'] + + diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index fa67b6ae1b99d8..2946e83f5ef394 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -131,23 +131,24 @@ def _load_always(self, attr, ds): # use_handlers=self._use_handlers, # ) - def compile(self): - ''' - Returns the task list for this object - ''' - - task_list = [] - for task in self.block: - # FIXME: evaulate task tags/conditionals here - task_list.extend(task.compile()) - - return task_list - def copy(self): + def _dupe_task_list(task_list, new_block): + new_task_list = [] + for task in task_list: + new_task = task.copy(exclude_block=True) + new_task._block = new_block + new_task_list.append(new_task) + return new_task_list + new_me = super(Block, self).copy() new_me._use_handlers = self._use_handlers new_me._dep_chain = self._dep_chain[:] + new_me.block = _dupe_task_list(self.block or [], new_me) + new_me.rescue = _dupe_task_list(self.rescue or [], new_me) + new_me.always = _dupe_task_list(self.always or [], new_me) + print("new block tasks are: %s" % new_me.block) + new_me._parent_block = None if self._parent_block: new_me._parent_block = self._parent_block.copy() @@ -252,3 +253,24 @@ def set_loader(self, loader): for dep in self._dep_chain: dep.set_loader(loader) + def _get_parent_attribute(self, attr): + ''' + Generic logic to get the attribute or parent attribute for a block value. 
+ ''' + + value = self._attributes[attr] + if not value: + if self._parent_block: + value = getattr(self._block, attr) + elif self._role: + value = getattr(self._role, attr) + if not value and len(self._dep_chain): + reverse_dep_chain = self._dep_chain[:] + reverse_dep_chain.reverse() + for dep in reverse_dep_chain: + value = getattr(dep, attr) + if value: + break + + return value + diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py index 0e147205578406..3ea559d7997b5d 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -37,6 +37,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use assert type(ds) in (list, NoneType) block_list = [] + print("in load list of blocks, ds is: %s" % ds) if ds: for block in ds: b = Block.load( @@ -50,6 +51,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use ) block_list.append(b) + print("-> returning block list: %s" % block_list) return block_list diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index cbe4e038617a82..190189aa178afe 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -219,6 +219,7 @@ def compile(self): block_list.extend(self.tasks) block_list.extend(self.post_tasks) + print("block list is: %s" % block_list) return block_list def get_vars(self): diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index dfb1f70addf036..21bcd21803e423 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -30,6 +30,7 @@ from ansible.parsing import DataLoader from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base +from ansible.playbook.become import Become from ansible.playbook.conditional import Conditional from ansible.playbook.helpers import load_list_of_blocks, compile_block_list from ansible.playbook.role.include import RoleInclude @@ -69,7 +70,7 @@ def 
hash_params(params): ROLE_CACHE = dict() -class Role(Base, Conditional, Taggable): +class Role(Base, Become, Conditional, Taggable): def __init__(self): self._role_name = None @@ -136,6 +137,12 @@ def _load_role_data(self, role_include, parent_role=None): if parent_role: self.add_parent(parent_role) + # copy over all field attributes, except for when and tags, which + # are special cases and need to preserve pre-existing values + for (attr_name, _) in iteritems(self._get_base_attributes()): + if attr_name not in ('when', 'tags'): + setattr(self, attr_name, getattr(role_include, attr_name)) + current_when = getattr(self, 'when')[:] current_when.extend(role_include.when) setattr(self, 'when', current_when) @@ -144,10 +151,6 @@ def _load_role_data(self, role_include, parent_role=None): current_tags.extend(role_include.tags) setattr(self, 'tags', current_tags) - # save the current base directory for the loader and set it to the current role path - #cur_basedir = self._loader.get_basedir() - #self._loader.set_basedir(self._role_path) - # load the role's files, if they exist library = os.path.join(self._role_path, 'library') if os.path.isdir(library): @@ -179,9 +182,6 @@ def _load_role_data(self, role_include, parent_role=None): elif self._default_vars is None: self._default_vars = dict() - # and finally restore the previous base directory - #self._loader.set_basedir(cur_basedir) - def _load_role_yaml(self, subdir): file_path = os.path.join(self._role_path, subdir) if self._loader.path_exists(file_path) and self._loader.is_directory(file_path): @@ -313,9 +313,6 @@ def compile(self, dep_chain=[]): for dep in deps: dep_blocks = dep.compile(dep_chain=new_dep_chain) for dep_block in dep_blocks: - # since we're modifying the task, and need it to be unique, - # we make a copy of it here and assign the dependency chain - # to the copy, then append the copy to the task list. 
new_dep_block = dep_block.copy() new_dep_block._dep_chain = new_dep_chain block_list.append(new_dep_block) diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py index d52c6795fb92d4..bc1a0daacf2ae7 100644 --- a/v2/ansible/playbook/role/definition.py +++ b/v2/ansible/playbook/role/definition.py @@ -28,6 +28,7 @@ from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base +from ansible.playbook.become import Become from ansible.playbook.conditional import Conditional from ansible.playbook.taggable import Taggable from ansible.utils.path import unfrackpath @@ -36,7 +37,7 @@ __all__ = ['RoleDefinition'] -class RoleDefinition(Base, Conditional, Taggable): +class RoleDefinition(Base, Become, Conditional, Taggable): _role = FieldAttribute(isa='string') @@ -57,6 +58,9 @@ def munge(self, ds): assert isinstance(ds, dict) or isinstance(ds, string_types) + if isinstance(ds, dict): + ds = super(RoleDefinition, self).munge(ds) + # we create a new data structure here, using the same # object used internally by the YAML parsing code so we # can preserve file:line:column information if it exists @@ -88,7 +92,7 @@ def munge(self, ds): self._ds = ds # and return the cleaned-up data structure - return super(RoleDefinition, self).munge(new_ds) + return new_ds def _load_role_name(self, ds): ''' diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 79ec2df3401ad5..ab66898242cb5a 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -210,20 +210,21 @@ def get_vars(self): del all_vars['when'] return all_vars - def compile(self): - ''' - For tasks, this is just a dummy method returning an array - with 'self' in it, so we don't have to care about task types - further up the chain. 
- ''' - - return [self] - - def copy(self): + # no longer used, as blocks are the lowest level of compilation now + #def compile(self): + # ''' + # For tasks, this is just a dummy method returning an array + # with 'self' in it, so we don't have to care about task types + # further up the chain. + # ''' + # + # return [self] + + def copy(self, exclude_block=False): new_me = super(Task, self).copy() new_me._block = None - if self._block: + if self._block and not exclude_block: new_me._block = self._block.copy() new_me._role = None @@ -309,3 +310,12 @@ def set_loader(self, loader): if self._task_include: self._task_include.set_loader(loader) + def _get_parent_attribute(self, attr): + ''' + Generic logic to get the attribute or parent attribute for a task value. + ''' + value = self._attributes[attr] + if not value and self._block: + value = getattr(self._block, attr) + return value + diff --git a/v2/samples/roles/test_become_r1/meta/main.yml b/v2/samples/roles/test_become_r1/meta/main.yml new file mode 100644 index 00000000000000..603a2d53a2507f --- /dev/null +++ b/v2/samples/roles/test_become_r1/meta/main.yml @@ -0,0 +1 @@ +allow_duplicates: yes diff --git a/v2/samples/roles/test_become_r1/tasks/main.yml b/v2/samples/roles/test_become_r1/tasks/main.yml new file mode 100644 index 00000000000000..9231d0af98a26d --- /dev/null +++ b/v2/samples/roles/test_become_r1/tasks/main.yml @@ -0,0 +1,2 @@ +- debug: msg="this is test_become_r1" +- command: whoami diff --git a/v2/samples/roles/test_become_r2/meta/main.yml b/v2/samples/roles/test_become_r2/meta/main.yml new file mode 100644 index 00000000000000..9304df73a0db9b --- /dev/null +++ b/v2/samples/roles/test_become_r2/meta/main.yml @@ -0,0 +1,3 @@ +allow_duplicates: yes +dependencies: + - test_become_r1 diff --git a/v2/samples/roles/test_become_r2/tasks/main.yml b/v2/samples/roles/test_become_r2/tasks/main.yml new file mode 100644 index 00000000000000..01d6d313852a54 --- /dev/null +++ 
b/v2/samples/roles/test_become_r2/tasks/main.yml @@ -0,0 +1,2 @@ +- debug: msg="this is test_become_r2" +- command: whoami diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml index 4b02563ca79257..eb527e595958d2 100644 --- a/v2/samples/test_become.yml +++ b/v2/samples/test_become.yml @@ -1,8 +1,14 @@ - hosts: all gather_facts: no + roles: + - { role: test_become_r2 } + - { role: test_become_r2, sudo_user: testing } tasks: + - command: whoami - command: whoami become_user: testing + - block: + - command: whoami - block: - command: whoami become_user: testing From 94909bd4a2ce31d13378980b126953dcf38f555a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 13 Mar 2015 11:43:02 -0400 Subject: [PATCH 0127/3617] Added return values documentation to modules --- docsite/rst/common_return_values.rst | 47 ++++++++++++++++++++++++++++ hacking/module_formatter.py | 1 + hacking/templates/rst.j2 | 19 +++++++++-- 3 files changed, 65 insertions(+), 2 deletions(-) create mode 100644 docsite/rst/common_return_values.rst diff --git a/docsite/rst/common_return_values.rst b/docsite/rst/common_return_values.rst new file mode 100644 index 00000000000000..ebee58c1c25900 --- /dev/null +++ b/docsite/rst/common_return_values.rst @@ -0,0 +1,47 @@ +Common Return Values +==================== + +.. contents:: Topics + +Ansible modules normally return a data structure that can be registered into a variable, +or seen directly when using the `ansible` program as output. + +.. _facts: + +Facts +````` + +Some modules return 'facts' to ansible (i.e setup), this is done through a 'ansible_facts' key and anything inside +will automatically be available for the current host directly as a variable and there is no need to +register this data. + + +.. _status: + +Status +`````` + +Every module must return a status, saying if the module was successful, if anything changed or not. 
Ansible itself +will return a status if it skips the module due to a user condition (when: ) or running in check mode when the module +does not support it. + + +.. _other: + +Other common returns +```````````````````` + +It is common on failure or success to return a 'msg' that either explains the failure or makes a note about the execution. +Some modules, specifically those that execute shell or commands directly, will return stdout and stderr, if ansible sees +a stdout in the results it will append a stdout_lines which is just a list or the lines in stdout. + +.. seealso:: + + :doc:`modules` + Learn about available modules + `GitHub modules directory `_ + Browse source of core modules + `Mailing List `_ + Development mailing list + `irc.freenode.net `_ + #ansible IRC chat channel diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 1bc83ad93049a2..6d595c634d6ba4 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -289,6 +289,7 @@ def process_module(module, options, env, template, outputname, module_map, alias doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['ansible_version'] = options.ansible_version doc['plainexamples'] = examples #plain text + doc['returndocs'] = returndocs # here is where we build the table of contents... diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index e5562d3e56b136..122cebb590e71f 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -106,6 +106,21 @@ Examples {% endif %} {% endif %} + +{% if returndocs %} +Return Values +------------- + +Common return values are documented here ::doc::`common_return_values`, the following are the fields unique to this module: + +.. raw:: html +
+@{ returndocs }@
+
+ +:: +{% endif %} + {% if notes %} {% for note in notes %} .. note:: @{ note | convert_symbols_to_format }@ @@ -120,7 +135,7 @@ This is a Core Module --------------------- This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo. - + If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. @@ -135,7 +150,7 @@ This is an Extras Module ------------------------ This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo. - + If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. 
From 690d227034354a8f6cc286de029344a70cfb9830 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 13 Mar 2015 11:45:22 -0400 Subject: [PATCH 0128/3617] extended return value explanation --- docsite/rst/common_return_values.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docsite/rst/common_return_values.rst b/docsite/rst/common_return_values.rst index ebee58c1c25900..38a6917233989b 100644 --- a/docsite/rst/common_return_values.rst +++ b/docsite/rst/common_return_values.rst @@ -3,8 +3,9 @@ Common Return Values .. contents:: Topics -Ansible modules normally return a data structure that can be registered into a variable, -or seen directly when using the `ansible` program as output. +Ansible modules normally return a data structure that can be registered into a variable, or seen directly when using +the `ansible` program as output. Here we document the values common to all modules, each module can optionally document +it's own unique returns. If these docs exist they will be visible through ansible-doc and https://docs.ansible.com. .. _facts: From 2cacac4b23c6979daf8e037738d152afac78899d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 13 Mar 2015 12:17:15 -0400 Subject: [PATCH 0129/3617] minor adjustments to formatting --- hacking/templates/rst.j2 | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 122cebb590e71f..6d3c21f4240804 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -114,11 +114,15 @@ Return Values Common return values are documented here ::doc::`common_return_values`, the following are the fields unique to this module: .. raw:: html -
-@{ returndocs }@
-
+ +

+

+    @{ returndocs }@
+    
+

:: + {% endif %} {% if notes %} From 64b447f01bf5338195627eff2fec4e62257f6f02 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 13 Mar 2015 12:22:55 -0400 Subject: [PATCH 0130/3617] grammer correction --- docsite/rst/common_return_values.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/common_return_values.rst b/docsite/rst/common_return_values.rst index 38a6917233989b..ff2b92b4af0a92 100644 --- a/docsite/rst/common_return_values.rst +++ b/docsite/rst/common_return_values.rst @@ -5,7 +5,7 @@ Common Return Values Ansible modules normally return a data structure that can be registered into a variable, or seen directly when using the `ansible` program as output. Here we document the values common to all modules, each module can optionally document -it's own unique returns. If these docs exist they will be visible through ansible-doc and https://docs.ansible.com. +its own unique returns. If these docs exist they will be visible through ansible-doc and https://docs.ansible.com. .. 
_facts: From c3076b84788f78a075764e4d9e8fb28fef5db60c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 20 Mar 2015 16:54:22 -0400 Subject: [PATCH 0131/3617] added module returnval documentation to web docs --- hacking/module_formatter.py | 5 +++- hacking/templates/rst.j2 | 53 ++++++++++++++++++++++++++++++++----- 2 files changed, 50 insertions(+), 8 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 6d595c634d6ba4..c3aca94949c2bf 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -289,7 +289,10 @@ def process_module(module, options, env, template, outputname, module_map, alias doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') doc['ansible_version'] = options.ansible_version doc['plainexamples'] = examples #plain text - doc['returndocs'] = returndocs + if returndocs: + doc['returndocs'] = yaml.safe_load(returndocs) + else: + doc['returndocs'] = None # here is where we build the table of contents... diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 6d3c21f4240804..6873c3fea5855d 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -111,18 +111,57 @@ Examples Return Values ------------- -Common return values are documented here ::doc::`common_return_values`, the following are the fields unique to this module: +Common return values are documented here :doc:`common_return_values`, the following are the fields unique to this module: .. raw:: html -

-

-    @{ returndocs }@
-    
-

+ + + + + + + + -:: + {% for entry in returndocs %} + + + + + + + + {% if returndocs[entry].type == 'dictionary' %} + + + {% endif %} + {% endfor %} + +
namedespcriptionreturnedtypesample
@{ entry }@ @{ returndocs[entry].description }@ @{ returndocs[entry].returned }@ @{ returndocs[entry].type }@ @{ returndocs[entry].sample}@
contains: + + + + + + + + + + {% for sub in returndocs[entry].contains %} + + + + + + + + {% endfor %} + +
namedespcriptionreturnedtypesample
@{ sub }@ @{ returndocs[entry].contains[sub].description }@ @{ returndocs[entry].contains[sub].returned }@ @{ returndocs[entry].contains[sub].type }@ @{ returndocs[entry].contains[sub].sample}@
+
+

{% endif %} {% if notes %} From 72586d0df5fd0c7b51a0be193622f0653d7c7e1e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 20 Mar 2015 17:27:00 -0400 Subject: [PATCH 0132/3617] updated to latest core/devel --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 4ce57ee1217344..7683f36613ec09 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 4ce57ee12173449179fc52a82849888488c9b72f +Subproject commit 7683f36613ec0904618b9b2d07f215b3f028a4e0 From c7c8425856f55d7b2e54b179ef9b27a5a3efb98c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 20 Mar 2015 23:12:16 -0400 Subject: [PATCH 0133/3617] fixed command line PE options to be the same as in 1.9 --- v2/ansible/utils/cli.py | 48 ++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py index f846d6f73ca336..6ef416b9745fa1 100644 --- a/v2/ansible/utils/cli.py +++ b/v2/ansible/utils/cli.py @@ -55,12 +55,6 @@ def base_parser(usage="", output_opts=False, runas_opts=False, help='ask for SSH password') parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', help='use this file to authenticate the connection') - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', - help='ask for sudo password') - parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', - help='ask for su password') - parser.add_option('--ask-become-pass', default=False, dest='ask_become_pass', action='store_true', - help='ask for privlege escalation password') parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, @@ -86,29 +80,33 @@ def 
base_parser(usage="", output_opts=False, runas_opts=False, help='log output to this directory') if runas_opts: - parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", - dest='become', help="run operations with become (nopasswd implied)") - parser.add_option('-B', '--become-user', help='run operations with as this ' - 'user (default=%s)' % C.DEFAULT_BECOME_USER) - parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", - dest='sudo', help="run operations with sudo (nopasswd)") + # priv user defaults to root later on to enable detecting when this option was given here + parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + help='ask for sudo password (deprecated, use become)') + parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', + help='ask for su password (deprecated, use become)') + parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo', + help="run operations with sudo (nopasswd) (deprecated, use become)") parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, - help='desired sudo user (default=root)') # Can't default to root because we need to detect when this option was given - parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, - dest='remote_user', help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) + help='desired sudo user (default=root) (deprecated, use become)') + parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true', + help='run operations with su (deprecated, use become)') + parser.add_option('-R', '--su-user', default=None, + help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER) + + # consolidated privilege escalation (become) + parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become', + help="run 
operations with become (nopasswd implied)") + parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string', + help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS))) + parser.add_option('--become-user', default=None, dest='become_user', type='string', + help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER) + parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', + help='ask for privilege escalation password') - parser.add_option('-S', '--su', default=C.DEFAULT_SU, - action='store_true', help='run operations with su') - parser.add_option('-R', '--su-user', help='run operations with su as this ' - 'user (default=%s)' % C.DEFAULT_SU_USER) if connect_opts: - parser.add_option('-c', '--connection', dest='connection', - default=C.DEFAULT_TRANSPORT, - help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) - parser.add_option('--become-method', dest='become_method', - default=C.DEFAULT_BECOME_METHOD, - help="privlege escalation method to use (default=%s)" % C.DEFAULT_BECOME_METHOD) + parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) if async_opts: parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', From cf96c7719e4974f69cd4691ecfe21ba5cda29c55 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 20 Mar 2015 23:48:52 -0400 Subject: [PATCH 0134/3617] added become_method list and pipeline support to connection class methods added generic method to check supported become methods for the connection plugin --- v2/ansible/plugins/connections/__init__.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py index 
aad19b77643de5..11015d7431338d 100644 --- a/v2/ansible/plugins/connections/__init__.py +++ b/v2/ansible/plugins/connections/__init__.py @@ -34,8 +34,18 @@ class ConnectionBase: A base class for connections to contain common code. ''' + has_pipelining = False + become_methods = C.BECOME_METHODS + def __init__(self, connection_info, *args, **kwargs): self._connection_info = connection_info - self._has_pipelining = False self._display = Display(connection_info) + + def _become_method_supported(self, become_method): + ''' Checks if the current class supports this privilege escalation method ''' + + if become_method in self.__class__.become_methods: + return True + + raise errors.AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method) From 93c9803818d6fe46ece22c6019f0af932f405a42 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 20 Mar 2015 21:43:41 -0500 Subject: [PATCH 0135/3617] Removing some leftover debug prints and cleaning up test sample --- v2/ansible/playbook/block.py | 1 - v2/ansible/playbook/helpers.py | 2 -- v2/ansible/playbook/play.py | 1 - v2/samples/roles/test_become_r1/meta/main.yml | 2 +- v2/samples/roles/test_become_r1/tasks/main.yml | 1 - v2/samples/roles/test_become_r2/meta/main.yml | 2 +- v2/samples/roles/test_become_r2/tasks/main.yml | 1 - v2/samples/test_become.yml | 4 ---- 8 files changed, 2 insertions(+), 12 deletions(-) diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 2946e83f5ef394..03957bfe2f6691 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -147,7 +147,6 @@ def _dupe_task_list(task_list, new_block): new_me.block = _dupe_task_list(self.block or [], new_me) new_me.rescue = _dupe_task_list(self.rescue or [], new_me) new_me.always = _dupe_task_list(self.always or [], new_me) - print("new block tasks are: %s" % new_me.block) new_me._parent_block = None if self._parent_block: diff --git a/v2/ansible/playbook/helpers.py 
b/v2/ansible/playbook/helpers.py index 3ea559d7997b5d..0e147205578406 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -37,7 +37,6 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use assert type(ds) in (list, NoneType) block_list = [] - print("in load list of blocks, ds is: %s" % ds) if ds: for block in ds: b = Block.load( @@ -51,7 +50,6 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use ) block_list.append(b) - print("-> returning block list: %s" % block_list) return block_list diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index 190189aa178afe..cbe4e038617a82 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -219,7 +219,6 @@ def compile(self): block_list.extend(self.tasks) block_list.extend(self.post_tasks) - print("block list is: %s" % block_list) return block_list def get_vars(self): diff --git a/v2/samples/roles/test_become_r1/meta/main.yml b/v2/samples/roles/test_become_r1/meta/main.yml index 603a2d53a2507f..cb58e2857bc3a3 100644 --- a/v2/samples/roles/test_become_r1/meta/main.yml +++ b/v2/samples/roles/test_become_r1/meta/main.yml @@ -1 +1 @@ -allow_duplicates: yes +#allow_duplicates: yes diff --git a/v2/samples/roles/test_become_r1/tasks/main.yml b/v2/samples/roles/test_become_r1/tasks/main.yml index 9231d0af98a26d..ef8d396978e611 100644 --- a/v2/samples/roles/test_become_r1/tasks/main.yml +++ b/v2/samples/roles/test_become_r1/tasks/main.yml @@ -1,2 +1 @@ -- debug: msg="this is test_become_r1" - command: whoami diff --git a/v2/samples/roles/test_become_r2/meta/main.yml b/v2/samples/roles/test_become_r2/meta/main.yml index 9304df73a0db9b..55b258adb4d336 100644 --- a/v2/samples/roles/test_become_r2/meta/main.yml +++ b/v2/samples/roles/test_become_r2/meta/main.yml @@ -1,3 +1,3 @@ -allow_duplicates: yes +#allow_duplicates: yes dependencies: - test_become_r1 diff --git 
a/v2/samples/roles/test_become_r2/tasks/main.yml b/v2/samples/roles/test_become_r2/tasks/main.yml index 01d6d313852a54..ef8d396978e611 100644 --- a/v2/samples/roles/test_become_r2/tasks/main.yml +++ b/v2/samples/roles/test_become_r2/tasks/main.yml @@ -1,2 +1 @@ -- debug: msg="this is test_become_r2" - command: whoami diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml index eb527e595958d2..b7550f33c778fe 100644 --- a/v2/samples/test_become.yml +++ b/v2/samples/test_become.yml @@ -1,14 +1,10 @@ - hosts: all gather_facts: no roles: - - { role: test_become_r2 } - { role: test_become_r2, sudo_user: testing } tasks: - - command: whoami - command: whoami become_user: testing - - block: - - command: whoami - block: - command: whoami become_user: testing From b370728439b17de1265f6c9227f151dec803bc75 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 21 Mar 2015 00:35:56 -0400 Subject: [PATCH 0136/3617] several fixes to cli tools - fixed issue with previous commit with bad constants vs C ref on become - added list-tags - rearranged common options to utils/cli.py - added generic validate for both vault and become conflicts - removed dupes and conflicting options --- v2/ansible/utils/cli.py | 64 ++++++++++++++++++++++++++++++----------- v2/bin/ansible | 23 ++++----------- v2/bin/ansible-playbook | 25 ++++++---------- 3 files changed, 63 insertions(+), 49 deletions(-) diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py index 6ef416b9745fa1..3b899e49c56f7a 100644 --- a/v2/ansible/utils/cli.py +++ b/v2/ansible/utils/cli.py @@ -38,7 +38,7 @@ def format_help(self, formatter=None): self.option_list.sort(key=operator.methodcaller('get_opt_string')) return optparse.OptionParser.format_help(self, formatter=None) -def base_parser(usage="", output_opts=False, runas_opts=False, +def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False): ''' 
create an options parser for any ansible script ''' @@ -52,7 +52,7 @@ def base_parser(usage="", output_opts=False, runas_opts=False, help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST, default=C.DEFAULT_HOST_LIST) parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', - help='ask for SSH password') + help='ask for connection password') parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', help='use this file to authenticate the connection') parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', @@ -64,14 +64,16 @@ def base_parser(usage="", output_opts=False, runas_opts=False, parser.add_option('-M', '--module-path', dest='module_path', help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None) + parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", + help="set additional variables as key=value or YAML/JSON", default=[]) if subset_opts: parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', help='further limit selected hosts to an additional pattern') - - parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', - dest='timeout', - help="override the SSH timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) + parser.add_option('-t', '--tags', dest='tags', default='all', + help="only run plays and tasks tagged with these values") + parser.add_option('--skip-tags', dest='skip_tags', + help="only run plays and tasks whose tags do not match these values") if output_opts: parser.add_option('-o', '--one-line', dest='one_line', action='store_true', @@ -85,28 +87,32 @@ def base_parser(usage="", output_opts=False, runas_opts=False, help='ask for sudo password (deprecated, use become)') parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') - 
parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo', + parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, help='desired sudo user (default=root) (deprecated, use become)') - parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true', + parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true', help='run operations with su (deprecated, use become)') parser.add_option('-R', '--su-user', default=None, - help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER) + help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER) # consolidated privilege escalation (become) - parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become', + parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become', help="run operations with become (nopasswd implied)") - parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string', - help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS))) + parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string', + help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) parser.add_option('--become-user', default=None, dest='become_user', type='string', - help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER) + help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) parser.add_option('--ask-become-pass', 
default=False, dest='become_ask_pass', action='store_true', help='ask for privilege escalation password') if connect_opts: - parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) + parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, + help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) + parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout', + help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) + if async_opts: parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', @@ -117,14 +123,20 @@ def base_parser(usage="", output_opts=False, runas_opts=False, if check_opts: parser.add_option("-C", "--check", default=False, dest='check', action='store_true', - help="don't make any changes; instead, try to predict some of the changes that may occur" - ) + help="don't make any changes; instead, try to predict some of the changes that may occur") + parser.add_option('--syntax-check', dest='syntax', action='store_true', + help="perform a syntax check on the playbook, but do not execute it") if diff_opts: parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true', help="when changing (small) files and templates, show the differences in those files; works great with --check" ) + if meta_opts: + parser.add_option('--force-handlers', dest='force_handlers', action='store_true', + help="run handlers even if a task fails") + parser.add_option('--flush-cache', dest='flush_cache', action='store_true', + help="clear the fact cache") return parser @@ -219,3 +231,23 @@ def _gitinfo(): f.close() return result +def validate_conflicts(parser, options): + + # Check for vault related conflicts + if (options.ask_vault_pass and options.vault_password_file): + parser.error("--ask-vault-pass and --vault-password-file are mutually 
exclusive") + + + # Check for privilege escalation conflicts + if (options.su or options.su_user or options.ask_su_pass) and \ + (options.sudo or options.sudo_user or options.ask_sudo_pass) or \ + (options.su or options.su_user or options.ask_su_pass) and \ + (options.become or options.become_user or options.become_ask_pass) or \ + (options.sudo or options.sudo_user or options.ask_sudo_pass) and \ + (options.become or options.become_user or options.become_ask_pass): + + parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " + "and su arguments ('-su', '--su-user', and '--ask-su-pass') " + "and become arguments ('--become', '--become-user', and '--ask-become-pass')" + " are exclusive of each other") + diff --git a/v2/bin/ansible b/v2/bin/ansible index c51040c6a844d5..1e298623f52848 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -29,7 +29,7 @@ from ansible.inventory import Inventory from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play -from ansible.utils.cli import base_parser +from ansible.utils.cli import base_parser, validate_conflicts from ansible.vars import VariableManager ######################################################## @@ -45,15 +45,14 @@ class Cli(object): parser = base_parser( usage='%prog [options]', - runas_opts=True, - subset_opts=True, + runas_opts=True, async_opts=True, - output_opts=True, - connect_opts=True, + output_opts=True, + connect_opts=True, check_opts=True, - diff_opts=False, ) + # options unique to ansible ad-hoc parser.add_option('-a', '--args', dest='module_args', help="module arguments", default=C.DEFAULT_MODULE_ARGS) parser.add_option('-m', '--module-name', dest='module_name', @@ -66,15 +65,7 @@ class Cli(object): parser.print_help() sys.exit(1) - # su and sudo command line arguments need to be mutually exclusive - if (options.su or options.su_user or options.ask_su_pass) and \ - (options.sudo or options.sudo_user or 
options.ask_sudo_pass): - parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " - "and su arguments ('-su', '--su-user', and '--ask-su-pass') are " - "mutually exclusive") - - if (options.ask_vault_pass and options.vault_password_file): - parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") + validate_conflicts(parser,options) return (options, args) @@ -113,8 +104,6 @@ class Cli(object): variable_manager = VariableManager() inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory) - if options.subset: - inventory.subset(options.subset) hosts = inventory.list_hosts(pattern) if len(hosts) == 0: diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index bdd9598ec82174..26bbe14c7acb56 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -12,7 +12,7 @@ from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook import Playbook from ansible.playbook.task import Task -from ansible.utils.cli import base_parser +from ansible.utils.cli import base_parser, validate_conflicts from ansible.utils.unicode import to_unicode from ansible.utils.vars import combine_vars from ansible.utils.vault import read_vault_file @@ -30,31 +30,22 @@ def main(args): parser = base_parser( usage = "%prog playbook.yml", connect_opts=True, + meta_opts=True, runas_opts=True, subset_opts=True, check_opts=True, - diff_opts=True + diff_opts=True, ) - parser.add_option('--vault-password', dest="vault_password", - help="password for vault encrypted files") - parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", - help="set additional variables as key=value or YAML/JSON", default=[]) - parser.add_option('-t', '--tags', dest='tags', default='all', - help="only run plays and tasks tagged with these values") - parser.add_option('--skip-tags', dest='skip_tags', - help="only run plays and tasks whose tags do not match 
these values") - parser.add_option('--syntax-check', dest='syntax', action='store_true', - help="perform a syntax check on the playbook, but do not execute it") + + # ansible playbook specific opts parser.add_option('--list-tasks', dest='listtasks', action='store_true', help="list all tasks that would be executed") parser.add_option('--step', dest='step', action='store_true', help="one-step-at-a-time: confirm each task before running") parser.add_option('--start-at-task', dest='start_at', help="start the playbook at the task matching this name") - parser.add_option('--force-handlers', dest='force_handlers', action='store_true', - help="run handlers even if a task fails") - parser.add_option('--flush-cache', dest='flush_cache', action='store_true', - help="clear the fact cache") + parser.add_option('--list-tags', dest='listtags', action='store_true', + help="list all available tags") options, args = parser.parse_args(args) @@ -62,6 +53,8 @@ parser.print_help(file=sys.stderr) return 1 + validate_conflicts(parser,options) + vault_pass = None if options.ask_vault_pass: # FIXME: prompt here From 9d3a63945d7ca11a024409b20f010d48b157605d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 21 Mar 2015 00:48:38 -0400 Subject: [PATCH 0137/3617] moved pipeline check to class var that was previously added --- v2/ansible/plugins/action/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index d430bd748beb1f..e56003021588bf 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -130,10 +130,10 @@ def _late_needs_tmp_path(self, tmp, module_style): if tmp and "tmp" in tmp: # tmp has already been created return False - if not self._connection._has_pipelining or not
C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.become: # tmp is necessary to store module source code return True - if not self._connection._has_pipelining: + if not self._connection.__class__.has_pipelining: # tmp is necessary to store the module source code # or we want to keep the files on the target system return True @@ -380,7 +380,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_ # FIXME: all of the old-module style and async stuff has been removed from here, and # might need to be re-added (unless we decide to drop support for old-style modules # at this point and rework things to support non-python modules specifically) - if self._connection._has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES: + if self._connection.__class__.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES: in_data = module_data else: if remote_module_path: From edb1bd25ddb9b63eb9a8c8d3224277489d13de4f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 21 Mar 2015 01:19:07 -0400 Subject: [PATCH 0138/3617] added password prompting and become/sudo/su collapsing --- v2/ansible/utils/cli.py | 47 +++++++++++++++++++++++++++++++++++++++++ v2/bin/ansible | 15 +++++++------ v2/bin/ansible-playbook | 14 +++++++----- 3 files changed, 65 insertions(+), 11 deletions(-) diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py index 3b899e49c56f7a..09f5ef4a30f9de 100644 --- a/v2/ansible/utils/cli.py +++ b/v2/ansible/utils/cli.py @@ -24,9 +24,11 @@ import os import time import yaml +import getpass from ansible import __version__ from ansible import constants as C +from ansible.utils.unicode import to_bytes # FIXME: documentation for methods here, which have mostly been # copied directly over from the old utils/__init__.py @@ -231,6 +233,51 @@ def _gitinfo(): f.close() return result + +def ask_passwords(options): + sshpass = None + becomepass = None + vaultpass = None + 
become_prompt = '' + + if options.ask_pass: + sshpass = getpass.getpass(prompt="SSH password: ") + become_prompt = "%s password[defaults to SSH password]: " % options.become_method.upper() + if sshpass: + sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') + else: + become_prompt = "%s password: " % options.become_method.upper() + + if options.become_ask_pass: + becomepass = getpass.getpass(prompt=become_prompt) + if options.ask_pass and becomepass == '': + becomepass = sshpass + if becomepass: + becomepass = to_bytes(becomepass) + + if options.ask_vault_pass: + vaultpass = getpass.getpass(prompt="Vault password: ") + if vaultpass: + vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip() + + return (sshpass, becomepass, vaultpass) + + +def normalize_become_options(options): + ''' this keeps backwards compatibility with sudo/su options ''' + options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS + options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER + + if options.become: + pass + elif options.sudo: + options.become = True + options.become_method = 'sudo' + elif options.su: + options.become = True + options.become_method = 'su' + + def validate_conflicts(parser, options): # Check for vault related conflicts diff --git a/v2/bin/ansible b/v2/bin/ansible index 1e298623f52848..74ee46121aa90c 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -29,7 +29,7 @@ from ansible.inventory import Inventory from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play -from ansible.utils.cli import base_parser, validate_conflicts +from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords from ansible.vars import VariableManager ######################################################## @@ -79,11 +79,14 @@ class 
Cli(object): #------------------------------------------------------------------------------- # FIXME: the password asking stuff needs to be ported over still #------------------------------------------------------------------------------- - #sshpass = None - #sudopass = None - #su_pass = None - #vault_pass = None - # + sshpass = None + becomepass = None + vault_pass = None + + normalize_become_options(options) + (sshpass, becomepass, vault_pass) = ask_passwords(options) + + #options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS ## Never ask for an SSH password when we run with local connection #if options.connection == "local": diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index 26bbe14c7acb56..f1b590958b343b 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -12,7 +12,7 @@ from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook import Playbook from ansible.playbook.task import Task -from ansible.utils.cli import base_parser, validate_conflicts +from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords from ansible.utils.unicode import to_unicode from ansible.utils.vars import combine_vars from ansible.utils.vault import read_vault_file @@ -55,11 +55,15 @@ def main(args): validate_conflicts(parser,options) + # Manage passwords + sshpass = None + becomepass = None vault_pass = None - if options.ask_vault_pass: - # FIXME: prompt here - pass - elif options.vault_password_file: + + normalize_become_options(options) + (sshpass, becomepass, vault_pass) = ask_passwords(options) + + if options.vault_password_file: # read vault_pass from a file vault_pass = read_vault_file(options.vault_password_file) From 10e14d0e0ab54746f6c4599dacbfb806629f6cc8 Mon Sep 17 00:00:00 2001 From: Henry Todd Date: Sat, 21 Mar 2015 13:21:55 +0800 Subject: [PATCH 0139/3617] Update add_host example in AWS Guide The add_host module now uses "groups" instead of 
"groupname" to allow for specifying more than one group. --- docsite/rst/guide_aws.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index 7cfffc218db9f3..97eb0904fe2f98 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -107,7 +107,7 @@ From this, we'll use the add_host module to dynamically create a host group cons register: ec2 - name: Add all instance public IPs to host group - add_host: hostname={{ item.public_ip }} groupname=ec2hosts + add_host: hostname={{ item.public_ip }} groups=ec2hosts with_items: ec2.instances With the host group now created, a second play at the bottom of the the same provisioning playbook file might now have some configuration steps:: From 08896e2cfdd6bcf338724f8214309a9422bbcfe4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 21 Mar 2015 01:23:28 -0400 Subject: [PATCH 0140/3617] enabled vault password file and fixed prompting for connection password on local --- v2/bin/ansible | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/v2/bin/ansible b/v2/bin/ansible index 74ee46121aa90c..f8478b32c227f0 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -30,6 +30,7 @@ from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords +from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager ######################################################## @@ -76,9 +77,9 @@ class Cli(object): pattern = args[0] - #------------------------------------------------------------------------------- - # FIXME: the password asking stuff needs to be ported over still - #------------------------------------------------------------------------------- + if options.connection == "local": + options.ask_pass = False + sshpass = None 
becomepass = None vault_pass = None @@ -86,23 +87,12 @@ class Cli(object): normalize_become_options(options) (sshpass, becomepass, vault_pass) = ask_passwords(options) - - #options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS - ## Never ask for an SSH password when we run with local connection - #if options.connection == "local": - # options.ask_pass = False - #options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS - #options.ask_su_pass = options.ask_su_pass or C.DEFAULT_ASK_SU_PASS - #options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS - # - #(sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass) - # + if options.vault_password_file: # read vault_pass from a file - #if not options.ask_vault_pass and options.vault_password_file: - # vault_pass = utils.read_vault_file(options.vault_password_file) - #------------------------------------------------------------------------------- + vault_pass = read_vault_file(options.vault_password_file) + - # FIXME: needs vault password, after the above is fixed + # FIXME: needs vault password loader = DataLoader() variable_manager = VariableManager() From ca540ef9f831e20bb1f9054fad889dd063954c23 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 21 Mar 2015 01:33:10 -0400 Subject: [PATCH 0141/3617] added vault password to dataloder creation --- v2/bin/ansible | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/v2/bin/ansible b/v2/bin/ansible index f8478b32c227f0..8eb5c97a6f5568 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -91,9 +91,7 @@ class Cli(object): # read vault_pass from a file vault_pass = read_vault_file(options.vault_password_file) - - # FIXME: needs vault password - loader = DataLoader() + loader = DataLoader(vault_password=vault_pass) variable_manager = VariableManager() inventory = 
Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory) From ec8118ec413ed4fc27d6f95874ece5022df335e7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 21 Mar 2015 02:02:59 -0400 Subject: [PATCH 0142/3617] now ansible ignores template errors on passwords they could be caused by random character combinations, fixes #10468 --- lib/ansible/runner/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 8e326935b09dff..4565b90a04d798 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -934,8 +934,12 @@ def _executor_internal_inner(self, host, module_name, module_args, inject, port, # user/pass may still contain variables at this stage actual_user = template.template(self.basedir, actual_user, inject) - actual_pass = template.template(self.basedir, actual_pass, inject) - self.become_pass = template.template(self.basedir, self.become_pass, inject) + try: + actual_pass = template.template(self.basedir, actual_pass, inject) + self.become_pass = template.template(self.basedir, self.become_pass, inject) + except: + # ignore password template errors, could be triggered by password characters #10468 + pass # make actual_user available as __magic__ ansible_ssh_user variable inject['ansible_ssh_user'] = actual_user From 9a680472f8d90ba87cbae917b6ab1f0d0cf67ffb Mon Sep 17 00:00:00 2001 From: Tim Rupp Date: Sat, 21 Mar 2015 19:22:12 -0700 Subject: [PATCH 0143/3617] Fixes a brief spelling error Fixes a simple spelling mistake that was bugging me when I read the online docs. Trying to make the docs as great as possible. --- docsite/rst/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index e7b21456afd5c5..1b499c547406bb 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -5,7 +5,7 @@ Here are some commonly-asked questions and their answers. .. 
_users_and_ports: -If you are looking to set environment varialbes remotely for your project (in a task, not locally for Ansible) +If you are looking to set environment variables remotely for your project (in a task, not locally for Ansible) The keyword is simply `environment` ``` From c5d5ed17ea2c5c1e6f81f2a4a87f196051b7a44d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 22 Mar 2015 02:05:27 -0400 Subject: [PATCH 0144/3617] added tag resolution mirroring updated v1 --- v2/ansible/playbook/taggable.py | 59 +++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 17 deletions(-) diff --git a/v2/ansible/playbook/taggable.py b/v2/ansible/playbook/taggable.py index e83f1d7ae50c2a..ce1bdfcf8a7ff3 100644 --- a/v2/ansible/playbook/taggable.py +++ b/v2/ansible/playbook/taggable.py @@ -24,6 +24,8 @@ from ansible.template import Templar class Taggable: + + untagged = set(['untagged']) _tags = FieldAttribute(isa='list', default=[]) def __init__(self): @@ -38,22 +40,45 @@ def _load_tags(self, attr, ds): raise AnsibleError('tags must be specified as a list', obj=ds) def evaluate_tags(self, only_tags, skip_tags, all_vars): - templar = Templar(loader=self._loader, variables=all_vars) - tags = templar.template(self.tags) - if not isinstance(tags, list): - tags = set([tags]) - else: - tags = set(tags) - - #print("%s tags are: %s, only_tags=%s, skip_tags=%s" % (self, my_tags, only_tags, skip_tags)) - if skip_tags: - skipped_tags = tags.intersection(skip_tags) - if len(skipped_tags) > 0: - return False - matched_tags = tags.intersection(only_tags) - #print("matched tags are: %s" % matched_tags) - if len(matched_tags) > 0 or 'all' in only_tags: - return True + ''' this checks if the current item should be executed depending on tag options ''' + + should_run = True + + if self.tags: + templar = Templar(loader=self._loader, variables=all_vars) + tags = templar.template(self.tags) + + if not isinstance(tags, list): + if tags.find(',') != -1: + tags = set(tags.split(',')) 
+ else: + tags = set([tags]) + else: + tags = set(tags) else: - return False + # this makes intersection work for untagged + tags = self.__class__.untagged + + if only_tags: + + should_run = False + + if 'always' in tags or 'all' in only_tags: + should_run = True + elif tags.intersection(only_tags): + should_run = True + elif 'tagged' in only_tags and tags != self.__class__.untagged: + should_run = True + + if should_run and skip_tags: + + # Check for tags that we need to skip + if 'all' in skip_tags: + if 'always' not in tags or 'always' in skip_tags: + should_run = False + elif tags.intersection(skip_tags): + should_run = False + elif 'tagged' in skip_tags and tags != self.__class__.untagged: + should_run = False + return should_run From bda83fdf84068bcd3720f5c6d82c21a7d5e66594 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 22 Mar 2015 19:17:04 -0500 Subject: [PATCH 0145/3617] Fixing bug in v2 dynamic include code, pointed out by apollo13 --- v2/ansible/plugins/strategies/linear.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/plugins/strategies/linear.py b/v2/ansible/plugins/strategies/linear.py index c6b9445b2e673b..b503d6ebd51022 100644 --- a/v2/ansible/plugins/strategies/linear.py +++ b/v2/ansible/plugins/strategies/linear.py @@ -236,7 +236,7 @@ def __repr__(self): for include_result in include_results: original_task = iterator.get_original_task(res._host, res._task) if original_task and original_task._role: - include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_file) + include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include']) else: include_file = self._loader.path_dwim(res._task.args.get('_raw_params')) From 5942144868f503dbc3b4652fdf4281db1cb7197a Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Mon, 23 Mar 2015 01:25:18 +0100 Subject: [PATCH 0146/3617] Port #10258 to v2 --- v2/ansible/module_utils/basic.py 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/module_utils/basic.py b/v2/ansible/module_utils/basic.py index 6c7217bd8838f6..79a0fab67b6e4c 100644 --- a/v2/ansible/module_utils/basic.py +++ b/v2/ansible/module_utils/basic.py @@ -1376,7 +1376,7 @@ def atomic_move(self, src, dest): # based on the current value of umask umask = os.umask(0) os.umask(umask) - os.chmod(dest, 0666 ^ umask) + os.chmod(dest, 0666 & ~umask) if switched_user: os.chown(dest, os.getuid(), os.getgid()) From 317728f64955f0d38da014fd7e48cba97883b646 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 23 Mar 2015 09:20:27 -0500 Subject: [PATCH 0147/3617] Allow ansible-galaxy to install symlinks --- bin/ansible-galaxy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy index f281bf97ae896b..a6d625671ec548 100755 --- a/bin/ansible-galaxy +++ b/bin/ansible-galaxy @@ -556,7 +556,7 @@ def install_role(role_name, role_version, role_filename, options): # we only extract files, and remove any relative path # bits that might be in the file for security purposes # and drop the leading directory, as mentioned above - if member.isreg(): + if member.isreg() or member.issym(): parts = member.name.split("/")[1:] final_parts = [] for part in parts: From 095990b4d8dcd93e65b188fb9ffeb37b1d3b09e5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 23 Mar 2015 15:19:13 -0500 Subject: [PATCH 0148/3617] Moving from getattr to properties for the v2 base class --- v2/ansible/playbook/base.py | 54 ++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index e32da5d8c5a90c..c33dde858fe7ed 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -21,6 +21,7 @@ import uuid +from functools import partial from inspect import getmembers from io import FileIO @@ -50,11 +51,24 @@ def __init__(self): # every 
object gets a random uuid: self._uuid = uuid.uuid4() - # each class knows attributes set upon it, see Task.py for example - self._attributes = dict() + # and initialize the base attributes + self._initialize_base_attributes() + + @staticmethod + def _generic_g(key, self): + method = "_get_attr_%s" % key + if method in dir(self): + return getattr(self, method)() + + return self._attributes[key] - for (name, value) in iteritems(self._get_base_attributes()): - self._attributes[name] = value.default + @staticmethod + def _generic_s(key, self, value): + self._attributes[key] = value + + @staticmethod + def _generic_d(key, self): + del self._attributes[key] def _get_base_attributes(self): ''' @@ -69,6 +83,17 @@ def _get_base_attributes(self): base_attributes[name] = value return base_attributes + def _initialize_base_attributes(self): + # each class knows attributes set upon it, see Task.py for example + self._attributes = dict() + + for (name, value) in self._get_base_attributes().items(): + getter = partial(self._generic_g, name) + setter = partial(self._generic_s, name) + deleter = partial(self._generic_d, name) + setattr(Base, name, property(getter, setter, deleter)) + setattr(self, name, value.default) + def munge(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' @@ -274,27 +299,6 @@ def deserialize(self, data): # restore the UUID field setattr(self, '_uuid', data.get('uuid')) - def __getattr__(self, needle): - - # return any attribute names as if they were real - # optionally allowing masking by accessors - - if not needle.startswith("_"): - method = "_get_attr_%s" % needle - if method in dir(self): - return getattr(self, method)() - - if needle in self._attributes: - return self._attributes[needle] - - raise AttributeError("attribute not found in %s: %s" % (self.__class__.__name__, needle)) - - def __setattr__(self, needle, value): - if hasattr(self, '_attributes') and needle in self._attributes: - self._attributes[needle] = 
value - else: - super(Base, self).__setattr__(needle, value) - def __getstate__(self): return self.serialize() From 79cf7e72927bfd61d5bdc6e4630317d18d539c9e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 23 Mar 2015 15:20:24 -0500 Subject: [PATCH 0149/3617] Modifying sample for test_become to show more test cases --- v2/samples/roles/test_become_r1/meta/main.yml | 2 +- v2/samples/roles/test_become_r2/meta/main.yml | 2 +- v2/samples/test_become.yml | 5 +++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/v2/samples/roles/test_become_r1/meta/main.yml b/v2/samples/roles/test_become_r1/meta/main.yml index cb58e2857bc3a3..603a2d53a2507f 100644 --- a/v2/samples/roles/test_become_r1/meta/main.yml +++ b/v2/samples/roles/test_become_r1/meta/main.yml @@ -1 +1 @@ -#allow_duplicates: yes +allow_duplicates: yes diff --git a/v2/samples/roles/test_become_r2/meta/main.yml b/v2/samples/roles/test_become_r2/meta/main.yml index 55b258adb4d336..9304df73a0db9b 100644 --- a/v2/samples/roles/test_become_r2/meta/main.yml +++ b/v2/samples/roles/test_become_r2/meta/main.yml @@ -1,3 +1,3 @@ -#allow_duplicates: yes +allow_duplicates: yes dependencies: - test_become_r1 diff --git a/v2/samples/test_become.yml b/v2/samples/test_become.yml index b7550f33c778fe..3dd318c89961a3 100644 --- a/v2/samples/test_become.yml +++ b/v2/samples/test_become.yml @@ -1,10 +1,15 @@ - hosts: all gather_facts: no + remote_user: root roles: + - { role: test_become_r2 } - { role: test_become_r2, sudo_user: testing } tasks: + - command: whoami - command: whoami become_user: testing + - block: + - command: whoami - block: - command: whoami become_user: testing From 577cdcadb35cc4eee73626262984275fd81e8dda Mon Sep 17 00:00:00 2001 From: Cristian Ciupitu Date: Mon, 23 Mar 2015 22:45:23 +0200 Subject: [PATCH 0150/3617] Doc: use literal code blocks for YAML examples MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Without this, the straight double quotes 
(") are displayed as curved quotes (“ and ”). --- docsite/rst/YAMLSyntax.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index 424db0ad46600b..d3eb843523173b 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -85,11 +85,11 @@ That's all you really need to know about YAML to start writing Gotchas ------- -While YAML is generally friendly, the following is going to result in a YAML syntax error: +While YAML is generally friendly, the following is going to result in a YAML syntax error:: foo: somebody said I should put a colon here: so I did -You will want to quote any hash values using colons, like so: +You will want to quote any hash values using colons, like so:: foo: "somebody said I should put a colon here: so I did" From fdf51e9a967a0d488e89d60c6409c86fb8b41513 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 23 Mar 2015 16:14:34 -0700 Subject: [PATCH 0151/3617] Use class.mro() instead of custom base_class finder code --- v2/ansible/playbook/base.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index c33dde858fe7ed..2a42441309a55d 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -97,17 +97,7 @@ def _initialize_base_attributes(self): def munge(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' - def _get_base_classes_munge(target_class): - base_classes = list(target_class.__bases__[:]) - for base_class in target_class.__bases__: - base_classes.extend( _get_base_classes_munge(base_class)) - return base_classes - - base_classes = list(self.__class__.__bases__[:]) - for base_class in self.__class__.__bases__: - base_classes.extend(_get_base_classes_munge(base_class)) - - for base_class in base_classes: + for base_class in self.__class__.mro(): method = getattr(self, "_munge_%s" % 
base_class.__name__.lower(), None) if method: return method(ds) From 63c54035de58d68dde422351be137fc5361677e7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 23 Mar 2015 16:38:51 -0700 Subject: [PATCH 0152/3617] Get rid of iteritems usage when we only care about the keys --- v2/ansible/playbook/base.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 2a42441309a55d..4ab2347dc97a8f 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -97,6 +97,9 @@ def _initialize_base_attributes(self): def munge(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' + ### FIXME: Can't find any classes with methods named + # _munge_base_class.__name__ so maybe Base.munge should be reduced down + # to return ds for base_class in self.__class__.mro(): method = getattr(self, "_munge_%s" % base_class.__name__.lower(), None) if method: @@ -132,7 +135,7 @@ def load_data(self, ds, variable_manager=None, loader=None): # FIXME: we currently don't do anything with private attributes but # may later decide to filter them out of 'ds' here. - for (name, attribute) in iteritems(self._get_base_attributes()): + for name in self._get_base_attributes(): # copy the value over unless a _load_field method is defined if name in ds: method = getattr(self, '_load_%s' % name, None) @@ -151,7 +154,7 @@ def load_data(self, ds, variable_manager=None, loader=None): return self def get_ds(self): - try: + try: return getattr(self, '_ds') except AttributeError: return None @@ -168,7 +171,7 @@ def _validate_attributes(self, ds): not map to attributes for this object. 
''' - valid_attrs = [name for (name, attribute) in iteritems(self._get_base_attributes())] + valid_attrs = frozenset(name for name in self._get_base_attributes()) for key in ds: if key not in valid_attrs: raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds) @@ -191,7 +194,7 @@ def copy(self): new_me = self.__class__() - for (name, attribute) in iteritems(self._get_base_attributes()): + for name in self._get_base_attributes(): setattr(new_me, name, getattr(self, name)) new_me._loader = self._loader @@ -223,7 +226,7 @@ def post_validate(self, all_vars=dict(), fail_on_undefined=True): try: # if the attribute contains a variable, template it now value = templar.template(getattr(self, name)) - + # run the post-validator if present method = getattr(self, '_post_validate_%s' % name, None) if method: @@ -262,7 +265,7 @@ def serialize(self): repr = dict() - for (name, attribute) in iteritems(self._get_base_attributes()): + for name in self._get_base_attributes(): repr[name] = getattr(self, name) # serialize the uuid field From 6ba24e9fa1c73120440f52878cc148b17552a206 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 23 Mar 2015 17:41:02 -0700 Subject: [PATCH 0153/3617] Remove comment on changing Base.munge => it's used by become.py --- v2/ansible/playbook/base.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 4ab2347dc97a8f..4ac815552a51a3 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -97,9 +97,6 @@ def _initialize_base_attributes(self): def munge(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' - ### FIXME: Can't find any classes with methods named - # _munge_base_class.__name__ so maybe Base.munge should be reduced down - # to return ds for base_class in self.__class__.mro(): method = getattr(self, "_munge_%s" % base_class.__name__.lower(), None) if method: From 
bc69ad81479fe687163421a0e1d905b5780110b5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 23 Mar 2015 18:42:28 -0700 Subject: [PATCH 0154/3617] Rename munge methods to preprocess_data. Remove the call to preprocess_loop data from playbook_include as includes can't be used with loops. --- v2/ansible/playbook/base.py | 12 ++++++------ v2/ansible/playbook/become.py | 8 +++++++- v2/ansible/playbook/block.py | 8 ++++---- v2/ansible/playbook/play.py | 4 ++-- v2/ansible/playbook/playbook_include.py | 13 ++++++------- v2/ansible/playbook/role/definition.py | 4 ++-- v2/ansible/playbook/role/requirement.py | 4 ++-- v2/ansible/playbook/task.py | 8 ++++---- 8 files changed, 33 insertions(+), 28 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 4ac815552a51a3..5aff5348ee7ab7 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -94,11 +94,11 @@ def _initialize_base_attributes(self): setattr(Base, name, property(getter, setter, deleter)) setattr(self, name, value.default) - def munge(self, ds): + def preprocess_data(self, ds): ''' infrequently used method to do some pre-processing of legacy terms ''' for base_class in self.__class__.mro(): - method = getattr(self, "_munge_%s" % base_class.__name__.lower(), None) + method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None) if method: return method(ds) return ds @@ -121,10 +121,10 @@ def load_data(self, ds, variable_manager=None, loader=None): if isinstance(ds, string_types) or isinstance(ds, FileIO): ds = self._loader.load(ds) - # call the munge() function to massage the data into something - # we can more easily parse, and then call the validation function - # on it to ensure there are no incorrect key values - ds = self.munge(ds) + # call the preprocess_data() function to massage the data into + # something we can more easily parse, and then call the validation + # function on it to ensure there are no incorrect key values + ds = 
self.preprocess_data(ds) self._validate_attributes(ds) # Walk all attributes in the class. diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py index 67eb52b15eeedd..291cff2b716570 100644 --- a/v2/ansible/playbook/become.py +++ b/v2/ansible/playbook/become.py @@ -51,7 +51,13 @@ def _detect_privilege_escalation_conflict(self, ds): elif has_sudo and has_su: raise errors.AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together') - def _munge_become(self, ds): + def _preprocess_data_become(self, ds): + """Preprocess the playbook data for become attributes + + This is called from the Base object's preprocess_data() method which + in turn is called pretty much anytime any sort of playbook object + (plays, tasks, blocks, etc) are created. + """ self._detect_privilege_escalation_conflict(ds) diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 03957bfe2f6691..6506345172aae3 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -66,7 +66,7 @@ def load(data, parent_block=None, role=None, task_include=None, use_handlers=Fal b = Block(parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers) return b.load_data(data, variable_manager=variable_manager, loader=loader) - def munge(self, ds): + def preprocess_data(self, ds): ''' If a simple task is given, an implicit block for that single task is created, which goes in the main portion of the block @@ -80,11 +80,11 @@ def munge(self, ds): if not is_block: if isinstance(ds, list): - return super(Block, self).munge(dict(block=ds)) + return super(Block, self).preprocess_data(dict(block=ds)) else: - return super(Block, self).munge(dict(block=[ds])) + return super(Block, self).preprocess_data(dict(block=[ds])) - return super(Block, self).munge(ds) + return super(Block, self).preprocess_data(ds) def _load_block(self, attr, ds): return load_list_of_tasks( diff --git 
a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index cbe4e038617a82..a96e6e1ecaa0b7 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -102,7 +102,7 @@ def load(data, variable_manager=None, loader=None): p = Play() return p.load_data(data, variable_manager=variable_manager, loader=loader) - def munge(self, ds): + def preprocess_data(self, ds): ''' Adjusts play datastructure to cleanup old/legacy items ''' @@ -121,7 +121,7 @@ def munge(self, ds): ds['remote_user'] = ds['user'] del ds['user'] - return super(Play, self).munge(ds) + return super(Play, self).preprocess_data(ds) def _load_vars(self, attr, ds): ''' diff --git a/v2/ansible/playbook/playbook_include.py b/v2/ansible/playbook/playbook_include.py index e1d7f6be34f24b..f7eae230f7c0e8 100644 --- a/v2/ansible/playbook/playbook_include.py +++ b/v2/ansible/playbook/playbook_include.py @@ -48,7 +48,8 @@ def load_data(self, ds, basedir, variable_manager=None, loader=None): from ansible.playbook import Playbook # first, we use the original parent method to correctly load the object - # via the munge/load_data system we normally use for other playbook objects + # via the load_data/preprocess_data system we normally use for other + # playbook objects new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader) # then we use the object to load a Playbook @@ -67,7 +68,7 @@ def load_data(self, ds, basedir, variable_manager=None, loader=None): return pb - def munge(self, ds): + def preprocess_data(self, ds): ''' Regorganizes the data for a PlaybookInclude datastructure to line up with what we expect the proper attributes to be @@ -83,9 +84,7 @@ def munge(self, ds): for (k,v) in ds.iteritems(): if k == 'include': - self._munge_include(ds, new_ds, k, v) - elif k.replace("with_", "") in lookup_loader: - self._munge_loop(ds, new_ds, k, v) + self._preprocess_include(ds, new_ds, k, v) else: # some basic error checking, to make sure vars are properly # formatted and do not 
conflict with k=v parameters @@ -98,9 +97,9 @@ def munge(self, ds): raise AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds) new_ds[k] = v - return super(PlaybookInclude, self).munge(new_ds) + return super(PlaybookInclude, self).preprocess_data(new_ds) - def _munge_include(self, ds, new_ds, k, v): + def _preprocess_include(self, ds, new_ds, k, v): ''' Splits the include line up into filename and parameters ''' diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py index bc1a0daacf2ae7..fb96a0e55f9c83 100644 --- a/v2/ansible/playbook/role/definition.py +++ b/v2/ansible/playbook/role/definition.py @@ -54,12 +54,12 @@ def __init__(self, role_basedir=None): def load(data, variable_manager=None, loader=None): raise AnsibleError("not implemented") - def munge(self, ds): + def preprocess_data(self, ds): assert isinstance(ds, dict) or isinstance(ds, string_types) if isinstance(ds, dict): - ds = super(RoleDefinition, self).munge(ds) + ds = super(RoleDefinition, self).preprocess_data(ds) # we create a new data structure here, using the same # object used internally by the YAML parsing code so we diff --git a/v2/ansible/playbook/role/requirement.py b/v2/ansible/playbook/role/requirement.py index d321f6e17dfb32..61db0cb1fd4979 100644 --- a/v2/ansible/playbook/role/requirement.py +++ b/v2/ansible/playbook/role/requirement.py @@ -61,7 +61,7 @@ def parse(self, ds): if isinstance(ds, string_types): role_name = ds else: - ds = self._munge_role_spec(ds) + ds = self._preprocess_role_spec(ds) (new_ds, role_params) = self._split_role_params(ds) # pull the role name out of the ds @@ -70,7 +70,7 @@ def parse(self, ds): return (new_ds, role_name, role_params) - def _munge_role_spec(self, ds): + def _preprocess_role_spec(self, ds): if 'role' in ds: # Old style: {role: "galaxy.role,version,name", other_vars: "here" } role_info = self._role_spec_parse(ds['role']) diff --git a/v2/ansible/playbook/task.py 
b/v2/ansible/playbook/task.py index ab66898242cb5a..0f5e7674866bbd 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -137,7 +137,7 @@ def __repr__(self): ''' returns a human readable representation of the task ''' return "TASK: %s" % self.get_name() - def _munge_loop(self, ds, new_ds, k, v): + def _preprocess_loop(self, ds, new_ds, k, v): ''' take a lookup plugin name and store it correctly ''' loop_name = k.replace("with_", "") @@ -146,7 +146,7 @@ def _munge_loop(self, ds, new_ds, k, v): new_ds['loop'] = loop_name new_ds['loop_args'] = v - def munge(self, ds): + def preprocess_data(self, ds): ''' tasks are especially complex arguments so need pre-processing. keep it short. @@ -178,11 +178,11 @@ def munge(self, ds): # determined by the ModuleArgsParser() above continue elif k.replace("with_", "") in lookup_loader: - self._munge_loop(ds, new_ds, k, v) + self._preprocess_loop(ds, new_ds, k, v) else: new_ds[k] = v - return super(Task, self).munge(new_ds) + return super(Task, self).preprocess_data(new_ds) def post_validate(self, all_vars=dict(), fail_on_undefined=True): ''' From 8a0b8629e86efeddec7da5f8976231deee000f7f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Mar 2015 00:17:10 -0400 Subject: [PATCH 0155/3617] readded -u option --- v2/ansible/utils/cli.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py index 09f5ef4a30f9de..6500234c74125e 100644 --- a/v2/ansible/utils/cli.py +++ b/v2/ansible/utils/cli.py @@ -46,6 +46,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, parser = SortedOptParser(usage, version=version("%prog")) + parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', + help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count", help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") 
parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', From 131683523b97f9a2ce4ab062f566a26243d53b9f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 23 Mar 2015 23:15:30 -0700 Subject: [PATCH 0156/3617] Add some comments to explain how the property code for Attributes works --- v2/ansible/playbook/base.py | 39 ++++++++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 5aff5348ee7ab7..e834d3b729684f 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -54,21 +54,40 @@ def __init__(self): # and initialize the base attributes self._initialize_base_attributes() + # The following three functions are used to programatically define data + # descriptors (aka properties) for the Attributes of all of the playbook + # objects (tasks, blocks, plays, etc). + # + # The function signature is a little strange because of how we define + # them. We use partial to give each method the name of the Attribute that + # it is for. Since partial prefills the positional arguments at the + # beginning of the function we end up with the first positional argument + # being allocated to the name instead of to the class instance (self) as + # normal. To deal with that we make the property name field the first + # positional argument and self the second arg. + # + # Because these methods are defined inside of the class, they get bound to + # the instance when the object is created. After we run partial on them + # and put the result back into the class as a property, they get bound + # a second time. This leads to self being placed in the arguments twice. + # To work around that, we mark the functions as @staticmethod so that the + # first binding to the instance doesn't happen. 
+ @staticmethod - def _generic_g(key, self): - method = "_get_attr_%s" % key + def _generic_g(prop_name, self): + method = "_get_attr_%s" % prop_name if method in dir(self): return getattr(self, method)() - return self._attributes[key] + return self._attributes[prop_name] @staticmethod - def _generic_s(key, self, value): - self._attributes[key] = value + def _generic_s(prop_name, self, value): + self._attributes[prop_name] = value @staticmethod - def _generic_d(key, self): - del self._attributes[key] + def _generic_d(prop_name, self): + del self._attributes[prop_name] def _get_base_attributes(self): ''' @@ -91,7 +110,13 @@ def _initialize_base_attributes(self): getter = partial(self._generic_g, name) setter = partial(self._generic_s, name) deleter = partial(self._generic_d, name) + + # Place the property into the class so that cls.name is the + # property functions. setattr(Base, name, property(getter, setter, deleter)) + + # Place the value into the instance so that the property can + # process and hold that value/ setattr(self, name, value.default) def preprocess_data(self, ds): From c6942578bfb8ecf79850f418ca94d2655b3cef12 Mon Sep 17 00:00:00 2001 From: Henrik Danielsson Date: Tue, 24 Mar 2015 11:27:12 +0100 Subject: [PATCH 0157/3617] Added installation instructions for Arch Linux. --- docsite/rst/intro_installation.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 303880cac11f84..450d125e5f5460 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -261,6 +261,17 @@ Ansible is available for Solaris as `SysV package from OpenCSW `_. + .. 
_from_pip: Latest Releases Via Pip From 19ba26e9a5ddb4aa1d326ae058e8a79b349345dc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Mar 2015 14:48:50 -0400 Subject: [PATCH 0158/3617] makes raw module have quiet ssh so as to avoid extra output when not requried --- lib/ansible/runner/connection_plugins/ssh.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py index a7a57a01cf25f1..036175f6a9c3e2 100644 --- a/lib/ansible/runner/connection_plugins/ssh.py +++ b/lib/ansible/runner/connection_plugins/ssh.py @@ -272,7 +272,10 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab if utils.VERBOSITY > 3: ssh_cmd += ["-vvv"] else: - ssh_cmd += ["-v"] + if self.runner.module_name == 'raw': + ssh_cmd += ["-q"] + else: + ssh_cmd += ["-v"] ssh_cmd += self.common_args if self.ipv6: From cf6155f1c2f8696e9e0cc681c13e8a26ac05885a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1n=20Dzurek?= Date: Tue, 24 Mar 2015 20:00:51 +0100 Subject: [PATCH 0159/3617] rst.j2 template better core module source wording --- hacking/templates/rst.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 6873c3fea5855d..d6d252c5c6b005 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -177,7 +177,7 @@ Common return values are documented here :doc:`common_return_values`, the follow This is a Core Module --------------------- -This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo. +The source of this module is hosted on GitHub in the `ansible-modules-core `_ repo. If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. 
If not, we would be grateful if you would file one. From b6ec502983a598e1a4043f541df3c2279e80a99e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Mar 2015 21:09:04 -0400 Subject: [PATCH 0160/3617] added missing element to make google groups link a actual link --- hacking/templates/rst.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index d6d252c5c6b005..444b4243af5241 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -196,7 +196,7 @@ This source of this module is hosted on GitHub in the `ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. -Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. +Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. 
From aca4e292fa3f762f85b027c089cab181cd0761da Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 09:55:39 -0400 Subject: [PATCH 0161/3617] some updates of what 1.9 includes --- CHANGELOG.md | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b5adaa6e5320c7..688fc78ff9eca2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,21 @@ Ansible Changes By Release in progress, details pending +* Tags rehaul: added 'always', 'untagged' and 'tagged' special tags and normalized + tag resolution. Added tag information to --list-tasks and new --list-tags option. + +* Privilege Escalation generalization, new 'Become' system and varialbes now will + handle existing and new methods. Sudo and su have been kept for backwards compatibility. + New methods pbrun and pfexec in 'alpha' state, planned adding 'runas' for winrm connection plugin. + +* Improved ssh connection error reporting, now you get back the specific message from ssh. + +* Added facility to document task module return values for registered vars, both for + ansible-doc and the docsite. Documented copy, stats and acl modules, the rest must be + updated individually (we will start doing so incrementally). + * Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. + * Safety changes: several modules have force parameters that defaulted to true. These have been changed to default to false so as not to accidentally lose work. Playbooks that depended on the former behaviour simply to add @@ -29,8 +43,39 @@ in progress, details pending * Optimize the plugin loader to cache available plugins much more efficiently. For some use cases this can lead to dramatic improvements in startup time. +* Overhaul of the checksum system, now supports more systems and more cases more reliably and uniformly. 
+ * Fix skipped tasks to not display their parameters if no_log is specified. +* Many fixes to unicode support, standarized functions to make it easier to add to input/output boundries. + +* Added travis integration to github for basic tests, this should speed up ticket triage and merging. + +* environment: directive now can also be applied to play and is inhertited by tasks, which can still overridde it. + +* expanded facts and OS support for existing facts. + +* new 'wantlist' option to lookups allows for selecting a list typed variable vs a commad delimited string as the return. + +* the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes). + +* new filters: + * ternary: allows for trueval/falseval assignement dependint on conditional + * cartesian: returns the cartesian product of 2 lists + * to_uuid: given a string it will return an ansible domain specific UUID + * A whole set of ip/network manipulation filters: ipaddr,ipwrap,ipv4,ipv6ipsubnet,nthhost,hwaddr,macaddr + +* new lookup plugins (allow fetching data for use in plays): + * dig: does dns resolution and returns IPs. + * url: allows pulling data from a url. + +* new callback plugins: + * syslog_json: allows logging play output to a syslog network server using json format + +* new task modules: + +* Many documentation additions and fixes. 
+ ## 1.8.4 "You Really Got Me" - Feb 19, 2015 * Fixed regressions in ec2 and mount modules, introduced in 1.8.3 From 699f6b16dbe953cb5d3b3538a40a9f5726573f97 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 10:36:20 -0400 Subject: [PATCH 0162/3617] a few more updates --- CHANGELOG.md | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 688fc78ff9eca2..313ae81e624830 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,9 @@ Ansible Changes By Release in progress, details pending -* Tags rehaul: added 'always', 'untagged' and 'tagged' special tags and normalized +* Added kerberos suport to winrm connection plugin. + +* Tags rehaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized tag resolution. Added tag information to --list-tasks and new --list-tags option. * Privilege Escalation generalization, new 'Become' system and varialbes now will @@ -53,16 +55,23 @@ in progress, details pending * environment: directive now can also be applied to play and is inhertited by tasks, which can still overridde it. -* expanded facts and OS support for existing facts. +* expanded facts and OS/distribution support for existing facts and improved performance with pypy. * new 'wantlist' option to lookups allows for selecting a list typed variable vs a commad delimited string as the return. * the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes). +* allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules). + +* sped up YAML parsing in ansible by up to 25% by switching to CParser loader. 
+ * new filters: * ternary: allows for trueval/falseval assignement dependint on conditional * cartesian: returns the cartesian product of 2 lists * to_uuid: given a string it will return an ansible domain specific UUID + * checksum: uses the ansible internal checksum to return a hash from a string + * hash: get a hash from a string (md5, sha1, etc) + * password_hash: get a hash form as string that can be used as a password in the user module (and others) * A whole set of ip/network manipulation filters: ipaddr,ipwrap,ipv4,ipv6ipsubnet,nthhost,hwaddr,macaddr * new lookup plugins (allow fetching data for use in plays): @@ -73,9 +82,15 @@ in progress, details pending * syslog_json: allows logging play output to a syslog network server using json format * new task modules: + * patch: allows for patching files on target systems + +* new inventory scripts: + * vbox: virtualbox + * consul: use consul as an inventory source * Many documentation additions and fixes. + ## 1.8.4 "You Really Got Me" - Feb 19, 2015 * Fixed regressions in ec2 and mount modules, introduced in 1.8.3 From 34cd6deb9f93050efe1c6600f2acb62f986c7a12 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 25 Mar 2015 07:41:13 -0700 Subject: [PATCH 0163/3617] Spelling --- CHANGELOG.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 313ae81e624830..f5cb2f0e5d24dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ in progress, details pending * Tags rehaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized tag resolution. Added tag information to --list-tasks and new --list-tags option. -* Privilege Escalation generalization, new 'Become' system and varialbes now will +* Privilege Escalation generalization, new 'Become' system and variables now will handle existing and new methods. Sudo and su have been kept for backwards compatibility. 
New methods pbrun and pfexec in 'alpha' state, planned adding 'runas' for winrm connection plugin. @@ -24,23 +24,23 @@ in progress, details pending * Safety changes: several modules have force parameters that defaulted to true. These have been changed to default to false so as not to accidentally lose - work. Playbooks that depended on the former behaviour simply to add + work. Playbooks that depended on the former behaviour simply need to add force=True to the task that needs it. Affected modules: * bzr: When local modifications exist in a checkout, the bzr module used to - default to temoving the modifications on any operation. Now the module + default to removing the modifications on any operation. Now the module will not remove the modifications unless force=yes is specified. Operations that depend on a clean working tree may fail unless force=yes is added. * git: When local modifications exist in a checkout, the git module will now - fail unless force is explictly specified. Specifying force will allow the - module to revert and overwrite local modifications to make git actions + fail unless force is explictly specified. Specifying force=yes will allow + the module to revert and overwrite local modifications to make git actions succeed. * hg: When local modifications exist in a checkout, the hg module used to default to removing the modifications on any operation. Now the module will not remove the modifications unless force=yes is specified. * subversion: When updating a checkout with local modifications, you now need - to add force so the module will revert the modifications before updating. + to add force=yes so the module will revert the modifications before updating. * Optimize the plugin loader to cache available plugins much more efficiently. For some use cases this can lead to dramatic improvements in startup time. 
From 00b9364699cfd1ea7faf13ea9327ac4f51a9a3bc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 10:56:30 -0400 Subject: [PATCH 0164/3617] added modules from extras --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5cb2f0e5d24dc..1dd459892c6803 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,7 +82,20 @@ in progress, details pending * syslog_json: allows logging play output to a syslog network server using json format * new task modules: + * cryptab: manages linux encrypted block devices + * gce_img: for utilizing GCE image resources + * gluster_volume: manage glusterfs volumes + * haproxy: for the load balancer of same name + * known_hosts: manages the ssh known_hosts file + * lxc_container: manage lxc containers * patch: allows for patching files on target systems + * pkg5: installing and uninstalling packages on Solaris + * pkg5_publisher: manages Solaris pkg5 repository configuration + * postgresql_ext: manage postgresql extensions + * snmp_facts: gather facts via snmp + * svc: manages daemontools based services + * uptimerobot: manage monitoring with this service + * new inventory scripts: * vbox: virtualbox From 1b11e45f3cb4e1e5671104d85e58430b43a70725 Mon Sep 17 00:00:00 2001 From: Matthieu Caneill Date: Wed, 25 Mar 2015 16:34:07 +0100 Subject: [PATCH 0165/3617] doc: building debian package: 'asciidoc' is a required dependency --- packaging/debian/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/debian/README.md b/packaging/debian/README.md index c7538dbf793603..715084380d76aa 100644 --- a/packaging/debian/README.md +++ b/packaging/debian/README.md @@ -4,7 +4,7 @@ Ansible Debian Package To create an Ansible DEB package: sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools sshpass - sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot + sudo apt-get install cdbs 
debhelper dpkg-dev git-core reprepro python-support fakeroot asciidoc git clone git://github.com/ansible/ansible.git cd ansible make deb From 1aaf444943f1f338878c494dec2b59a2639e6669 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 25 Mar 2015 08:51:35 -0700 Subject: [PATCH 0166/3617] Put all module changes in the same location --- CHANGELOG.md | 48 +++++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1dd459892c6803..e9024224115247 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,28 +20,6 @@ in progress, details pending ansible-doc and the docsite. Documented copy, stats and acl modules, the rest must be updated individually (we will start doing so incrementally). -* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. - -* Safety changes: several modules have force parameters that defaulted to true. - These have been changed to default to false so as not to accidentally lose - work. Playbooks that depended on the former behaviour simply need to add - force=True to the task that needs it. Affected modules: - - * bzr: When local modifications exist in a checkout, the bzr module used to - default to removing the modifications on any operation. Now the module - will not remove the modifications unless force=yes is specified. - Operations that depend on a clean working tree may fail unless force=yes is - added. - * git: When local modifications exist in a checkout, the git module will now - fail unless force is explictly specified. Specifying force=yes will allow - the module to revert and overwrite local modifications to make git actions - succeed. - * hg: When local modifications exist in a checkout, the hg module used to - default to removing the modifications on any operation. Now the module - will not remove the modifications unless force=yes is specified. 
- * subversion: When updating a checkout with local modifications, you now need - to add force=yes so the module will revert the modifications before updating. - * Optimize the plugin loader to cache available plugins much more efficiently. For some use cases this can lead to dramatic improvements in startup time. @@ -97,13 +75,37 @@ in progress, details pending * uptimerobot: manage monitoring with this service +* module enhancements and notable changes + * The selinux module now sets the current running state to permissive if state='disabled' + * Can now set accounts as expired via the user module + * vsphere_guest now supports deploying guests from a template + * Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. + * Safety changes: several modules have force parameters that defaulted to true. + These have been changed to default to false so as not to accidentally lose + work. Playbooks that depended on the former behaviour simply need to add + force=True to the task that needs it. Affected modules: + * bzr: When local modifications exist in a checkout, the bzr module used to + default to removing the modifications on any operation. Now the module + will not remove the modifications unless force=yes is specified. + Operations that depend on a clean working tree may fail unless force=yes is + added. + * git: When local modifications exist in a checkout, the git module will now + fail unless force is explictly specified. Specifying force=yes will allow + the module to revert and overwrite local modifications to make git actions + succeed. + * hg: When local modifications exist in a checkout, the hg module used to + default to removing the modifications on any operation. Now the module + will not remove the modifications unless force=yes is specified. 
+ * subversion: When updating a checkout with local modifications, you now need + to add force=yes so the module will revert the modifications before updating. + + * new inventory scripts: * vbox: virtualbox * consul: use consul as an inventory source * Many documentation additions and fixes. - ## 1.8.4 "You Really Got Me" - Feb 19, 2015 * Fixed regressions in ec2 and mount modules, introduced in 1.8.3 From 9b20ca31d6e7e3cc9344468328b7e85823f660a3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 25 Mar 2015 09:24:48 -0700 Subject: [PATCH 0167/3617] Add a unch of changelog entries for 1.9 --- CHANGELOG.md | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e9024224115247..bb1dfcad2987d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ Ansible Changes By Release in progress, details pending -* Added kerberos suport to winrm connection plugin. +* Added kerberos support to winrm connection plugin. * Tags rehaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized tag resolution. Added tag information to --list-tasks and new --list-tags option. 
@@ -76,10 +76,26 @@ in progress, details pending * module enhancements and notable changes + * vsphere_guest now supports deploying guests from a template + * ec2_vol gained the ability to specify the EBS volume type + * ec2_vol can now detach volumes by specifying instance=None + * Added tenancy support for the ec2 module + * rds module has gained the ability to manage tags and set charset and public accessibility + * ec2_snapshot module gained the capability to remove snapshots + * Several important docker changes: + * restart_policy parameters to configure when the container automatically restarts + * If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option + * Add insecure_registry parameter for connecting to registries via http + * authorized_keys can now use url as a key source * The selinux module now sets the current running state to permissive if state='disabled' * Can now set accounts as expired via the user module - * vsphere_guest now supports deploying guests from a template + * Overhaul of the service module to make code simpler and behave better for systems running systemd or rcctl + * yum module now has a parameter to refresh its cache of package metadata + * Add parameters to the postgres modules to specify a unix socket to connect to the db + * The mount module now supports bind mounts + * django_manage can now handle * Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. + * Add a refspec argument to the git module that allows pulling commits that aren't part of a branch * Safety changes: several modules have force parameters that defaulted to true. These have been changed to default to false so as not to accidentally lose work. 
Playbooks that depended on the former behaviour simply need to add From 2c3e58ad594ed5b3d5dd75263a383dd3cbf9119e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 25 Mar 2015 10:15:19 -0700 Subject: [PATCH 0168/3617] And all of core module changes added --- CHANGELOG.md | 41 ++++++++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb1dfcad2987d4..ada38e6f155732 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,20 +77,47 @@ in progress, details pending * module enhancements and notable changes * vsphere_guest now supports deploying guests from a template - * ec2_vol gained the ability to specify the EBS volume type - * ec2_vol can now detach volumes by specifying instance=None - * Added tenancy support for the ec2 module - * rds module has gained the ability to manage tags and set charset and public accessibility - * ec2_snapshot module gained the capability to remove snapshots + * Multiple new enhancements to the amazon web service modules: + * ec2 now applies all specified security groups when creating a new instance. 
Previously it was only applying one + * ec2_vol gained the ability to specify the EBS volume type + * ec2_vol can now detach volumes by specifying instance=None + * Fix ec2_group to purge specific grants rather than whole rules + * Added tenancy support for the ec2 module + * rds module has gained the ability to manage tags and set charset and public accessibility + * ec2_snapshot module gained the capability to remove snapshots + * Add alias support for route53 + * Add private_zones support to route53 + * ec2_asg: Add wait_for_instances parameter that waits until an instance is ready before ending the ansible task + * gce gained the ip_forward parameter to forward ip packets + * disk_auto_delete parameter to gce that will remove the boot disk after an instance is destroyed + * gce can now spawn instances with no external ip + * gce_pd gained the ability to choose a disk type + * gce_net gained target_tags parameter for creating firewall rules + * rax module has new parameters for making use of a boot volume + * Add scheduler_hints to the nova_compute module for optional parameters * Several important docker changes: * restart_policy parameters to configure when the container automatically restarts * If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option * Add insecure_registry parameter for connecting to registries via http + * New parameter to set a container's domainname + * Undeprecated docker_image module until there's replacement functionality + * Allow setting the container's pid namespace + * Add a pull parameter that chooses when ansible will look for more recent images in the registry + * docker module states have been greatly enhanced. 
The reworked and new states are: + * present now creates but does not start containers + * restarted always restarts a container + * reloaded restarts a container if ansible detects that the configuration is different than what is spcified + * reloaded accounts for exposed ports, env vars, and volumes + * Can now connect to the docker server using TLS + * Many fixes for hardlink and softlink handling in file-related modules + * Implement user, group, mode, and selinux parameters for the unarchive module * authorized_keys can now use url as a key source + * authorized_keys has a new exclusive paameter that determines if keys that weren't specified in the task * The selinux module now sets the current running state to permissive if state='disabled' - * Can now set accounts as expired via the user module - * Overhaul of the service module to make code simpler and behave better for systems running systemd or rcctl + * Can now set accounts to expire via the user module + * Overhaul of the service module to make code simpler and behave better for systems running several popular init systems * yum module now has a parameter to refresh its cache of package metadata + * apt module gained a build_dep parameter to install a package's build dependencies * Add parameters to the postgres modules to specify a unix socket to connect to the db * The mount module now supports bind mounts * django_manage can now handle From 1eed3edc2fa1e53466f7a74e275424e1e80b3b42 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 25 Mar 2015 14:17:02 -0500 Subject: [PATCH 0169/3617] tweaking the CHANGELOG --- CHANGELOG.md | 223 ++++++++++++++++++++++++--------------------------- 1 file changed, 106 insertions(+), 117 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ada38e6f155732..e1d171e8b45770 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,152 +1,141 @@ Ansible Changes By Release ========================== -## 1.9 "Dancing In the Street" - ACTIVE DEVELOPMENT +## 2.0 "TBD" - 
ACTIVE DEVELOPMENT -in progress, details pending +Major Changes: -* Added kerberos support to winrm connection plugin. +New Modules: + +Other Notable Changes: + +## 1.9 "Dancing In the Street" - Mar 25, 2015 +Major changes: + +* Added kerberos support to winrm connection plugin. * Tags rehaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized tag resolution. Added tag information to --list-tasks and new --list-tags option. - * Privilege Escalation generalization, new 'Become' system and variables now will handle existing and new methods. Sudo and su have been kept for backwards compatibility. New methods pbrun and pfexec in 'alpha' state, planned adding 'runas' for winrm connection plugin. - * Improved ssh connection error reporting, now you get back the specific message from ssh. - * Added facility to document task module return values for registered vars, both for ansible-doc and the docsite. Documented copy, stats and acl modules, the rest must be updated individually (we will start doing so incrementally). - * Optimize the plugin loader to cache available plugins much more efficiently. For some use cases this can lead to dramatic improvements in startup time. - * Overhaul of the checksum system, now supports more systems and more cases more reliably and uniformly. - * Fix skipped tasks to not display their parameters if no_log is specified. - * Many fixes to unicode support, standarized functions to make it easier to add to input/output boundries. - * Added travis integration to github for basic tests, this should speed up ticket triage and merging. - * environment: directive now can also be applied to play and is inhertited by tasks, which can still overridde it. - * expanded facts and OS/distribution support for existing facts and improved performance with pypy. - * new 'wantlist' option to lookups allows for selecting a list typed variable vs a commad delimited string as the return. 
- * the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes). - * allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules). - * sped up YAML parsing in ansible by up to 25% by switching to CParser loader. -* new filters: - * ternary: allows for trueval/falseval assignement dependint on conditional - * cartesian: returns the cartesian product of 2 lists - * to_uuid: given a string it will return an ansible domain specific UUID - * checksum: uses the ansible internal checksum to return a hash from a string - * hash: get a hash from a string (md5, sha1, etc) - * password_hash: get a hash form as string that can be used as a password in the user module (and others) - * A whole set of ip/network manipulation filters: ipaddr,ipwrap,ipv4,ipv6ipsubnet,nthhost,hwaddr,macaddr - -* new lookup plugins (allow fetching data for use in plays): - * dig: does dns resolution and returns IPs. - * url: allows pulling data from a url. 
+New Modules: -* new callback plugins: +* cryptab: manages linux encrypted block devices +* gce_img: for utilizing GCE image resources +* gluster_volume: manage glusterfs volumes +* haproxy: for the load balancer of same name +* known_hosts: manages the ssh known_hosts file +* lxc_container: manage lxc containers +* patch: allows for patching files on target systems +* pkg5: installing and uninstalling packages on Solaris +* pkg5_publisher: manages Solaris pkg5 repository configuration +* postgresql_ext: manage postgresql extensions +* snmp_facts: gather facts via snmp +* svc: manages daemontools based services +* uptimerobot: manage monitoring with this service + +New Filters: + +* ternary: allows for trueval/falseval assignement dependint on conditional +* cartesian: returns the cartesian product of 2 lists +* to_uuid: given a string it will return an ansible domain specific UUID +* checksum: uses the ansible internal checksum to return a hash from a string +* hash: get a hash from a string (md5, sha1, etc) +* password_hash: get a hash form as string that can be used as a password in the user module (and others) +* A whole set of ip/network manipulation filters: ipaddr,ipwrap,ipv4,ipv6ipsubnet,nthhost,hwaddr,macaddr + +Other Notable Changes: + +* New lookup plugins: + * dig: does dns resolution and returns IPs. + * url: allows pulling data from a url. 
+* New callback plugins: * syslog_json: allows logging play output to a syslog network server using json format - -* new task modules: - * cryptab: manages linux encrypted block devices - * gce_img: for utilizing GCE image resources - * gluster_volume: manage glusterfs volumes - * haproxy: for the load balancer of same name - * known_hosts: manages the ssh known_hosts file - * lxc_container: manage lxc containers - * patch: allows for patching files on target systems - * pkg5: installing and uninstalling packages on Solaris - * pkg5_publisher: manages Solaris pkg5 repository configuration - * postgresql_ext: manage postgresql extensions - * snmp_facts: gather facts via snmp - * svc: manages daemontools based services - * uptimerobot: manage monitoring with this service - - -* module enhancements and notable changes - * vsphere_guest now supports deploying guests from a template - * Multiple new enhancements to the amazon web service modules: - * ec2 now applies all specified security groups when creating a new instance. 
Previously it was only applying one - * ec2_vol gained the ability to specify the EBS volume type - * ec2_vol can now detach volumes by specifying instance=None - * Fix ec2_group to purge specific grants rather than whole rules - * Added tenancy support for the ec2 module - * rds module has gained the ability to manage tags and set charset and public accessibility - * ec2_snapshot module gained the capability to remove snapshots - * Add alias support for route53 - * Add private_zones support to route53 - * ec2_asg: Add wait_for_instances parameter that waits until an instance is ready before ending the ansible task - * gce gained the ip_forward parameter to forward ip packets - * disk_auto_delete parameter to gce that will remove the boot disk after an instance is destroyed - * gce can now spawn instances with no external ip - * gce_pd gained the ability to choose a disk type - * gce_net gained target_tags parameter for creating firewall rules - * rax module has new parameters for making use of a boot volume - * Add scheduler_hints to the nova_compute module for optional parameters - * Several important docker changes: - * restart_policy parameters to configure when the container automatically restarts - * If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option - * Add insecure_registry parameter for connecting to registries via http - * New parameter to set a container's domainname - * Undeprecated docker_image module until there's replacement functionality - * Allow setting the container's pid namespace - * Add a pull parameter that chooses when ansible will look for more recent images in the registry - * docker module states have been greatly enhanced. 
The reworked and new states are: - * present now creates but does not start containers - * restarted always restarts a container - * reloaded restarts a container if ansible detects that the configuration is different than what is spcified - * reloaded accounts for exposed ports, env vars, and volumes - * Can now connect to the docker server using TLS - * Many fixes for hardlink and softlink handling in file-related modules - * Implement user, group, mode, and selinux parameters for the unarchive module - * authorized_keys can now use url as a key source - * authorized_keys has a new exclusive paameter that determines if keys that weren't specified in the task - * The selinux module now sets the current running state to permissive if state='disabled' - * Can now set accounts to expire via the user module - * Overhaul of the service module to make code simpler and behave better for systems running several popular init systems - * yum module now has a parameter to refresh its cache of package metadata - * apt module gained a build_dep parameter to install a package's build dependencies - * Add parameters to the postgres modules to specify a unix socket to connect to the db - * The mount module now supports bind mounts - * django_manage can now handle - * Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. - * Add a refspec argument to the git module that allows pulling commits that aren't part of a branch - * Safety changes: several modules have force parameters that defaulted to true. - These have been changed to default to false so as not to accidentally lose - work. Playbooks that depended on the former behaviour simply need to add - force=True to the task that needs it. Affected modules: - * bzr: When local modifications exist in a checkout, the bzr module used to - default to removing the modifications on any operation. 
Now the module - will not remove the modifications unless force=yes is specified. - Operations that depend on a clean working tree may fail unless force=yes is - added. - * git: When local modifications exist in a checkout, the git module will now - fail unless force is explictly specified. Specifying force=yes will allow - the module to revert and overwrite local modifications to make git actions - succeed. - * hg: When local modifications exist in a checkout, the hg module used to - default to removing the modifications on any operation. Now the module - will not remove the modifications unless force=yes is specified. - * subversion: When updating a checkout with local modifications, you now need - to add force=yes so the module will revert the modifications before updating. - - -* new inventory scripts: +* Many new enhancements to the amazon web service modules: + * ec2 now applies all specified security groups when creating a new instance. Previously it was only applying one + * ec2_vol gained the ability to specify the EBS volume type + * ec2_vol can now detach volumes by specifying instance=None + * Fix ec2_group to purge specific grants rather than whole rules + * Added tenancy support for the ec2 module + * rds module has gained the ability to manage tags and set charset and public accessibility + * ec2_snapshot module gained the capability to remove snapshots + * Add alias support for route53 + * Add private_zones support to route53 + * ec2_asg: Add wait_for_instances parameter that waits until an instance is ready before ending the ansible task +* Many new docker improvements: + * restart_policy parameters to configure when the container automatically restarts + * If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option + * Add insecure_registry parameter for connecting to registries via http + * New parameter to set a container's domainname + * Undeprecated docker_image module until there's 
replacement functionality + * Allow setting the container's pid namespace + * Add a pull parameter that chooses when ansible will look for more recent images in the registry + * docker module states have been greatly enhanced. The reworked and new states are: + * present now creates but does not start containers + * restarted always restarts a container + * reloaded restarts a container if ansible detects that the configuration is different than what is spcified + * reloaded accounts for exposed ports, env vars, and volumes + * Can now connect to the docker server using TLS +* Several source control modules had force parameters that defaulted to true. + These have been changed to default to false so as not to accidentally lose + work. Playbooks that depended on the former behaviour simply need to add + force=True to the task that needs it. Affected modules: + * bzr: When local modifications exist in a checkout, the bzr module used to + default to removing the modifications on any operation. Now the module + will not remove the modifications unless force=yes is specified. + Operations that depend on a clean working tree may fail unless force=yes is + added. + * git: When local modifications exist in a checkout, the git module will now + fail unless force is explictly specified. Specifying force=yes will allow + the module to revert and overwrite local modifications to make git actions + succeed. + * hg: When local modifications exist in a checkout, the hg module used to + default to removing the modifications on any operation. Now the module + will not remove the modifications unless force=yes is specified. + * subversion: When updating a checkout with local modifications, you now need + to add force=yes so the module will revert the modifications before updating. 
+* New inventory scripts: * vbox: virtualbox * consul: use consul as an inventory source - +* gce gained the ip_forward parameter to forward ip packets +* disk_auto_delete parameter to gce that will remove the boot disk after an instance is destroyed +* gce can now spawn instances with no external ip +* gce_pd gained the ability to choose a disk type +* gce_net gained target_tags parameter for creating firewall rules +* rax module has new parameters for making use of a boot volume +* Add scheduler_hints to the nova_compute module for optional parameters +* vsphere_guest now supports deploying guests from a template +* Many fixes for hardlink and softlink handling in file-related modules +* Implement user, group, mode, and selinux parameters for the unarchive module +* authorized_keys can now use url as a key source +* authorized_keys has a new exclusive paameter that determines if keys that weren't specified in the task +* The selinux module now sets the current running state to permissive if state='disabled' +* Can now set accounts to expire via the user module +* Overhaul of the service module to make code simpler and behave better for systems running several popular init systems +* yum module now has a parameter to refresh its cache of package metadata +* apt module gained a build_dep parameter to install a package's build dependencies +* Add parameters to the postgres modules to specify a unix socket to connect to the db +* The mount module now supports bind mounts +* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. +* Add a refspec argument to the git module that allows pulling commits that aren't part of a branch * Many documentation additions and fixes. 
## 1.8.4 "You Really Got Me" - Feb 19, 2015 From c024057e9721f8736068b5fb5743ff8b18f6248e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 25 Mar 2015 12:21:46 -0700 Subject: [PATCH 0170/3617] Fix assert to work with unicode values --- lib/ansible/utils/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index f164b25bd47cba..07e8174893fc39 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -260,10 +260,10 @@ def check_conditional(conditional, basedir, inject, fail_on_undefined=False): conditional = conditional.replace("jinja2_compare ","") # allow variable names - if conditional in inject and '-' not in str(inject[conditional]): - conditional = inject[conditional] + if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'): + conditional = to_unicode(inject[conditional], nonstring='simplerepr') conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined) - original = str(conditional).replace("jinja2_compare ","") + original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","") # a Jinja2 evaluation that results in something Python can eval! presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional conditional = template.template(basedir, presented, inject) From aaa25eb75c84662d0d496188e143bc616e60ecc5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 25 Mar 2015 12:22:45 -0700 Subject: [PATCH 0171/3617] Make run_command() work when we get byte str with non-ascii characters (instead of unicode type like we were expecting) Fix and test. 
Fixes #10536 --- lib/ansible/module_utils/basic.py | 7 ++++++- test/integration/unicode.yml | 9 +++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index b68a36b9c651a2..ad1d43f86ca99b 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1457,7 +1457,12 @@ def run_command(self, args, check_rc=False, close_fds=True, executable=None, dat # in reporting later, which strips out things like # passwords from the args list if isinstance(args, basestring): - to_clean_args = shlex.split(args.encode('utf-8')) + if isinstance(args, unicode): + b_args = args.encode('utf-8') + else: + b_args = args + to_clean_args = shlex.split(b_args) + del b_args else: to_clean_args = args diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index b04d760182c9ef..1044c2527053ed 100644 --- a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -41,6 +41,15 @@ - name: 'A task with unicode host vars' debug: var=unicode_host_var + - name: 'A task with unicode shell parameters' + shell: echo '¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' + register: output + + - name: 'Assert that the unicode was echoed' + assert: + that: + - "'¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' in output.stdout_lines" + - name: 'A play for hosts in group: ĪīĬĭ' hosts: 'ĪīĬĭ' gather_facts: true From 38892e986ef78271a06b1d228a0d3294281c40d4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 25 Mar 2015 13:56:46 -0700 Subject: [PATCH 0172/3617] Convert exceptions to unicode using to_unicode rather than str. 
that stops unicode errors if the string has non-ascii text --- v2/ansible/executor/task_executor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index 7eaba0061ef29e..4ac062251391a5 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -26,6 +26,7 @@ from ansible.playbook.task import Task from ansible.plugins import lookup_loader, connection_loader, action_loader from ansible.utils.listify import listify_lookup_plugin_terms +from ansible.utils.unicode import to_unicode from ansible.utils.debug import debug @@ -89,7 +90,7 @@ def run(self): debug("done dumping result, returning") return result except AnsibleError, e: - return dict(failed=True, msg=str(e)) + return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr')) def _get_loop_items(self): ''' From 60f972dfe4bc58180c666f820ef2d602acf917e4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 25 Mar 2015 13:57:48 -0700 Subject: [PATCH 0173/3617] Fix the command module handling of non-ascii values. 
We can't depend on the args being unicode text because we're in module land, not in the ansible controller land --- v2/ansible/module_utils/basic.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/v2/ansible/module_utils/basic.py b/v2/ansible/module_utils/basic.py index 79a0fab67b6e4c..b3cebf0ba5a0fc 100644 --- a/v2/ansible/module_utils/basic.py +++ b/v2/ansible/module_utils/basic.py @@ -1433,7 +1433,7 @@ def run_command(self, args, check_rc=False, close_fds=True, executable=None, dat msg = None st_in = None - # Set a temporart env path if a prefix is passed + # Set a temporary env path if a prefix is passed env=os.environ if path_prefix: env['PATH']="%s:%s" % (path_prefix, env['PATH']) @@ -1442,7 +1442,12 @@ def run_command(self, args, check_rc=False, close_fds=True, executable=None, dat # in reporting later, which strips out things like # passwords from the args list if isinstance(args, basestring): - to_clean_args = shlex.split(args.encode('utf-8')) + if isinstance(args, unicode): + b_args = args.encode('utf-8') + else: + b_args = args + to_clean_args = shlex.split(b_args) + del b_args else: to_clean_args = args From c697bc2546444a3adbe86b7537e3e2d71ea75523 Mon Sep 17 00:00:00 2001 From: Andrew Thompson Date: Wed, 25 Mar 2015 21:58:24 -0400 Subject: [PATCH 0174/3617] Fix some typos in CHANGELOG.md --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1d171e8b45770..f354dfd145720f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,9 +29,9 @@ Major changes: * Fix skipped tasks to not display their parameters if no_log is specified. * Many fixes to unicode support, standarized functions to make it easier to add to input/output boundries. * Added travis integration to github for basic tests, this should speed up ticket triage and merging. -* environment: directive now can also be applied to play and is inhertited by tasks, which can still overridde it. 
+* environment: directive now can also be applied to play and is inhertited by tasks, which can still override it. * expanded facts and OS/distribution support for existing facts and improved performance with pypy. -* new 'wantlist' option to lookups allows for selecting a list typed variable vs a commad delimited string as the return. +* new 'wantlist' option to lookups allows for selecting a list typed variable vs a command delimited string as the return. * the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes). * allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules). * sped up YAML parsing in ansible by up to 25% by switching to CParser loader. @@ -126,7 +126,7 @@ Other Notable Changes: * Many fixes for hardlink and softlink handling in file-related modules * Implement user, group, mode, and selinux parameters for the unarchive module * authorized_keys can now use url as a key source -* authorized_keys has a new exclusive paameter that determines if keys that weren't specified in the task +* authorized_keys has a new exclusive parameter that determines if keys that weren't specified in the task * The selinux module now sets the current running state to permissive if state='disabled' * Can now set accounts to expire via the user module * Overhaul of the service module to make code simpler and behave better for systems running several popular init systems From 51d6db136cf9f58847fefe5e2ba398e4e2ee974d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 22:06:30 -0400 Subject: [PATCH 0175/3617] updated changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f354dfd145720f..3ae9d1d189182e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,8 +4,14 @@ Ansible Changes By Release ## 2.0 "TBD" - ACTIVE DEVELOPMENT Major Changes: + big_ip modules now support turning off ssl certificat 
validation (use only for self signed) New Modules: + vertica_configuration + vertica_facts + vertica_role + vertica_schema + vertica_user Other Notable Changes: From 74ef30cec1e90cf9f8b33937ea5c8bf7418d20b4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:16:05 -0400 Subject: [PATCH 0176/3617] added pushover module to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3ae9d1d189182e..72804bb65135ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Major Changes: big_ip modules now support turning off ssl certificat validation (use only for self signed) New Modules: + pushover vertica_configuration vertica_facts vertica_role From 361517165160718e04755ccaf4a242f2fff8bbd0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Mar 2015 23:56:26 -0400 Subject: [PATCH 0177/3617] added maven artifact to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 72804bb65135ed..553e6090bb3607 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Major Changes: big_ip modules now support turning off ssl certificat validation (use only for self signed) New Modules: + maven_artifact pushover vertica_configuration vertica_facts From e9c8e89c77738a65d9791d23f700023176206524 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 26 Mar 2015 01:16:32 -0400 Subject: [PATCH 0178/3617] added cloudtrail to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 553e6090bb3607..38c09d0b59d268 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Major Changes: big_ip modules now support turning off ssl certificat validation (use only for self signed) New Modules: + cloudtrail maven_artifact pushover vertica_configuration From bb6d983290e030502bd407ba800ba0eb2f60209c Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 26 Mar 2015 10:26:33 +0100 Subject: [PATCH 0179/3617] 
cloudstack: add utils for common functionality --- lib/ansible/module_utils/cloudstack.py | 182 +++++++++++++++++++++++++ 1 file changed, 182 insertions(+) create mode 100644 lib/ansible/module_utils/cloudstack.py diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py new file mode 100644 index 00000000000000..cb482ae993290d --- /dev/null +++ b/lib/ansible/module_utils/cloudstack.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This code is part of Ansible, but is an independent component. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import sys + +try: + from cs import CloudStack, CloudStackException, read_config +except ImportError: + print("failed=True " + \ + "msg='python library cs required: pip install cs'") + sys.exit(1) + + +class AnsibleCloudStack: + + def __init__(self, module): + self.module = module + self._connect() + + self.project_id = None + self.ip_address_id = None + self.zone_id = None + self.vm_id = None + self.os_type_id = None + self.hypervisor = None + + + def _connect(self): + api_key = self.module.params.get('api_key') + api_secret = self.module.params.get('secret_key') + api_url = self.module.params.get('api_url') + api_http_method = self.module.params.get('api_http_method') + + if api_key and api_secret and api_url: + self.cs = CloudStack( + endpoint=api_url, + key=api_key, + secret=api_secret, + method=api_http_method + ) + else: + self.cs = CloudStack(**read_config()) + + + def get_project_id(self): + if self.project_id: + return self.project_id + + project = self.module.params.get('project') + if not project: + return None + + projects = self.cs.listProjects() + if projects: + for p in projects['project']: + if project in [ p['name'], p['displaytext'], p['id'] ]: + self.project_id = p['id'] + return self.project_id + self.module.fail_json(msg="project '%s' not found" % project) + + + def get_ip_address_id(self): + if self.ip_address_id: + return self.ip_address_id + + ip_address = self.module.params.get('ip_address') + if not ip_address: + self.module.fail_json(msg="IP address param 'ip_address' is required") + + args = {} + args['ipaddress'] = ip_address + args['projectid'] = self.get_project_id() + ip_addresses = self.cs.listPublicIpAddresses(**args) + + if not ip_addresses: + self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress']) + + self.ip_address_id = ip_addresses['publicipaddress'][0]['id'] + return self.ip_address_id + + + def get_vm_id(self): + if self.vm_id: + return self.vm_id + + vm = self.module.params.get('vm') + if not vm: + 
self.module.fail_json(msg="Virtual machine param 'vm' is required") + + args = {} + args['projectid'] = self.get_project_id() + vms = self.cs.listVirtualMachines(**args) + if vms: + for v in vms['virtualmachine']: + if vm in [ v['name'], v['id'] ]: + self.vm_id = v['id'] + return self.vm_id + self.module.fail_json(msg="Virtual machine '%s' not found" % vm) + + + def get_zone_id(self): + if self.zone_id: + return self.zone_id + + zone = self.module.params.get('zone') + zones = self.cs.listZones() + + # use the first zone if no zone param given + if not zone: + self.zone_id = zones['zone'][0]['id'] + return self.zone_id + + if zones: + for z in zones['zone']: + if zone in [ z['name'], z['id'] ]: + self.zone_id = z['id'] + return self.zone_id + self.module.fail_json(msg="zone '%s' not found" % zone) + + + def get_os_type_id(self): + if self.os_type_id: + return self.os_type_id + + os_type = self.module.params.get('os_type') + if not os_type: + return None + + os_types = self.cs.listOsTypes() + if os_types: + for o in os_types['ostype']: + if os_type in [ o['description'], o['id'] ]: + self.os_type_id = o['id'] + return self.os_type_id + self.module.fail_json(msg="OS type '%s' not found" % os_type) + + + def get_hypervisor(self): + if self.hypervisor: + return self.hypervisor + + hypervisor = self.module.params.get('hypervisor') + hypervisors = self.cs.listHypervisors() + + # use the first hypervisor if no hypervisor param given + if not hypervisor: + self.hypervisor = hypervisors['hypervisor'][0]['name'] + return self.hypervisor + + for h in hypervisors['hypervisor']: + if hypervisor.lower() == h['name'].lower(): + self.hypervisor = h['name'] + return self.hypervisor + self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor) + + + def _poll_job(self, job=None, key=None): + if 'jobid' in job: + while True: + res = self.cs.queryAsyncJobResult(jobid=job['jobid']) + if res['jobstatus'] != 0: + if 'jobresult' in res and key is not None and key in 
res['jobresult']: + job = res['jobresult'][key] + break + time.sleep(2) + return job From 1ba05dd3a298ccc0a377f718046dc80aeaea5860 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 26 Mar 2015 14:10:18 +0100 Subject: [PATCH 0180/3617] cloudstack: add doc fragment --- .../utils/module_docs_fragments/cloudstack.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 lib/ansible/utils/module_docs_fragments/cloudstack.py diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py new file mode 100644 index 00000000000000..8d173ea756f3c4 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/cloudstack.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2015 René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +class ModuleDocFragment(object): + + # Standard cloudstack documentation fragment + DOCUMENTATION = ''' +options: + api_key: + description: + - API key of the CloudStack API. + required: false + default: null + aliases: [] + api_secret: + description: + - Secret key of the CloudStack API. + required: false + default: null + aliases: [] + api_url: + description: + - URL of the CloudStack API e.g. https://cloud.example.com/client/api. + required: false + default: null + aliases: [] + api_http_method: + description: + - HTTP method used. 
+ required: false + default: 'get' + aliases: [] +requirements: + - cs +notes: + - Ansible uses the C(cs) library's configuration method if credentials are not + provided by the options C(api_url), C(api_key), C(api_secret). + Configuration is read from several locations, in the following order: + - The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and + C(CLOUDSTACK_METHOD) environment variables. + - A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file, + - A C(cloudstack.ini) file in the current working directory. + - A C(.cloudstack.ini) file in the users home directory. + See https://github.com/exoscale/cs for more information. + - This module supports check mode. +''' From c066a60b7c48c9a31b51834d49bccfd0b00dd2e5 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 26 Mar 2015 15:32:58 +0100 Subject: [PATCH 0181/3617] cloudstack: fail_json() if library cs is not found --- lib/ansible/module_utils/cloudstack.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index cb482ae993290d..ab72f2c7894157 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -17,19 +17,20 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-import sys try: from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True except ImportError: - print("failed=True " + \ - "msg='python library cs required: pip install cs'") - sys.exit(1) + has_lib_cs = False class AnsibleCloudStack: def __init__(self, module): + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + self.module = module self._connect() From 3e7d959c9d398d5cbe02b72d4717d86cc45b310a Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 26 Mar 2015 15:39:02 +0100 Subject: [PATCH 0182/3617] cloudstack: module utils are BSD licensed --- lib/ansible/module_utils/cloudstack.py | 30 +++++++++++++++++--------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index ab72f2c7894157..f72d270d30b3b7 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -3,19 +3,29 @@ # (c) 2015, René Moser # # This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. # -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: # -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
+# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. # -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
try: From 5bf9ea629882a9ef58fe37b68d84dd49980450c6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 26 Mar 2015 11:52:19 -0700 Subject: [PATCH 0183/3617] make sure the shebang we inject into the module is a str Fixes #8564 --- lib/ansible/module_common.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_common.py b/lib/ansible/module_common.py index 5e3732e9677bac..2ee23c90b774a3 100644 --- a/lib/ansible/module_common.py +++ b/lib/ansible/module_common.py @@ -26,6 +26,7 @@ from ansible import utils from ansible import constants as C from ansible import __version__ +from asnible.utils.unicode import to_bytes REPLACER = "#<>" REPLACER_ARGS = "\"<>\"" @@ -184,7 +185,8 @@ def modify_module(self, module_path, complex_args, module_args, inject): interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter) if interpreter_config in inject: - lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:])) + interpreter = to_bytes(inject[interpreter_config], errors='strict') + lines[0] = shebang = "#!%s %s" % (interpreter, " ".join(args[1:])) module_data = "\n".join(lines) return (module_data, module_style, shebang) From ea2d00c5585a474b67f5031f689c143974eb9dc9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 26 Mar 2015 11:57:27 -0700 Subject: [PATCH 0184/3617] v2 equivalent for https://github.com/ansible/ansible/pull/8564 Looks like there's currently no code for the ansible_*_interpreter but modified the note abouot adding it --- v2/ansible/executor/module_common.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/v2/ansible/executor/module_common.py b/v2/ansible/executor/module_common.py index 9f878fb6b02b56..7c76fd7427d363 100644 --- a/v2/ansible/executor/module_common.py +++ b/v2/ansible/executor/module_common.py @@ -165,23 +165,25 @@ def modify_module(module_path, module_args, strip_comments=False): # facility = inject['ansible_syslog_facility'] # 
module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility) - lines = module_data.split("\n", 1) + lines = module_data.split(b"\n", 1) shebang = None - if lines[0].startswith("#!"): + if lines[0].startswith(b"#!"): shebang = lines[0].strip() args = shlex.split(str(shebang[2:])) interpreter = args[0] interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter) # FIXME: more inject stuff here... + #from ansible.utils.unicode import to_bytes #if interpreter_config in inject: - # lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:])) + # interpreter = to_bytes(inject[interpreter_config], errors='strict') + # lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:])) lines.insert(1, ENCODING_STRING) else: lines.insert(0, ENCODING_STRING) - module_data = "\n".join(lines) + module_data = b"\n".join(lines) return (module_data, module_style, shebang) From 0ec1b025a912c7c487083f87ae3ea87b7267dab6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 26 Mar 2015 11:59:53 -0700 Subject: [PATCH 0185/3617] Update the module pointers --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- v2/ansible/modules/extras | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 7683f36613ec09..5d776936cc67b2 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 7683f36613ec0904618b9b2d07f215b3f028a4e0 +Subproject commit 5d776936cc67b2f43d6be9630872595243213fb0 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index cb848fcd9ec836..400166a655b304 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit cb848fcd9ec8364210fc05a5a7addd955b8a2529 +Subproject commit 400166a655b304094005aace178d0fab1cfe9763 diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras index 46e316a20a92b5..400166a655b304 160000 --- 
a/v2/ansible/modules/extras +++ b/v2/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 46e316a20a92b5a54b982eddb301eb3d57da397e +Subproject commit 400166a655b304094005aace178d0fab1cfe9763 From b7936009c2bc279e1175da8ec39eb5143f753204 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 26 Mar 2015 12:09:36 -0700 Subject: [PATCH 0186/3617] Correct typo --- lib/ansible/module_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_common.py b/lib/ansible/module_common.py index 2ee23c90b774a3..118c757f8dcae1 100644 --- a/lib/ansible/module_common.py +++ b/lib/ansible/module_common.py @@ -26,7 +26,7 @@ from ansible import utils from ansible import constants as C from ansible import __version__ -from asnible.utils.unicode import to_bytes +from ansible.utils.unicode import to_bytes REPLACER = "#<>" REPLACER_ARGS = "\"<>\"" From 7b63a5799343c9a79679388416be99e1ef671a52 Mon Sep 17 00:00:00 2001 From: deimosfr Date: Thu, 26 Mar 2015 21:40:36 +0100 Subject: [PATCH 0187/3617] fix consul inventory issue (missing method param) --- plugins/inventory/consul_io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inventory/consul_io.py b/plugins/inventory/consul_io.py index 46d47fd3bf5456..e0ff3fbbebd675 100755 --- a/plugins/inventory/consul_io.py +++ b/plugins/inventory/consul_io.py @@ -212,7 +212,7 @@ def load_data_for_node(self, node, datacenter): '''loads the data for a sinle node adding it to various groups based on metadata retrieved from the kv store and service availablity''' - index, node_data = self.consul_api.catalog.node(node, datacenter) + index, node_data = self.consul_api.catalog.node(node, dc=datacenter) node = node_data['Node'] self.add_node_to_map(self.nodes, 'all', node) self.add_metadata(node_data, "consul_datacenter", datacenter) From bc2e6d4d0eb6dd213abc4f179376922d41a0795d Mon Sep 17 00:00:00 2001 From: jxn Date: Thu, 26 Mar 2015 20:00:52 -0500 Subject: [PATCH 0188/3617] Fix a few 
spelling errors in the changelog --- CHANGELOG.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 38c09d0b59d268..10a9ca16048885 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ Ansible Changes By Release ## 2.0 "TBD" - ACTIVE DEVELOPMENT Major Changes: - big_ip modules now support turning off ssl certificat validation (use only for self signed) + big_ip modules now support turning off ssl certificate validation (use only for self signed) New Modules: cloudtrail @@ -63,8 +63,8 @@ New Modules: New Filters: -* ternary: allows for trueval/falseval assignement dependint on conditional -* cartesian: returns the cartesian product of 2 lists +* ternary: allows for trueval/falseval assignment dependent on conditional +* cartesian: returns the Cartesian product of 2 lists * to_uuid: given a string it will return an ansible domain specific UUID * checksum: uses the ansible internal checksum to return a hash from a string * hash: get a hash from a string (md5, sha1, etc) @@ -93,14 +93,14 @@ Other Notable Changes: * restart_policy parameters to configure when the container automatically restarts * If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option * Add insecure_registry parameter for connecting to registries via http - * New parameter to set a container's domainname + * New parameter to set a container's domain name * Undeprecated docker_image module until there's replacement functionality * Allow setting the container's pid namespace * Add a pull parameter that chooses when ansible will look for more recent images in the registry * docker module states have been greatly enhanced. 
The reworked and new states are: * present now creates but does not start containers * restarted always restarts a container - * reloaded restarts a container if ansible detects that the configuration is different than what is spcified + * reloaded restarts a container if ansible detects that the configuration is different than what is specified * reloaded accounts for exposed ports, env vars, and volumes * Can now connect to the docker server using TLS * Several source control modules had force parameters that defaulted to true. From e964439b990dd6695d1ee5c5d977d9e053edfcc4 Mon Sep 17 00:00:00 2001 From: kristous Date: Fri, 27 Mar 2015 07:47:20 +0100 Subject: [PATCH 0189/3617] Update README.md to get debuild you need to install devscripts --- packaging/debian/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/debian/README.md b/packaging/debian/README.md index 715084380d76aa..62c6af084c02d3 100644 --- a/packaging/debian/README.md +++ b/packaging/debian/README.md @@ -4,7 +4,7 @@ Ansible Debian Package To create an Ansible DEB package: sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools sshpass - sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot asciidoc + sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot asciidoc devscripts git clone git://github.com/ansible/ansible.git cd ansible make deb From 576832e4c9224caaed8826f83e3b12a430e68277 Mon Sep 17 00:00:00 2001 From: Kim Johansson Date: Fri, 27 Mar 2015 10:46:01 +0100 Subject: [PATCH 0190/3617] Always define error before using it When the error reason is "Forbidden", the code throws a Python exception rather than simply outputting the exception reason. It's not nice to throw a Python exception when all the info to display a proper message is available. 
--- plugins/inventory/ec2.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 5f7bd061d7210d..e93df1053d1e53 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -382,6 +382,8 @@ def get_rds_instances_by_region(self, region): for instance in instances: self.add_rds_instance(instance, region) except boto.exception.BotoServerError, e: + error = e.reason + if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": From 5ec1f3bd6ed226c63436d6ad7682f2a09d0a636a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 27 Mar 2015 08:45:04 -0400 Subject: [PATCH 0191/3617] removed folding sudo/su to become logic from constants as it is already present downstream in playbook/play/tasks --- lib/ansible/constants.py | 8 ++++---- v2/ansible/constants.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 20079863e7d636..71efefdbc383da 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -112,7 +112,6 @@ def shell_expand_path(path): DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) -DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) @@ -123,6 +122,7 @@ def shell_expand_path(path): DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 
'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) +DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') @@ -139,10 +139,10 @@ def shell_expand_path(path): #TODO: get rid of ternary chain mess BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} -DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True) +DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() -DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root') -DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS',True if DEFAULT_ASK_SUDO_PASS else False, boolean=True) +DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None) +DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) # need to rethink impementing these 2 DEFAULT_BECOME_EXE = None #DEFAULT_BECOME_EXE = 
get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo') diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index f2da07ffb02059..72b571ebb8034e 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -145,10 +145,10 @@ def shell_expand_path(path): #TODO: get rid of ternary chain mess BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} -DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',True if DEFAULT_SUDO or DEFAULT_SU else False, boolean=True) +DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() -DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',DEFAULT_SUDO_USER if DEFAULT_SUDO else DEFAULT_SU_USER if DEFAULT_SU else 'root') -DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS',True if DEFAULT_ASK_SUDO_PASS else False, boolean=True) +DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', None) +DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) # need to rethink impementing these 2 DEFAULT_BECOME_EXE = None #DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo') From 104b2036f77727766c9d0e537591c4fbec8bd7f8 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 27 Mar 2015 12:03:20 -0500 Subject: [PATCH 0192/3617] egg_info is now written directly to lib --- hacking/env-setup | 3 +-- 1 file 
changed, 1 insertion(+), 2 deletions(-) diff --git a/hacking/env-setup b/hacking/env-setup index f52c91a8b9cf8c..49390dfe5e0964 100644 --- a/hacking/env-setup +++ b/hacking/env-setup @@ -42,11 +42,10 @@ expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_M # Do the work in a function so we don't repeat ourselves later gen_egg_info() { - python setup.py egg_info if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then rm -r "$PREFIX_PYTHONPATH/ansible.egg-info" fi - mv "ansible.egg-info" "$PREFIX_PYTHONPATH" + python setup.py egg_info } if [ "$ANSIBLE_HOME" != "$PWD" ] ; then From 35a2ca8a5db25eb3280c51e3342b8c05719d9b0a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 27 Mar 2015 15:41:02 -0400 Subject: [PATCH 0193/3617] made sequence more flexible, can handle descending and negative sequences and is skipped if start==end --- lib/ansible/runner/lookup_plugins/sequence.py | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/lib/ansible/runner/lookup_plugins/sequence.py b/lib/ansible/runner/lookup_plugins/sequence.py index b162b3069e7d57..13891343b1a053 100644 --- a/lib/ansible/runner/lookup_plugins/sequence.py +++ b/lib/ansible/runner/lookup_plugins/sequence.py @@ -151,10 +151,17 @@ def sanity_check(self): ) elif self.count is not None: # convert count to end - self.end = self.start + self.count * self.stride - 1 + if self.count != 0: + self.end = self.start + self.count * self.stride - 1 + else: + self.start = 0 + self.end = 0 + self.stride = 0 del self.count - if self.end < self.start: - raise AnsibleError("can't count backwards") + if self.stride > 0 and self.end < self.start: + raise AnsibleError("to count backwards make stride negative") + if self.stride < 0 and self.end > self.start: + raise AnsibleError("to count forward don't make stride negative") if self.format.count('%') != 1: raise AnsibleError("bad formatting string: %s" % self.format) @@ -193,12 +200,13 @@ def run(self, terms, inject=None, 
**kwargs): self.sanity_check() - results.extend(self.generate_sequence()) + if self.start != self.end: + results.extend(self.generate_sequence()) except AnsibleError: raise - except Exception: + except Exception, e: raise AnsibleError( - "unknown error generating sequence" + "unknown error generating sequence: %s" % str(e) ) return results From 662b35cbce50b43f542750451fd35d58bfa2ffd9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 27 Mar 2015 18:30:42 -0400 Subject: [PATCH 0194/3617] readded sudo/su vars to allow role/includes to work with passed sudo/su --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index edec30df758651..a24c5fff1b5036 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -577,7 +577,7 @@ def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, bec # evaluate privilege escalation vars for current and child tasks included_become_vars = {} - for k in ["become", "become_user", "become_method", "become_exe"]: + for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]: if k in x: included_become_vars[k] = x[k] elif k in become_vars: From c90e3f0d16d5cc365240d772e90c507b45b940e5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 29 Mar 2015 15:58:56 -0400 Subject: [PATCH 0195/3617] small updates to community and contribution page --- docsite/rst/community.rst | 112 +++++++++++++++++++++++--------------- 1 file changed, 69 insertions(+), 43 deletions(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index 4d2de28ce16d14..f33109337dbe27 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -132,39 +132,63 @@ Modules are some of the easiest places to get started. 
Contributing Code (Features or Bugfixes) ---------------------------------------- -The Ansible project keeps its source on github at -`github.com/ansible/ansible `_ for the core application, and two sub repos ansible/ansible-modules-core and ansible/ansible-modules-extras for module related items. If you need to know if a module is in 'core' or 'extras', consult the web documentation page for that module. +The Ansible project keeps its source on github at `github.com/ansible/ansible `_ for +the core application, and two sub repos `github.com/ansible/ansible-modules-core `_ +and `ansible/ansible-modules-extras `_ for module related items. +If you need to know if a module is in 'core' or 'extras', consult the web documentation page for that module. -The project takes contributions through -`github pull requests `_. +The project takes contributions through `github pull requests `_. -It is usually a good idea to join the ansible-devel list to discuss any large features prior to submission, and this especially helps in avoiding duplicate work or efforts where we decide, upon seeing a pull request for the first time, that revisions are needed. (This is not usually needed for module development, but can be nice for large changes). +It is usually a good idea to join the ansible-devel list to discuss any large features prior to submission, +and this especially helps in avoiding duplicate work or efforts where we decide, upon seeing a pull request +for the first time, that revisions are needed. (This is not usually needed for module development, but can be nice for large changes). Note that we do keep Ansible to a particular aesthetic, so if you are unclear about whether a feature is a good fit or not, having the discussion on the development list is often a lot easier than having to modify a pull request later. 
-When submitting patches, be sure to run the unit tests first “make tests” and always use -“git rebase” vs “git merge” (aliasing git pull to git pull --rebase is a great idea) to -avoid merge commits in your submissions. There are also integration tests that can be run in the "test/integration" directory. +When submitting patches, be sure to run the unit tests first “make tests” and always use, these are the same basic +tests that will automatically run on Travis when creating the PR. There are more in depth tests in the tests/integration +directory, classified as destructive and non_destructive, run these if they pertain to your modification. They are setup +with tags so you can run subsets, some of the tests requrie cloud credentials and will only run if they are provided. +When adding new features of fixing bugs it would be nice to add new tests to avoid regressions. -In order to keep the history clean and better audit incoming code, we will require resubmission of pull requests that contain merge commits. Use "git pull --rebase" vs "git pull" and "git rebase" vs "git merge". Also be sure to use topic branches to keep your additions on different branches, such that they won't pick up stray commits later. +Use “git rebase” vs “git merge” (aliasing git pull to git pull --rebase is a great idea) to avoid merge commits in +your submissions. There are also integration tests that can be run in the "test/integration" directory. -We’ll then review your contributions and engage with you about questions and so on. +In order to keep the history clean and better audit incoming code, we will require resubmission of pull requests that +contain merge commits. Use "git pull --rebase" vs "git pull" and "git rebase" vs "git merge". Also be sure to use topic +branches to keep your additions on different branches, such that they won't pick up stray commits later. 
-As we have a very large and active community, so it may take awhile to get your contributions +If you make a mistake you do not need to close your PR, create a clean branch locally and then push to github +with --force to overwrite the existing branch (permissible in this case as no one else should be using that +branch as reference). Code comments won't be lost, they just won't be attached to the existing branch. + +We’ll then review your contributions and engage with you about questions and so on. + +As we have a very large and active community, so it may take awhile to get your contributions in! See the notes about priorities in a later section for understanding our work queue. +Be patient, your request might not get merged right away, we also try to keep the devel branch more +or less usable so we like to examine Pull requests carefully, which takes time. -Patches should be made against the 'devel' branch. +Patches should always be made against the 'devel' branch. -Contributions can be for new features like modules, or to fix bugs you or others have found. If you -are interested in writing new modules to be included in the core Ansible distribution, please refer +Keep in mind that small and focused requests are easier to examine and accept, having example cases +also help us understand the utility of a bug fix or a new feature. + +Contributions can be for new features like modules, or to fix bugs you or others have found. If you +are interested in writing new modules to be included in the core Ansible distribution, please refer to the `module development documentation `_. -Ansible's aesthetic encourages simple, readable code and consistent, conservatively extending, -backwards-compatible improvements. Code developed for Ansible needs to support Python 2.6+, +Ansible's aesthetic encourages simple, readable code and consistent, conservatively extending, +backwards-compatible improvements. 
Code developed for Ansible needs to support Python 2.6+, while code in modules must run under Python 2.4 or higher. Please also use a 4-space indent -and no tabs. +and no tabs, we do not enforce 80 column lines, we are fine wtih 120-140. We do not take 'style only' +requests unless the code is nearly unreadable, we are "PEP8ish", but not strictly compliant. + +You can also contribute by testing and revising other requests, specially if it is one you are interested +in using. Please keep your comments clear and to the point, courteous and constructive, tickets are not a +good place to start discussions (ansible-devel and IRC exist for this). Tip: To easily run from a checkout, source "./hacking/env-setup" and that's it -- no install required. You're now live! @@ -175,32 +199,34 @@ Other Topics Ansible Staff ------------- -Ansible, Inc is a company supporting Ansible and building additional solutions based on -Ansible. We also do services and support for those that are interested. +Ansible, Inc is a company supporting Ansible and building additional solutions based on +Ansible. We also do services and support for those that are interested. We also offer an +enterprise web front end to Ansible (see Tower below). -Our most -important task however is enabling all the great things that happen in the Ansible +Our most important task however is enabling all the great things that happen in the Ansible community, including organizing software releases of Ansible. For more information about any of these things, contact info@ansible.com -On IRC, you can find us as mdehaan, jimi_c, abadger1999, Tybstar, and others. On the mailing list, +On IRC, you can find us as jimi_c, abadger1999, Tybstar, bcoca, and others. On the mailing list, we post with an @ansible.com address. Mailing List Information ------------------------ -Ansible has several mailing lists. Your first post to the mailing list will be +Ansible has several mailing lists. 
Your first post to the mailing list will be moderated (to reduce spam), so please allow a day or less for your first post. -`Ansible Project List `_ is for sharing Ansible Tips, answering questions, and general user discussion. +`Ansible Project List `_ is for sharing Ansible Tips, +answering questions, and general user discussion. -`Ansible Development List `_ is for learning how to develop on Ansible, asking about prospective feature design, or discussions -about extending ansible or features in progress. +`Ansible Development List `_ is for learning how to develop on Ansible, +asking about prospective feature design, or discussions about extending ansible or features in progress. -`Ansible Announce list `_ is a read-only list that shares information about new releases of Ansible, and also rare infrequent -event information, such as announcements about an AnsibleFest coming up, which is our official conference series. +`Ansible Announce list `_ is a read-only list that shares information +about new releases of Ansible, and also rare infrequent event information, such as announcements about an AnsibleFest coming up, +which is our official conference series. -To subscribe to a group from a non-google account, you can email the subscription address, for +To subscribe to a group from a non-google account, you can email the subscription address, for example ansible-devel+subscribe@googlegroups.com. Release Numbering @@ -208,9 +234,9 @@ Release Numbering Releases ending in ".0" are major releases and this is where all new features land. Releases ending in another integer, like "0.X.1" and "0.X.2" are dot releases, and these are only going to contain -bugfixes. +bugfixes. 
-Typically we don't do dot releases for minor bugfixes (reserving these for larger items), +Typically we don't do dot releases for minor bugfixes (reserving these for larger items), but may occasionally decide to cut dot releases containing a large number of smaller fixes if it's still a fairly long time before the next release comes out. @@ -219,7 +245,7 @@ Releases are also given code names based on Van Halen songs, that no one really Tower Support Questions ----------------------- -Ansible `Tower `_ is a UI, Server, and REST endpoint for Ansible, produced by Ansible, Inc. +Ansible `Tower `_ is a UI, Server, and REST endpoint for Ansible, produced by Ansible, Inc. If you have a question about tower, email `support@ansible.com `_ rather than using the IRC channel or the general project mailing list. @@ -227,7 +253,7 @@ channel or the general project mailing list. IRC Channel ----------- -Ansible has an IRC channel #ansible on irc.freenode.net. +Ansible has an IRC channel #ansible on irc.freenode.net. Notes on Priority Flags ----------------------- @@ -241,10 +267,10 @@ As a result, we have a LOT of incoming activity to process. In the interest of transparency, we're telling you how we sort incoming requests. In our bug tracker you'll notice some labels - P1, P2, P3, P4, and P5. These are our internal -priority orders that we use to sort tickets. +priority orders that we use to sort tickets. -With some exceptions for easy merges (like documentation typos for instance), -we're going to spend most of our time working on P1 and P2 items first, including pull requests. +With some exceptions for easy merges (like documentation typos for instance), +we're going to spend most of our time working on P1 and P2 items first, including pull requests. These usually relate to important bugs or features affecting large segments of the userbase. So if you see something categorized "P3 or P4", and it's not appearing to get a lot of immediate attention, this is why. 
@@ -264,18 +290,18 @@ is help close P2 bug reports. Community Code of Conduct ------------------------- -Ansible’s community welcomes users of all types, backgrounds, and skill levels. Please -treat others as you expect to be treated, keep discussions positive, and avoid discrimination of all kinds, profanity, allegations of Cthulhu worship, or engaging in controversial debates (except vi vs emacs is cool). +Ansible’s community welcomes users of all types, backgrounds, and skill levels. Please treat others as you expect to be treated, +keep discussions positive, and avoid discrimination of all kinds, profanity, allegations of Cthulhu worship, or engaging in +controversial debates (except vi vs emacs is cool). The same expectations apply to community events as they do to online interactions. -Posts to mailing lists should remain focused around Ansible and IT automation. Abuse of these community guidelines will not be tolerated and may result in banning from community resources. +Posts to mailing lists should remain focused around Ansible and IT automation. Abuse of these community guidelines will not be +tolerated and may result in banning from community resources. Contributors License Agreement ------------------------------ -By contributing you agree that these contributions are your own (or approved by your employer) -and you grant a full, complete, irrevocable -copyright license to all users and developers of the project, present and future, pursuant -to the license of the project. +By contributing you agree that these contributions are your own (or approved by your employer) and you grant a full, complete, irrevocable +copyright license to all users and developers of the project, present and future, pursuant to the license of the project. 
From 3afc54d298ad08d24e0c803c4bb98dde124f1d07 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 29 Mar 2015 16:51:11 -0400 Subject: [PATCH 0196/3617] added zabbix modules to changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10a9ca16048885..4dc9219f2a7f85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,9 @@ New Modules: cloudtrail maven_artifact pushover + zabbix_host + zabbix_hostmacro + zabbix_screen vertica_configuration vertica_facts vertica_role From 3a70affb9aa8ff78f3ff33fc21d1095fdc1b911d Mon Sep 17 00:00:00 2001 From: joefis Date: Mon, 30 Mar 2015 16:39:09 +0100 Subject: [PATCH 0197/3617] Vagrant inventory: exit 0 on success Current code has sys.exit(1) at the end of the codepath for the options --help, --list and --host. These are not error conditions so should be returning 0 for success, not 1 which is EPERM i.e. "Operation not permitted". Newer Vagrant versions examine the exit codes from subprocesses and interpret this as a failure. 
--- plugins/inventory/vagrant.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inventory/vagrant.py b/plugins/inventory/vagrant.py index ea59a7bc02364b..7f6dc925e83fca 100755 --- a/plugins/inventory/vagrant.py +++ b/plugins/inventory/vagrant.py @@ -107,7 +107,7 @@ def get_a_ssh_config(box_name): hosts['vagrant'].append(data['HostName']) print json.dumps(hosts) - sys.exit(1) + sys.exit(0) # Get out the host details #------------------------------ @@ -122,11 +122,11 @@ def get_a_ssh_config(box_name): result['ansible_ssh_port'] = result['Port'] print json.dumps(result) - sys.exit(1) + sys.exit(0) # Print out help #------------------------------ else: parser.print_help() - sys.exit(1) \ No newline at end of file + sys.exit(0) From 2a8a302e7ecef0b47cfd851b3e273a3b199f466c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 30 Mar 2015 20:34:17 -0400 Subject: [PATCH 0198/3617] fixed corner case when counting backwards, added test cases for count=0 and backwards counts --- lib/ansible/runner/lookup_plugins/sequence.py | 6 +++++- .../roles/test_iterators/tasks/main.yml | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/lib/ansible/runner/lookup_plugins/sequence.py b/lib/ansible/runner/lookup_plugins/sequence.py index 13891343b1a053..68b0bbec90d6a0 100644 --- a/lib/ansible/runner/lookup_plugins/sequence.py +++ b/lib/ansible/runner/lookup_plugins/sequence.py @@ -166,7 +166,11 @@ def sanity_check(self): raise AnsibleError("bad formatting string: %s" % self.format) def generate_sequence(self): - numbers = xrange(self.start, self.end + 1, self.stride) + if self.stride > 0: + adjust = 1 + else: + adjust = -1 + numbers = xrange(self.start, self.end + adjust, self.stride) for i in numbers: try: diff --git a/test/integration/roles/test_iterators/tasks/main.yml b/test/integration/roles/test_iterators/tasks/main.yml index c95eaff3da4739..b9592aba2f7ed1 100644 --- 
a/test/integration/roles/test_iterators/tasks/main.yml +++ b/test/integration/roles/test_iterators/tasks/main.yml @@ -60,6 +60,10 @@ set_fact: "{{ 'x' + item }}={{ item }}" with_sequence: start=0 end=3 +- name: test with_sequence backwards + set_fact: "{{ 'y' + item }}={{ item }}" + with_sequence: start=3 end=0 stride=-1 + - name: verify with_sequence assert: that: @@ -67,6 +71,20 @@ - "x1 == '1'" - "x2 == '2'" - "x3 == '3'" + - "y3 == '3'" + - "y2 == '2'" + - "y1 == '1'" + - "y0 == '0'" + +- name: test with_sequence not failing on count == 0 + debug: msg='previously failed with backward counting error' + with_sequence: count=0 + register: count_of_zero + +- assert: + that: + - count_of_zero | skipped + - not count_of_zero | failed # WITH_RANDOM_CHOICE From 68880a797d226a410c4278bb8a11ad809bb99abe Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 26 Mar 2015 12:15:16 -0700 Subject: [PATCH 0199/3617] Update core to fix cloudformation problem --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 5d776936cc67b2..7e7eafb3e31ad0 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 5d776936cc67b2f43d6be9630872595243213fb0 +Subproject commit 7e7eafb3e31ad03b255c633460766e8c93616e65 From dc9b36ccb0d78b707364e29ea67ae7560b12a7bb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 27 Mar 2015 07:48:26 -0700 Subject: [PATCH 0200/3617] Some notes on optimizing module_replacer --- v2/ansible/executor/module_common.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/v2/ansible/executor/module_common.py b/v2/ansible/executor/module_common.py index 7c76fd7427d363..23890d64e61a69 100644 --- a/v2/ansible/executor/module_common.py +++ b/v2/ansible/executor/module_common.py @@ -140,6 +140,16 @@ def modify_module(module_path, module_args, strip_comments=False): which results in the inclusion of the common code from 
powershell.ps1 """ + ### TODO: Optimization ideas if this code is actually a source of slowness: + # * Fix comment stripping: Currently doesn't preserve shebangs and encoding info (but we unconditionally add encoding info) + # * Use pyminifier if installed + # * comment stripping/pyminifier needs to have config setting to turn it + # off for debugging purposes (goes along with keep remote but should be + # separate otherwise users wouldn't be able to get info on what the + # minifier output) + # * Only split into lines and recombine into strings once + # * Cache the modified module? If only the args are different and we do + # that as the last step we could cache sll the work up to that point. with open(module_path) as f: From ce512e18f0254b54e941bf863214d5a1caab0ad1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 27 Mar 2015 09:06:07 -0700 Subject: [PATCH 0201/3617] Remove fireball connection plugin. v2 will have accelerate but not fireball --- v2/ansible/plugins/connections/fireball.py | 151 --------------------- 1 file changed, 151 deletions(-) delete mode 100644 v2/ansible/plugins/connections/fireball.py diff --git a/v2/ansible/plugins/connections/fireball.py b/v2/ansible/plugins/connections/fireball.py deleted file mode 100644 index dd9e09bacda6d6..00000000000000 --- a/v2/ansible/plugins/connections/fireball.py +++ /dev/null @@ -1,151 +0,0 @@ -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import json -import os -import base64 -from ansible.callbacks import vvv -from ansible import utils -from ansible import errors -from ansible import constants - -HAVE_ZMQ=False - -try: - import zmq - HAVE_ZMQ=True -except ImportError: - pass - -class Connection(object): - ''' ZeroMQ accelerated connection ''' - - def __init__(self, runner, host, port, *args, **kwargs): - - self.runner = runner - self.has_pipelining = False - - # attempt to work around shared-memory funness - if getattr(self.runner, 'aes_keys', None): - utils.AES_KEYS = self.runner.aes_keys - - self.host = host - self.key = utils.key_for_hostname(host) - self.context = None - self.socket = None - - if port is None: - self.port = constants.ZEROMQ_PORT - else: - self.port = port - - def connect(self): - ''' activates the connection object ''' - - if not HAVE_ZMQ: - raise errors.AnsibleError("zmq is not installed") - - # this is rough/temporary and will likely be optimized later ... - self.context = zmq.Context() - socket = self.context.socket(zmq.REQ) - addr = "tcp://%s:%s" % (self.host, self.port) - socket.connect(addr) - self.socket = socket - - return self - - def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=None): - ''' run a command on the remote host ''' - - if in_data: - raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - - vvv("EXEC COMMAND %s" % cmd) - - if (self.runner.sudo and sudoable) or (self.runner.su and su): - raise errors.AnsibleError( - "When using fireball, do not specify sudo or su to run your tasks. " + - "Instead sudo the fireball action with sudo. " + - "Task will communicate with the fireball already running in sudo mode." 
- ) - - data = dict( - mode='command', - cmd=cmd, - tmp_path=tmp_path, - executable=executable, - ) - data = utils.jsonify(data) - data = utils.encrypt(self.key, data) - self.socket.send(data) - - response = self.socket.recv() - response = utils.decrypt(self.key, response) - response = utils.parse_json(response) - - return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr','')) - - def put_file(self, in_path, out_path): - - ''' transfer a file from local to remote ''' - vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) - - if not os.path.exists(in_path): - raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) - data = file(in_path).read() - data = base64.b64encode(data) - - data = dict(mode='put', data=data, out_path=out_path) - # TODO: support chunked file transfer - data = utils.jsonify(data) - data = utils.encrypt(self.key, data) - self.socket.send(data) - - response = self.socket.recv() - response = utils.decrypt(self.key, response) - response = utils.parse_json(response) - - # no meaningful response needed for this - - def fetch_file(self, in_path, out_path): - ''' save a remote file to the specified path ''' - vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) - - data = dict(mode='fetch', in_path=in_path) - data = utils.jsonify(data) - data = utils.encrypt(self.key, data) - self.socket.send(data) - - response = self.socket.recv() - response = utils.decrypt(self.key, response) - response = utils.parse_json(response) - response = response['data'] - response = base64.b64decode(response) - - fh = open(out_path, "w") - fh.write(response) - fh.close() - - def close(self): - ''' terminate the connection ''' - # Be a good citizen - try: - self.socket.close() - self.context.term() - except: - pass - From 4aa3ac41a14099af41c39323d6a102b584c0f785 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 27 Mar 2015 12:19:23 -0700 Subject: [PATCH 0202/3617] Port sivel's fix for egg_info (#10563) to 
v2 --- v2/hacking/env-setup | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/v2/hacking/env-setup b/v2/hacking/env-setup index c03fa0874e1ef7..8f2c331fe46927 100644 --- a/v2/hacking/env-setup +++ b/v2/hacking/env-setup @@ -42,11 +42,10 @@ expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_M # Do the work in a function so we don't repeat ourselves later gen_egg_info() { - python setup.py egg_info if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then rm -r "$PREFIX_PYTHONPATH/ansible.egg-info" fi - mv "ansible.egg-info" "$PREFIX_PYTHONPATH" + python setup.py egg_info } if [ "$ANSIBLE_HOME" != "$PWD" ] ; then From 1cc2135a0d8400952ef0ee9631f6a07db6d93058 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 30 Mar 2015 12:45:04 -0700 Subject: [PATCH 0203/3617] Fix no closed parens --- v2/ansible/plugins/action/copy.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py index 088a806b61b0ae..09990743bb7ad2 100644 --- a/v2/ansible/plugins/action/copy.py +++ b/v2/ansible/plugins/action/copy.py @@ -31,12 +31,17 @@ from ansible.utils.boolean import boolean from ansible.utils.hashing import checksum +### FIXME: Find a different way to fix 3518 as sys.defaultencoding() breaks +# the python interpreter in subtle ways. It appears that this does not fix +# 3518 anyway (using binary files via lookup(). Instead, it tries to fix +# utf-8 strings in the content parameter. That should be fixable by properly +# encoding or decoding the value before we write it to a file. 
+# ## fixes https://github.com/ansible/ansible/issues/3518 # http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html - -import sys -reload(sys) -sys.setdefaultencoding("utf8") +#import sys +#reload(sys) +#sys.setdefaultencoding("utf8") class ActionModule(ActionBase): @@ -231,7 +236,7 @@ def run(self, tmp=None, task_vars=dict()): self._remove_tempfile_if_content_defined(content, content_tempfile) # fix file permissions when the copy is done as a different user - if (self._connection_info.become and self._connection_info.become_user != 'root': + if self._connection_info.become and self._connection_info.become_user != 'root': self._remote_chmod('a+r', tmp_src, tmp) if raw: From 43c1a9744765eebfb9eaf9113336d552cfc9096b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 30 Mar 2015 19:19:34 -0700 Subject: [PATCH 0204/3617] Various unicode and backslash escape cleanups * Do backslash escape parsing in parse_kv() [was being done in the copy module purely for newlines in the copy module's content param before] * Make parse_kv always return unicode * Add bandaid to transform args to unicode until we can fix things calling parse_kv to always send it unicode. * Make split_args deal with unicode internally. Warning, no bandaid for things calling split_args without giving it unicode (shouldn't matter as dealt with str internally before) * Fix copy and unarchive action plugins to not use setdefaultencoding * Remove escaping from copy (it was broken and made content into latin-1 sometimes). escaping is now in parse_kv. * Expect that content is now a unicode string so transform to bytes just before writing to the file. * Add initial unittests for split_args and parse_kv. 4 failing tests.because split_args is injecting extra newlines. 
--- v2/ansible/parsing/splitter.py | 42 +++++++--- v2/ansible/plugins/action/copy.py | 28 +------ v2/ansible/plugins/action/unarchive.py | 8 +- v2/test/parsing/test_splitter.py | 109 +++++++++++++++++++++++++ 4 files changed, 143 insertions(+), 44 deletions(-) create mode 100644 v2/test/parsing/test_splitter.py diff --git a/v2/ansible/parsing/splitter.py b/v2/ansible/parsing/splitter.py index 9705baf169d6a9..4af1c7b171e11f 100644 --- a/v2/ansible/parsing/splitter.py +++ b/v2/ansible/parsing/splitter.py @@ -19,6 +19,27 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import re +import codecs + +# Decode escapes adapted from rspeer's answer here: +# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python +_HEXCHAR = '[a-fA-F0-9]' +_ESCAPE_SEQUENCE_RE = re.compile(r''' + ( \\U{0} # 8-digit hex escapes + | \\u{1} # 4-digit hex escapes + | \\x{2} # 2-digit hex escapes + | \\[0-7]{{1,3}} # Octal escapes + | \\N\{{[^}}]+\}} # Unicode characters by name + | \\[\\'"abfnrtv] # Single-character escapes + )'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE) + +def _decode_escapes(s): + def decode_match(match): + return codecs.decode(match.group(0), 'unicode-escape') + + return _ESCAPE_SEQUENCE_RE.sub(decode_match, s) + def parse_kv(args, check_raw=False): ''' Convert a string of key/value items to a dict. If any free-form params @@ -27,6 +48,10 @@ def parse_kv(args, check_raw=False): they will simply be ignored. 
''' + ### FIXME: args should already be a unicode string + from ansible.utils.unicode import to_unicode + args = to_unicode(args, nonstring='passthru') + options = {} if args is not None: try: @@ -39,6 +64,7 @@ def parse_kv(args, check_raw=False): raw_params = [] for x in vargs: + x = _decode_escapes(x) if "=" in x: pos = 0 try: @@ -72,7 +98,7 @@ def parse_kv(args, check_raw=False): # recombine the free-form params, if any were found, and assign # them to a special option for use later by the shell/command module if len(raw_params) > 0: - options['_raw_params'] = ' '.join(raw_params) + options[u'_raw_params'] = ' '.join(raw_params) return options @@ -126,17 +152,11 @@ def split_args(args): ''' # the list of params parsed out of the arg string - # this is going to be the result value when we are donei + # this is going to be the result value when we are done params = [] - # here we encode the args, so we have a uniform charset to - # work with, and split on white space + # Initial split on white space args = args.strip() - try: - args = args.encode('utf-8') - do_decode = True - except UnicodeDecodeError: - do_decode = False items = args.strip().split('\n') # iterate over the tokens, and reassemble any that may have been @@ -242,10 +262,6 @@ def split_args(args): if print_depth or block_depth or comment_depth or inside_quotes: raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes") - # finally, we decode each param back to the unicode it was in the arg string - if do_decode: - params = [x.decode('utf-8') for x in params] - return params def is_quoted(data): diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py index 09990743bb7ad2..89c2fde7b3f1e9 100644 --- a/v2/ansible/plugins/action/copy.py +++ b/v2/ansible/plugins/action/copy.py @@ -30,18 +30,7 @@ from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean from ansible.utils.hashing import checksum - -### FIXME: Find a 
different way to fix 3518 as sys.defaultencoding() breaks -# the python interpreter in subtle ways. It appears that this does not fix -# 3518 anyway (using binary files via lookup(). Instead, it tries to fix -# utf-8 strings in the content parameter. That should be fixable by properly -# encoding or decoding the value before we write it to a file. -# -## fixes https://github.com/ansible/ansible/issues/3518 -# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html -#import sys -#reload(sys) -#sys.setdefaultencoding("utf8") +from ansible.utils.unicode import to_bytes class ActionModule(ActionBase): @@ -55,16 +44,6 @@ def run(self, tmp=None, task_vars=dict()): raw = boolean(self._task.args.get('raw', 'no')) force = boolean(self._task.args.get('force', 'yes')) - # content with newlines is going to be escaped to safely load in yaml - # now we need to unescape it so that the newlines are evaluated properly - # when writing the file to disk - if content: - if isinstance(content, unicode): - try: - content = content.decode('unicode-escape') - except UnicodeDecodeError: - pass - # FIXME: first available file needs to be reworked somehow... #if (source is None and content is None and not 'first_available_file' in inject) or dest is None: # result=dict(failed=True, msg="src (or content) and dest are required") @@ -86,7 +65,7 @@ def run(self, tmp=None, task_vars=dict()): try: # If content comes to us as a dict it should be decoded json. # We need to encode it back into a string to write it out. 
- if type(content) is dict: + if isinstance(content, dict): content_tempfile = self._create_content_tempfile(json.dumps(content)) else: content_tempfile = self._create_content_tempfile(content) @@ -316,7 +295,8 @@ def run(self, tmp=None, task_vars=dict()): def _create_content_tempfile(self, content): ''' Create a tempfile containing defined content ''' fd, content_tempfile = tempfile.mkstemp() - f = os.fdopen(fd, 'w') + f = os.fdopen(fd, 'wb') + content = to_bytes(content) try: f.write(content) except Exception, err: diff --git a/v2/ansible/plugins/action/unarchive.py b/v2/ansible/plugins/action/unarchive.py index f99d7e28e64e08..1b6cb354f0fdf7 100644 --- a/v2/ansible/plugins/action/unarchive.py +++ b/v2/ansible/plugins/action/unarchive.py @@ -17,16 +17,10 @@ # along with Ansible. If not, see . import os +import pipes from ansible.plugins.action import ActionBase -## fixes https://github.com/ansible/ansible/issues/3518 -# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html -import sys -reload(sys) -sys.setdefaultencoding("utf8") -import pipes - class ActionModule(ActionBase): diff --git a/v2/test/parsing/test_splitter.py b/v2/test/parsing/test_splitter.py new file mode 100644 index 00000000000000..fc2c05d36fb1f0 --- /dev/null +++ b/v2/test/parsing/test_splitter.py @@ -0,0 +1,109 @@ +# coding: utf-8 +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from nose import tools +from ansible.compat.tests import unittest + +from ansible.parsing.splitter import split_args, parse_kv + + +# Tests using nose's test generators cannot use unittest base class. +# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators +class TestSplitter_Gen: + SPLIT_DATA = ( + (u'a', + [u'a'], + {u'_raw_params': u'a'}), + (u'a=b', + [u'a=b'], + {u'a': u'b'}), + (u'a="foo bar"', + [u'a="foo bar"'], + {u'a': u'foo bar'}), + (u'"foo bar baz"', + [u'"foo bar baz"'], + {u'_raw_params': '"foo bar baz"'}), + (u'foo bar baz', + [u'foo', u'bar', u'baz'], + {u'_raw_params': u'foo bar baz'}), + (u'a=b c="foo bar"', + [u'a=b', u'c="foo bar"'], + {u'a': u'b', u'c': u'foo bar'}), + (u'a="echo \\"hello world\\"" b=bar', + [u'a="echo \\"hello world\\""', u'b=bar'], + {u'a': u'echo "hello world"', u'b': u'bar'}), + (u'a="multi\nline"', + [u'a="multi\nline"'], + {u'a': u'multi\nline'}), + (u'a="blank\n\nline"', + [u'a="blank\n\nline"'], + {u'a': u'blank\n\nline'}), + (u'a="blank\n\n\nlines"', + [u'a="blank\n\n\nlines"'], + {u'a': u'blank\n\n\nlines'}), + (u'a="a long\nmessage\\\nabout a thing\n"', + [u'a="a long\nmessage\\\nabout a thing\n"'], + {u'a': u'a long\nmessage\\\nabout a thing\n'}), + (u'a="multiline\nmessage1\\\n" b="multiline\nmessage2\\\n"', + [u'a="multiline\nmessage1\\\n"', u'b="multiline\nmessage2\\\n"'], + {u'a': 'multiline\nmessage1\\\n', u'b': u'multiline\nmessage2\\\n'}), + (u'a={{jinja}}', + [u'a={{jinja}}'], + {u'a': u'{{jinja}}'}), + (u'a={{ jinja }}', + [u'a={{ jinja }}'], + {u'a': u'{{ jinja }}'}), + (u'a="{{jinja}}"', + [u'a="{{jinja}}"'], + {u'a': u'{{jinja}}'}), + (u'a={{ jinja }}{{jinja2}}', + [u'a={{ jinja }}{{jinja2}}'], + {u'a': u'{{ jinja }}{{jinja2}}'}), + (u'a="{{ jinja 
}}{{jinja2}}"', + [u'a="{{ jinja }}{{jinja2}}"'], + {u'a': u'{{ jinja }}{{jinja2}}'}), + (u'a={{jinja}} b={{jinja2}}', + [u'a={{jinja}}', u'b={{jinja2}}'], + {u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}), + (u'a="café eñyei"', + [u'a="café eñyei"'], + {u'a': u'café eñyei'}), + (u'a=café b=eñyei', + [u'a=café', u'b=eñyei'], + {u'a': u'café', u'b': u'eñyei'}), + ) + + def check_split_args(self, args, expected): + tools.eq_(split_args(args), expected) + + def test_split_args(self): + for datapoint in self.SPLIT_DATA: + yield self.check_split_args, datapoint[0], datapoint[1] + + def check_parse_kv(self, args, expected): + tools.eq_(parse_kv(args), expected) + + def test_parse_kv(self): + for datapoint in self.SPLIT_DATA: + try: + yield self.check_parse_kv, datapoint[0], datapoint[2] + except: pass From 378dc561cbf15ededd5f20d88eb6e173953f4de7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 30 Mar 2015 22:47:56 -0700 Subject: [PATCH 0205/3617] Possible fix for the first newline and triple newline problems --- v2/ansible/parsing/splitter.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/v2/ansible/parsing/splitter.py b/v2/ansible/parsing/splitter.py index 4af1c7b171e11f..a1dc051d24c993 100644 --- a/v2/ansible/parsing/splitter.py +++ b/v2/ansible/parsing/splitter.py @@ -211,7 +211,7 @@ def split_args(args): params.append(token) appended = True elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes: - if idx == 0 and not inside_quotes and was_inside_quotes: + if idx == 0 and was_inside_quotes: params[-1] = "%s%s" % (params[-1], token) elif len(tokens) > 1: spacer = '' @@ -251,8 +251,7 @@ def split_args(args): # one item (meaning we split on newlines), add a newline back here # to preserve the original structure if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation: - if not params[-1].endswith('\n'): - params[-1] += '\n' + params[-1] += '\n' # always clear the line continuation flag 
line_continuation = False From f812582d9c3c8b5d69891fb8fcf99b5b8728eac9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 08:47:30 -0400 Subject: [PATCH 0206/3617] updated submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 7e7eafb3e31ad0..bdef699596d48a 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 7e7eafb3e31ad03b255c633460766e8c93616e65 +Subproject commit bdef699596d48a9fd5bb5dad040c9b5e0765bbf6 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 400166a655b304..7794042cf65b07 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 400166a655b304094005aace178d0fab1cfe9763 +Subproject commit 7794042cf65b075c9ca9bf4248df994bff94401f From fd7bf51c1479f07ef4bc2c59f68ee5d412b0c763 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 08:58:18 -0400 Subject: [PATCH 0207/3617] updated changelog with new cloudstack modules --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4dc9219f2a7f85..06fe0504fc7ea4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,9 @@ Major Changes: New Modules: cloudtrail + cloudstack_fw + cloudstack_iso + cloudstack_sshkey maven_artifact pushover zabbix_host From 4919c225e626e41fbf9d28d228768a6fe17b5290 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 09:22:19 -0400 Subject: [PATCH 0208/3617] updated ref so docs can build --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index bdef699596d48a..613961c592ed23 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit bdef699596d48a9fd5bb5dad040c9b5e0765bbf6 +Subproject commit 
613961c592ed23ded2d7e3771ad45b01de5a95f3 From f337707ef15a2eb70d068751e447d68236b2884d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 09:43:09 -0400 Subject: [PATCH 0209/3617] updated ref to pickup latest docfixes --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 7794042cf65b07..eb04e45311683d 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 7794042cf65b075c9ca9bf4248df994bff94401f +Subproject commit eb04e45311683dba1d54c8e5db293a2d3877eb68 From 57ed9947661de6b832ced11363f0df8801b27c00 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 13:44:01 -0400 Subject: [PATCH 0210/3617] updated version --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 2e0e38c63a62a4..cd5ac039d67e0b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.9 +2.0 From eb788dd8f62a574f9df8a74b472094e4e28a778e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 31 Mar 2015 12:50:47 -0700 Subject: [PATCH 0211/3617] Just move things around so that new_inventory doesn't interfere with testing --- v2/ansible/new_inventory/__init__.py | 4 ++-- v2/ansible/{new_inventory => plugins/inventory}/aggregate.py | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename v2/ansible/{new_inventory => plugins/inventory}/aggregate.py (100%) diff --git a/v2/ansible/new_inventory/__init__.py b/v2/ansible/new_inventory/__init__.py index bcf87c9ef874df..b91d9f05a2825f 100644 --- a/v2/ansible/new_inventory/__init__.py +++ b/v2/ansible/new_inventory/__init__.py @@ -23,8 +23,8 @@ from ansible import constants as C from ansible.inventory.group import Group -from ansible.inventory.host import Host -from ansible.inventory.aggregate import InventoryAggregateParser +from .host import Host +from ansible.plugins.inventory.aggregate import InventoryAggregateParser class Inventory: ''' 
diff --git a/v2/ansible/new_inventory/aggregate.py b/v2/ansible/plugins/inventory/aggregate.py similarity index 100% rename from v2/ansible/new_inventory/aggregate.py rename to v2/ansible/plugins/inventory/aggregate.py From 90ca3865551b57482e1235d46f66449049e6f6c6 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 20:29:06 -0400 Subject: [PATCH 0212/3617] Add api timeout now that shade spports it everywhere --- lib/ansible/module_utils/openstack.py | 1 + lib/ansible/utils/module_docs_fragments/openstack.py | 5 +++++ v2/ansible/module_utils/openstack.py | 1 + 3 files changed, 7 insertions(+) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index 35b9026213e988..9e4824a301dabc 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -83,6 +83,7 @@ def openstack_full_argument_spec(**kwargs): key=dict(default=None), wait=dict(default=True, type='bool'), timeout=dict(default=180, type='int'), + api_timeout=dict(default=None, type='int'), endpoint_type=dict( default='public', choices=['public', 'internal', 'admin'] ) diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index 2979cb68d7b95f..5643b4e6accbd7 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -60,6 +60,11 @@ class ModuleDocFragment(object): - How long should ansible wait for the requested resource. required: false default: 180 + api_timeout: + description: + - How long should the socket layer wait before timing out for API calls. + If this is omitted, nothing will be passed to the requests library. + required: false verify: description: - Whether or not SSL API requests should be verified. 
diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py index 35b9026213e988..9e4824a301dabc 100644 --- a/v2/ansible/module_utils/openstack.py +++ b/v2/ansible/module_utils/openstack.py @@ -83,6 +83,7 @@ def openstack_full_argument_spec(**kwargs): key=dict(default=None), wait=dict(default=True, type='bool'), timeout=dict(default=180, type='int'), + api_timeout=dict(default=None, type='int'), endpoint_type=dict( default='public', choices=['public', 'internal', 'admin'] ) From 17e086fe8ceb19839281b4398fdf83690dbf695f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 21:36:18 -0400 Subject: [PATCH 0213/3617] dont break everything when one of the vars in inject does not template correctly, wait till its used --- lib/ansible/utils/template.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index a58b93997157f6..998e55f1f3ba1c 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -188,7 +188,11 @@ def __getitem__(self, varname): if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars): return var else: - return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined) + try: + return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined) + except: + raise KeyError("undefined variable: %s" % varname) + def add_locals(self, locals): ''' From 0d1e2e74a105fc16baf7fb2ff55cbc3c3d06ae6e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 31 Mar 2015 23:07:03 -0400 Subject: [PATCH 0214/3617] converted error on play var initialization into warning with more information --- lib/ansible/playbook/play.py | 6 +++++- lib/ansible/utils/template.py | 11 +++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index a24c5fff1b5036..78f2f6d9ba8000 100644 --- 
a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -119,7 +119,11 @@ def __init__(self, playbook, ds, basedir, vault_password=None): temp_vars = utils.combine_vars(self.vars, self.vars_file_vars) temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars) - ds = template(basedir, ds, temp_vars) + try: + ds = template(basedir, ds, temp_vars) + except errors.AnsibleError, e: + utils.warning("non fatal error while trying to template play variables: %s" % (str(e))) + ds['tasks'] = _tasks ds['handlers'] = _handlers diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 998e55f1f3ba1c..9426e254eb5826 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -118,7 +118,10 @@ def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_ if isinstance(varname, basestring): if '{{' in varname or '{%' in varname: - varname = template_from_string(basedir, varname, templatevars, fail_on_undefined) + try: + varname = template_from_string(basedir, varname, templatevars, fail_on_undefined) + except errors.AnsibleError, e: + raise errors.AnsibleError("Failed to template %s: %s" % (varname, str(e))) if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["): eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True) @@ -188,11 +191,7 @@ def __getitem__(self, varname): if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars): return var else: - try: - return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined) - except: - raise KeyError("undefined variable: %s" % varname) - + return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined) def add_locals(self, locals): ''' From 87c99b46758dcdca3ccb2daed72a85b7175036a8 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 1 Apr 2015 07:54:02 -0400 Subject: [PATCH 0215/3617] Align verify parameter with 
validate_certs The rest of ansible uses validate_certs, so make that the main documented parameter. However, leave verify as an alias since that's the passthrough value to the underlying libraries. --- lib/ansible/module_utils/openstack.py | 2 +- lib/ansible/utils/module_docs_fragments/openstack.py | 4 +++- v2/ansible/module_utils/openstack.py | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index 9e4824a301dabc..b58cc534287050 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -77,7 +77,7 @@ def openstack_full_argument_spec(**kwargs): auth=dict(default=None), region_name=dict(default=None), availability_zone=dict(default=None), - verify=dict(default=True), + verify=dict(default=True, aliases=['validate_certs']), cacert=dict(default=None), cert=dict(default=None), key=dict(default=None), diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index 5643b4e6accbd7..519ad785b9b9dc 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -65,11 +65,13 @@ class ModuleDocFragment(object): - How long should the socket layer wait before timing out for API calls. If this is omitted, nothing will be passed to the requests library. required: false - verify: + default: None + validate_certs: description: - Whether or not SSL API requests should be verified. 
required: false default: True + aliases: ['verify'] cacert: description: - A path to a CA Cert bundle that can be used as part of verifying diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py index 9e4824a301dabc..b58cc534287050 100644 --- a/v2/ansible/module_utils/openstack.py +++ b/v2/ansible/module_utils/openstack.py @@ -77,7 +77,7 @@ def openstack_full_argument_spec(**kwargs): auth=dict(default=None), region_name=dict(default=None), availability_zone=dict(default=None), - verify=dict(default=True), + verify=dict(default=True, aliases=['validate_certs']), cacert=dict(default=None), cert=dict(default=None), key=dict(default=None), From 132c0e794dbece25146ed60897af2b1f506fd698 Mon Sep 17 00:00:00 2001 From: Luke Date: Wed, 1 Apr 2015 08:29:56 -0400 Subject: [PATCH 0216/3617] note added to source section Added reminder to not use source install method if you're going to be installing ansible for a Tower system --- docsite/rst/intro_installation.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 303880cac11f84..bad6ea068eff07 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -103,6 +103,11 @@ when they are implemented, and also easily contribute to the project. Because th nothing to install, following the development version is significantly easier than most open source projects. +.. note:: + + If you are intending to use Tower as the Control Machine, do not use a source install. Please use apt/yum/pip for a stable version + + To install from source. .. 
code-block:: bash From 1fa3dbb7d2348bf4c25c116dd808831ef31ae387 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 1 Apr 2015 12:12:34 -0400 Subject: [PATCH 0217/3617] capture IOErrors on backup_local (happens on non posix filesystems) fixes #10591 --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index ad1d43f86ca99b..aaaf85e5e057e5 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1303,7 +1303,7 @@ def backup_local(self, fn): try: shutil.copy2(fn, backupdest) - except shutil.Error, e: + except (shutil.Error, IOError), e: self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e)) return backupdest From c41b917162d5d3acdf2573bbb6d87513cede4ccb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 30 Mar 2015 21:48:28 -0700 Subject: [PATCH 0218/3617] Add a yaml constructor for unicode strings: * Changes AnsibleConstructor so that only unicode strings are returned (no str type) * Tracks line, column numbers for strings * Adds unittests for AnsibleLoader (generic for all the yaml parsing) --- v2/ansible/parsing/yaml/composer.py | 16 ++- v2/ansible/parsing/yaml/constructor.py | 27 ++++- v2/ansible/parsing/yaml/objects.py | 3 + v2/test/parsing/yaml/test_loader.py | 156 +++++++++++++++++++++++++ v2/test/test.yml | 2 - 5 files changed, 199 insertions(+), 5 deletions(-) create mode 100644 v2/test/parsing/yaml/test_loader.py delete mode 100644 v2/test/test.yml diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py index 0f9c90606f30a2..4f2c9f411b6595 100644 --- a/v2/ansible/parsing/yaml/composer.py +++ b/v2/ansible/parsing/yaml/composer.py @@ -20,17 +20,27 @@ __metaclass__ = type from yaml.composer import Composer -from yaml.nodes import MappingNode +from yaml.nodes import MappingNode, ScalarNode class AnsibleComposer(Composer): def __init__(self): 
self.__mapping_starts = [] super(Composer, self).__init__() + def compose_node(self, parent, index): # the line number where the previous token has ended (plus empty lines) node = Composer.compose_node(self, parent, index) - if isinstance(node, MappingNode): + if isinstance(node, ScalarNode): + # Scalars are pretty easy -- assume they start on the current + # token's line (what about multiline strings? Perhaps we also + # need to use previous token ended + node.__datasource__ = self.name + node.__line__ = self.line + 1 + node.__column__ = self.column + 1 + elif isinstance(node, MappingNode): node.__datasource__ = self.name + + # Need extra help to know where the mapping starts try: (cur_line, cur_column) = self.__mapping_starts.pop() except: @@ -38,7 +48,9 @@ def compose_node(self, parent, index): cur_column = None node.__line__ = cur_line node.__column__ = cur_column + return node + def compose_mapping_node(self, anchor): # the column here will point at the position in the file immediately # after the first key is found, which could be a space or a newline. 
diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py index 730ba85418ffcf..b607f46b05548a 100644 --- a/v2/ansible/parsing/yaml/constructor.py +++ b/v2/ansible/parsing/yaml/constructor.py @@ -20,7 +20,8 @@ __metaclass__ = type from yaml.constructor import Constructor -from ansible.parsing.yaml.objects import AnsibleMapping +from ansible.utils.unicode import to_unicode +from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleUnicode class AnsibleConstructor(Constructor): def __init__(self, file_name=None): @@ -52,6 +53,22 @@ def construct_mapping(self, node, deep=False): return ret + def construct_yaml_str(self, node): + # Override the default string handling function + # to always return unicode objects + value = self.construct_scalar(node) + value = to_unicode(value) + data = AnsibleUnicode(self.construct_scalar(node)) + + data._line_number = node.__line__ + data._column_number = node.__column__ + if self._ansible_file_name: + data._data_source = self._ansible_file_name + else: + data._data_source = node.__datasource__ + + return data + AnsibleConstructor.add_constructor( u'tag:yaml.org,2002:map', AnsibleConstructor.construct_yaml_map) @@ -60,3 +77,11 @@ def construct_mapping(self, node, deep=False): u'tag:yaml.org,2002:python/dict', AnsibleConstructor.construct_yaml_map) +AnsibleConstructor.add_constructor( + u'tag:yaml.org,2002:str', + AnsibleConstructor.construct_yaml_str) + +AnsibleConstructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + AnsibleConstructor.construct_yaml_str) + diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index 6eff9966f94bf8..69f8c0968d17ec 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -50,3 +50,6 @@ class AnsibleMapping(AnsibleBaseYAMLObject, dict): ''' sub class for dictionaries ''' pass +class AnsibleUnicode(AnsibleBaseYAMLObject, unicode): + ''' sub class for unicode objects ''' + pass diff 
--git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py new file mode 100644 index 00000000000000..942062798e19a2 --- /dev/null +++ b/v2/test/parsing/yaml/test_loader.py @@ -0,0 +1,156 @@ +# coding: utf-8 +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from cStringIO import StringIO +from collections import Sequence, Set, Mapping + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch + +from ansible.parsing.yaml.loader import AnsibleLoader + +class TestDataLoader(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_parse_number(self): + stream = StringIO(""" + 1 + """) + loader = AnsibleLoader(stream) + data = loader.get_single_data() + self.assertEqual(data, 1) + + def test_parse_string(self): + stream = StringIO(""" + Ansible + """) + loader = AnsibleLoader(stream) + data = loader.get_single_data() + self.assertEqual(data, u'Ansible') + self.assertIsInstance(data, unicode) + + def test_parse_utf8_string(self): + stream = StringIO(""" + Cafè Eñyei + """) + loader = AnsibleLoader(stream) + data = loader.get_single_data() + self.assertEqual(data, u'Cafè Eñyei') + self.assertIsInstance(data, unicode) + + def test_parse_dict(self): + 
stream = StringIO(""" + webster: daniel + oed: oxford + """) + loader = AnsibleLoader(stream) + data = loader.get_single_data() + self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'}) + self.assertEqual(len(data), 2) + self.assertIsInstance(data.keys()[0], unicode) + self.assertIsInstance(data.values()[0], unicode) + + def test_parse_list(self): + stream = StringIO(""" + - a + - b + """) + loader = AnsibleLoader(stream) + data = loader.get_single_data() + self.assertEqual(data, [u'a', u'b']) + self.assertEqual(len(data), 2) + self.assertIsInstance(data[0], unicode) + + def test_parse_play(self): + stream = StringIO(""" + - hosts: localhost + vars: + number: 1 + string: Ansible + utf8_string: Cafè Eñyei + dictionary: + webster: daniel + oed: oxford + list: + - a + - b + - 1 + - 2 + tasks: + - name: Test case + ping: + data: "{{ utf8_string }}" + + - name: Test 2 + ping: + data: "Cafè Eñyei" + + - name: Test 3 + command: "printf 'Cafè Eñyei\\n'" + """) + loader = AnsibleLoader(stream) + data = loader.get_single_data() + self.assertEqual(len(data), 1) + self.assertIsInstance(data, list) + self.assertEqual(frozenset(data[0].keys()), frozenset((u'hosts', u'vars', u'tasks'))) + + self.assertEqual(data[0][u'hosts'], u'localhost') + + self.assertEqual(data[0][u'vars'][u'number'], 1) + self.assertEqual(data[0][u'vars'][u'string'], u'Ansible') + self.assertEqual(data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei') + self.assertEqual(data[0][u'vars'][u'dictionary'], + {u'webster': u'daniel', + u'oed': u'oxford'}) + self.assertEqual(data[0][u'vars'][u'list'], [u'a', u'b', 1, 2]) + + self.assertEqual(data[0][u'tasks'], + [{u'name': u'Test case', u'ping': {u'data': u'{{ utf8_string }}'}}, + {u'name': u'Test 2', u'ping': {u'data': u'Cafè Eñyei'}}, + {u'name': u'Test 3', u'command': u'printf \'Cafè Eñyei\n\''}, + ]) + + self.walk(data) + + def walk(self, data): + # Make sure there's no str in the data + self.assertNotIsInstance(data, str) + + # Descend into various container 
types + if isinstance(data, unicode): + # strings are a sequence so we have to be explicit here + return + elif isinstance(data, (Sequence, Set)): + for element in data: + self.walk(element) + elif isinstance(data, Mapping): + for k, v in data.items(): + self.walk(k) + self.walk(v) + + # Scalars were all checked so we're good to go + return diff --git a/v2/test/test.yml b/v2/test/test.yml deleted file mode 100644 index 299b66610d12b0..00000000000000 --- a/v2/test/test.yml +++ /dev/null @@ -1,2 +0,0 @@ -- name: Test -filename: /usr/café//are_doing_this_to_me From b152275a363bbfc098666a417c982a16808045c2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Apr 2015 12:18:53 -0700 Subject: [PATCH 0219/3617] Test line numbers and "fix" a bug in the scalar line counting --- v2/ansible/parsing/yaml/composer.py | 10 +- v2/test/parsing/yaml/test_loader.py | 191 ++++++++++++++++++++++++---- 2 files changed, 175 insertions(+), 26 deletions(-) diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py index 4f2c9f411b6595..faf712253ecada 100644 --- a/v2/ansible/parsing/yaml/composer.py +++ b/v2/ansible/parsing/yaml/composer.py @@ -35,8 +35,13 @@ def compose_node(self, parent, index): # token's line (what about multiline strings? Perhaps we also # need to use previous token ended node.__datasource__ = self.name - node.__line__ = self.line + 1 - node.__column__ = self.column + 1 + node.__line__ = self.line + + # Need to investigate why this works... + if self.indents: + node.__column__ = self.indent + 1 + else: + node.__column__ = self.column +1 elif isinstance(node, MappingNode): node.__datasource__ = self.name @@ -58,4 +63,3 @@ def compose_mapping_node(self, anchor): # should be good enough to determine the error location. 
self.__mapping_starts.append((self.line + 1, self.column + 1)) return Composer.compose_mapping_node(self, anchor) - diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py index 942062798e19a2..4f08d8ea70c3df 100644 --- a/v2/test/parsing/yaml/test_loader.py +++ b/v2/test/parsing/yaml/test_loader.py @@ -20,7 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from cStringIO import StringIO +from StringIO import StringIO from collections import Sequence, Set, Mapping from ansible.compat.tests import unittest @@ -28,7 +28,7 @@ from ansible.parsing.yaml.loader import AnsibleLoader -class TestDataLoader(unittest.TestCase): +class TestAnsibleLoaderBasic(unittest.TestCase): def setUp(self): pass @@ -40,52 +40,78 @@ def test_parse_number(self): stream = StringIO(""" 1 """) - loader = AnsibleLoader(stream) + loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, 1) + # No line/column info saved yet def test_parse_string(self): stream = StringIO(""" Ansible """) - loader = AnsibleLoader(stream) + loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, u'Ansible') self.assertIsInstance(data, unicode) + self.assertEqual(data._line_number, 2) + self.assertEqual(data._column_number, 17) + self.assertEqual(data._data_source, 'myfile.yml') + def test_parse_utf8_string(self): stream = StringIO(""" Cafè Eñyei """) - loader = AnsibleLoader(stream) + loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, u'Cafè Eñyei') self.assertIsInstance(data, unicode) + self.assertEqual(data._line_number, 2) + self.assertEqual(data._column_number, 17) + self.assertEqual(data._data_source, 'myfile.yml') + def test_parse_dict(self): stream = StringIO(""" webster: daniel oed: oxford """) - loader = AnsibleLoader(stream) + loader = AnsibleLoader(stream, 'myfile.yml') data = 
loader.get_single_data() self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'}) self.assertEqual(len(data), 2) self.assertIsInstance(data.keys()[0], unicode) self.assertIsInstance(data.values()[0], unicode) + # Note: this is the beginning of the first value. + # May be changed in the future to beginning of the first key + self.assertEqual(data._line_number, 2) + self.assertEqual(data._column_number, 25) + self.assertEqual(data._data_source, 'myfile.yml') + + self.assertEqual(data[u'webster']._line_number, 2) + self.assertEqual(data[u'webster']._column_number, 17) + self.assertEqual(data[u'webster']._data_source, 'myfile.yml') + + self.assertEqual(data[u'oed']._line_number, 3) + self.assertEqual(data[u'oed']._column_number, 17) + self.assertEqual(data[u'oed']._data_source, 'myfile.yml') + def test_parse_list(self): stream = StringIO(""" - a - b """) - loader = AnsibleLoader(stream) + loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, [u'a', u'b']) self.assertEqual(len(data), 2) self.assertIsInstance(data[0], unicode) + # No line/column info saved yet - def test_parse_play(self): +class TestAnsibleLoaderPlay(unittest.TestCase): + + def setUp(self): stream = StringIO(""" - hosts: localhost vars: @@ -112,29 +138,35 @@ def test_parse_play(self): - name: Test 3 command: "printf 'Cafè Eñyei\\n'" """) - loader = AnsibleLoader(stream) - data = loader.get_single_data() - self.assertEqual(len(data), 1) - self.assertIsInstance(data, list) - self.assertEqual(frozenset(data[0].keys()), frozenset((u'hosts', u'vars', u'tasks'))) + self.play_filename = '/path/to/myplay.yml' + stream.name = self.play_filename + self.loader = AnsibleLoader(stream) + self.data = self.loader.get_single_data() + + def tearDown(self): + pass + + def test_data_complete(self): + return + self.assertEqual(len(self.data), 1) + self.assertIsInstance(self.data, list) + self.assertEqual(frozenset(self.data[0].keys()), frozenset((u'hosts', u'vars', 
u'tasks'))) - self.assertEqual(data[0][u'hosts'], u'localhost') + self.assertEqual(self.data[0][u'hosts'], u'localhost') - self.assertEqual(data[0][u'vars'][u'number'], 1) - self.assertEqual(data[0][u'vars'][u'string'], u'Ansible') - self.assertEqual(data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei') - self.assertEqual(data[0][u'vars'][u'dictionary'], + self.assertEqual(self.data[0][u'vars'][u'number'], 1) + self.assertEqual(self.data[0][u'vars'][u'string'], u'Ansible') + self.assertEqual(self.data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei') + self.assertEqual(self.data[0][u'vars'][u'dictionary'], {u'webster': u'daniel', u'oed': u'oxford'}) - self.assertEqual(data[0][u'vars'][u'list'], [u'a', u'b', 1, 2]) + self.assertEqual(self.data[0][u'vars'][u'list'], [u'a', u'b', 1, 2]) - self.assertEqual(data[0][u'tasks'], + self.assertEqual(self.data[0][u'tasks'], [{u'name': u'Test case', u'ping': {u'data': u'{{ utf8_string }}'}}, {u'name': u'Test 2', u'ping': {u'data': u'Cafè Eñyei'}}, {u'name': u'Test 3', u'command': u'printf \'Cafè Eñyei\n\''}, - ]) - - self.walk(data) + ]) def walk(self, data): # Make sure there's no str in the data @@ -154,3 +186,116 @@ def walk(self, data): # Scalars were all checked so we're good to go return + + def test_no_str_in_data(self): + # Checks that no strings are str type + self.walk(self.data) + + def check_vars(self): + # Numbers don't have line/col information yet + #self.assertEqual(self.data[0][u'vars'][u'number']._line_number, 4) + #self.assertEqual(self.data[0][u'vars'][u'number']._column_number, 21) + #self.assertEqual(self.data[0][u'vars'][u'number']._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'vars'][u'string']._line_number, 5) + self.assertEqual(self.data[0][u'vars'][u'string']._column_number, 21) + self.assertEqual(self.data[0][u'vars'][u'string']._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'vars'][u'utf8_string']._line_number, 6) + 
self.assertEqual(self.data[0][u'vars'][u'utf8_string']._column_number, 21) + self.assertEqual(self.data[0][u'vars'][u'utf8_string']._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'vars'][u'dictionary']._line_number, 8) + self.assertEqual(self.data[0][u'vars'][u'dictionary']._column_number, 31) + self.assertEqual(self.data[0][u'vars'][u'dictionary']._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._line_number, 8) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._column_number, 23) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._line_number, 9) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._column_number, 23) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._data_source, self.play_filename) + + # Lists don't yet have line/col information + #self.assertEqual(self.data[0][u'vars'][u'list']._line_number, 10) + #self.assertEqual(self.data[0][u'vars'][u'list']._column_number, 21) + #self.assertEqual(self.data[0][u'vars'][u'list']._data_source, self.play_filename) + + def check_tasks(self): + # + # First Task + # + self.assertEqual(self.data[0][u'tasks'][0]._line_number, 16) + self.assertEqual(self.data[0][u'tasks'][0]._column_number, 28) + self.assertEqual(self.data[0][u'tasks'][0]._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'tasks'][0][u'name']._line_number, 16) + self.assertEqual(self.data[0][u'tasks'][0][u'name']._column_number, 23) + self.assertEqual(self.data[0][u'tasks'][0][u'name']._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'tasks'][0][u'ping']._line_number, 18) + self.assertEqual(self.data[0][u'tasks'][0][u'ping']._column_number, 30) + self.assertEqual(self.data[0][u'tasks'][0][u'ping']._data_source, self.play_filename) + + 
#self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._line_number, 18) + self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._column_number, 25) + self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._data_source, self.play_filename) + + # + # Second Task + # + self.assertEqual(self.data[0][u'tasks'][1]._line_number, 20) + self.assertEqual(self.data[0][u'tasks'][1]._column_number, 28) + self.assertEqual(self.data[0][u'tasks'][1]._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'tasks'][1][u'name']._line_number, 20) + self.assertEqual(self.data[0][u'tasks'][1][u'name']._column_number, 23) + self.assertEqual(self.data[0][u'tasks'][1][u'name']._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'tasks'][1][u'ping']._line_number, 22) + self.assertEqual(self.data[0][u'tasks'][1][u'ping']._column_number, 30) + self.assertEqual(self.data[0][u'tasks'][1][u'ping']._data_source, self.play_filename) + + #self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._line_number, 22) + self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._column_number, 25) + self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._data_source, self.play_filename) + + # + # Third Task + # + self.assertEqual(self.data[0][u'tasks'][2]._line_number, 24) + self.assertEqual(self.data[0][u'tasks'][2]._column_number, 28) + self.assertEqual(self.data[0][u'tasks'][2]._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'tasks'][2][u'name']._line_number, 24) + self.assertEqual(self.data[0][u'tasks'][2][u'name']._column_number, 23) + self.assertEqual(self.data[0][u'tasks'][2][u'name']._data_source, self.play_filename) + + #self.assertEqual(self.data[0][u'tasks'][2][u'command']._line_number, 25) + self.assertEqual(self.data[0][u'tasks'][2][u'command']._column_number, 23) + self.assertEqual(self.data[0][u'tasks'][2][u'command']._data_source, self.play_filename) + + def test_line_numbers(self): + # Check the 
line/column numbers are correct + # Note: Remember, currently dicts begin at the start of their first entry's value + self.assertEqual(self.data[0]._line_number, 2) + self.assertEqual(self.data[0]._column_number, 25) + self.assertEqual(self.data[0]._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'hosts']._line_number, 2) + self.assertEqual(self.data[0][u'hosts']._column_number, 19) + self.assertEqual(self.data[0][u'hosts']._data_source, self.play_filename) + + self.assertEqual(self.data[0][u'vars']._line_number, 4) + self.assertEqual(self.data[0][u'vars']._column_number, 28) + self.assertEqual(self.data[0][u'vars']._data_source, self.play_filename) + + self.check_vars() + + # Lists don't yet have line/col info + #self.assertEqual(self.data[0][u'tasks']._line_number, 17) + #self.assertEqual(self.data[0][u'tasks']._column_number, 28) + #self.assertEqual(self.data[0][u'tasks']._data_source, self.play_filename) + + self.check_tasks() From 05f1bed12bd25bf88d87bf9fcbc46bec52772309 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Apr 2015 13:51:01 -0700 Subject: [PATCH 0220/3617] Use the node's start_mark to determine line and column. * Elminates a lot of logic in the AnsibleComposer class. * Update tests with new column offsets. 
The rule should now be consistently: Column is the start of the entry's value (so for strings, the first non-space after the entry beginning, for dicts, the first character of the first key) --- v2/ansible/parsing/yaml/composer.py | 33 ++---------------- v2/test/parsing/yaml/test_loader.py | 54 ++++++++++++++--------------- 2 files changed, 29 insertions(+), 58 deletions(-) diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py index faf712253ecada..6bdee92fc38180 100644 --- a/v2/ansible/parsing/yaml/composer.py +++ b/v2/ansible/parsing/yaml/composer.py @@ -24,42 +24,15 @@ class AnsibleComposer(Composer): def __init__(self): - self.__mapping_starts = [] super(Composer, self).__init__() def compose_node(self, parent, index): # the line number where the previous token has ended (plus empty lines) node = Composer.compose_node(self, parent, index) - if isinstance(node, ScalarNode): - # Scalars are pretty easy -- assume they start on the current - # token's line (what about multiline strings? Perhaps we also - # need to use previous token ended + if isinstance(node, (ScalarNode, MappingNode)): node.__datasource__ = self.name node.__line__ = self.line - - # Need to investigate why this works... - if self.indents: - node.__column__ = self.indent + 1 - else: - node.__column__ = self.column +1 - elif isinstance(node, MappingNode): - node.__datasource__ = self.name - - # Need extra help to know where the mapping starts - try: - (cur_line, cur_column) = self.__mapping_starts.pop() - except: - cur_line = None - cur_column = None - node.__line__ = cur_line - node.__column__ = cur_column + node.__column__ = node.start_mark.column + 1 + node.__line__ = node.start_mark.line + 1 return node - - def compose_mapping_node(self, anchor): - # the column here will point at the position in the file immediately - # after the first key is found, which could be a space or a newline. 
- # We could back this up to find the beginning of the key, but this - # should be good enough to determine the error location. - self.__mapping_starts.append((self.line + 1, self.column + 1)) - return Composer.compose_mapping_node(self, anchor) diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py index 4f08d8ea70c3df..aba103d37f6757 100644 --- a/v2/test/parsing/yaml/test_loader.py +++ b/v2/test/parsing/yaml/test_loader.py @@ -83,18 +83,17 @@ def test_parse_dict(self): self.assertIsInstance(data.keys()[0], unicode) self.assertIsInstance(data.values()[0], unicode) - # Note: this is the beginning of the first value. - # May be changed in the future to beginning of the first key + # Beginning of the first key self.assertEqual(data._line_number, 2) - self.assertEqual(data._column_number, 25) + self.assertEqual(data._column_number, 17) self.assertEqual(data._data_source, 'myfile.yml') self.assertEqual(data[u'webster']._line_number, 2) - self.assertEqual(data[u'webster']._column_number, 17) + self.assertEqual(data[u'webster']._column_number, 26) self.assertEqual(data[u'webster']._data_source, 'myfile.yml') self.assertEqual(data[u'oed']._line_number, 3) - self.assertEqual(data[u'oed']._column_number, 17) + self.assertEqual(data[u'oed']._column_number, 22) self.assertEqual(data[u'oed']._data_source, 'myfile.yml') def test_parse_list(self): @@ -147,7 +146,6 @@ def tearDown(self): pass def test_data_complete(self): - return self.assertEqual(len(self.data), 1) self.assertIsInstance(self.data, list) self.assertEqual(frozenset(self.data[0].keys()), frozenset((u'hosts', u'vars', u'tasks'))) @@ -198,23 +196,23 @@ def check_vars(self): #self.assertEqual(self.data[0][u'vars'][u'number']._data_source, self.play_filename) self.assertEqual(self.data[0][u'vars'][u'string']._line_number, 5) - self.assertEqual(self.data[0][u'vars'][u'string']._column_number, 21) + self.assertEqual(self.data[0][u'vars'][u'string']._column_number, 29) 
self.assertEqual(self.data[0][u'vars'][u'string']._data_source, self.play_filename) self.assertEqual(self.data[0][u'vars'][u'utf8_string']._line_number, 6) - self.assertEqual(self.data[0][u'vars'][u'utf8_string']._column_number, 21) + self.assertEqual(self.data[0][u'vars'][u'utf8_string']._column_number, 34) self.assertEqual(self.data[0][u'vars'][u'utf8_string']._data_source, self.play_filename) self.assertEqual(self.data[0][u'vars'][u'dictionary']._line_number, 8) - self.assertEqual(self.data[0][u'vars'][u'dictionary']._column_number, 31) + self.assertEqual(self.data[0][u'vars'][u'dictionary']._column_number, 23) self.assertEqual(self.data[0][u'vars'][u'dictionary']._data_source, self.play_filename) self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._line_number, 8) - self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._column_number, 23) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._column_number, 32) self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._data_source, self.play_filename) self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._line_number, 9) - self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._column_number, 23) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._column_number, 28) self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._data_source, self.play_filename) # Lists don't yet have line/col information @@ -227,68 +225,68 @@ def check_tasks(self): # First Task # self.assertEqual(self.data[0][u'tasks'][0]._line_number, 16) - self.assertEqual(self.data[0][u'tasks'][0]._column_number, 28) + self.assertEqual(self.data[0][u'tasks'][0]._column_number, 23) self.assertEqual(self.data[0][u'tasks'][0]._data_source, self.play_filename) self.assertEqual(self.data[0][u'tasks'][0][u'name']._line_number, 16) - self.assertEqual(self.data[0][u'tasks'][0][u'name']._column_number, 23) + self.assertEqual(self.data[0][u'tasks'][0][u'name']._column_number, 
29) self.assertEqual(self.data[0][u'tasks'][0][u'name']._data_source, self.play_filename) self.assertEqual(self.data[0][u'tasks'][0][u'ping']._line_number, 18) - self.assertEqual(self.data[0][u'tasks'][0][u'ping']._column_number, 30) + self.assertEqual(self.data[0][u'tasks'][0][u'ping']._column_number, 25) self.assertEqual(self.data[0][u'tasks'][0][u'ping']._data_source, self.play_filename) - #self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._line_number, 18) - self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._column_number, 25) + self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._line_number, 18) + self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._column_number, 31) self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._data_source, self.play_filename) # # Second Task # self.assertEqual(self.data[0][u'tasks'][1]._line_number, 20) - self.assertEqual(self.data[0][u'tasks'][1]._column_number, 28) + self.assertEqual(self.data[0][u'tasks'][1]._column_number, 23) self.assertEqual(self.data[0][u'tasks'][1]._data_source, self.play_filename) self.assertEqual(self.data[0][u'tasks'][1][u'name']._line_number, 20) - self.assertEqual(self.data[0][u'tasks'][1][u'name']._column_number, 23) + self.assertEqual(self.data[0][u'tasks'][1][u'name']._column_number, 29) self.assertEqual(self.data[0][u'tasks'][1][u'name']._data_source, self.play_filename) self.assertEqual(self.data[0][u'tasks'][1][u'ping']._line_number, 22) - self.assertEqual(self.data[0][u'tasks'][1][u'ping']._column_number, 30) + self.assertEqual(self.data[0][u'tasks'][1][u'ping']._column_number, 25) self.assertEqual(self.data[0][u'tasks'][1][u'ping']._data_source, self.play_filename) - #self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._line_number, 22) - self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._column_number, 25) + self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._line_number, 22) + 
self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._column_number, 31) self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._data_source, self.play_filename) # # Third Task # self.assertEqual(self.data[0][u'tasks'][2]._line_number, 24) - self.assertEqual(self.data[0][u'tasks'][2]._column_number, 28) + self.assertEqual(self.data[0][u'tasks'][2]._column_number, 23) self.assertEqual(self.data[0][u'tasks'][2]._data_source, self.play_filename) self.assertEqual(self.data[0][u'tasks'][2][u'name']._line_number, 24) - self.assertEqual(self.data[0][u'tasks'][2][u'name']._column_number, 23) + self.assertEqual(self.data[0][u'tasks'][2][u'name']._column_number, 29) self.assertEqual(self.data[0][u'tasks'][2][u'name']._data_source, self.play_filename) - #self.assertEqual(self.data[0][u'tasks'][2][u'command']._line_number, 25) - self.assertEqual(self.data[0][u'tasks'][2][u'command']._column_number, 23) + self.assertEqual(self.data[0][u'tasks'][2][u'command']._line_number, 25) + self.assertEqual(self.data[0][u'tasks'][2][u'command']._column_number, 32) self.assertEqual(self.data[0][u'tasks'][2][u'command']._data_source, self.play_filename) def test_line_numbers(self): # Check the line/column numbers are correct # Note: Remember, currently dicts begin at the start of their first entry's value self.assertEqual(self.data[0]._line_number, 2) - self.assertEqual(self.data[0]._column_number, 25) + self.assertEqual(self.data[0]._column_number, 19) self.assertEqual(self.data[0]._data_source, self.play_filename) self.assertEqual(self.data[0][u'hosts']._line_number, 2) - self.assertEqual(self.data[0][u'hosts']._column_number, 19) + self.assertEqual(self.data[0][u'hosts']._column_number, 26) self.assertEqual(self.data[0][u'hosts']._data_source, self.play_filename) self.assertEqual(self.data[0][u'vars']._line_number, 4) - self.assertEqual(self.data[0][u'vars']._column_number, 28) + self.assertEqual(self.data[0][u'vars']._column_number, 21) 
self.assertEqual(self.data[0][u'vars']._data_source, self.play_filename) self.check_vars() From e697de6076bea96584b1109eda2287b889aaef09 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Apr 2015 14:54:22 -0700 Subject: [PATCH 0221/3617] Move AnsibleBaseBaseYAMLObject's position_info into a property --- v2/ansible/errors/__init__.py | 2 +- v2/ansible/parsing/__init__.py | 2 +- v2/ansible/parsing/yaml/constructor.py | 22 ++--- v2/ansible/parsing/yaml/objects.py | 17 ++-- v2/ansible/playbook/helpers.py | 2 +- v2/ansible/playbook/playbook_include.py | 2 +- v2/ansible/playbook/role/definition.py | 2 +- v2/ansible/playbook/task.py | 2 +- v2/test/errors/test_errors.py | 14 +-- v2/test/parsing/yaml/test_loader.py | 125 ++++++------------------ 10 files changed, 54 insertions(+), 136 deletions(-) diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index 7effe41df7cf4d..bdd6e524489c72 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -92,7 +92,7 @@ def _get_extended_error(self): error_message = '' try: - (src_file, line_number, col_number) = self._obj.get_position_info() + (src_file, line_number, col_number) = self._obj.ansible_pos error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number) if src_file not in ('', '') and self._show_content: (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1) diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index f8a3e967465da7..75465bdfa3ed1f 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -146,7 +146,7 @@ def _handle_error(self, yaml_exc, file_name, show_content): err_obj = None if hasattr(yaml_exc, 'problem_mark'): err_obj = AnsibleBaseYAMLObject() - err_obj.set_position_info(file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1) + err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1) raise 
AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content) diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py index b607f46b05548a..0043b8a2f044d5 100644 --- a/v2/ansible/parsing/yaml/constructor.py +++ b/v2/ansible/parsing/yaml/constructor.py @@ -33,23 +33,20 @@ def construct_yaml_map(self, node): yield data value = self.construct_mapping(node) data.update(value) - data._line_number = value._line_number - data._column_number = value._column_number - data._data_source = value._data_source + data.ansible_pos = value.ansible_pos def construct_mapping(self, node, deep=False): ret = AnsibleMapping(super(Constructor, self).construct_mapping(node, deep)) - ret._line_number = node.__line__ - ret._column_number = node.__column__ # in some cases, we may have pre-read the data and then # passed it to the load() call for YAML, in which case we # want to override the default datasource (which would be # '') to the actual filename we read in if self._ansible_file_name: - ret._data_source = self._ansible_file_name + data_source = self._ansible_file_name else: - ret._data_source = node.__datasource__ + data_source = node.__datasource__ + ret.ansible_pos = (data_source, node.__line__, node.__column__) return ret @@ -58,16 +55,15 @@ def construct_yaml_str(self, node): # to always return unicode objects value = self.construct_scalar(node) value = to_unicode(value) - data = AnsibleUnicode(self.construct_scalar(node)) + ret = AnsibleUnicode(self.construct_scalar(node)) - data._line_number = node.__line__ - data._column_number = node.__column__ if self._ansible_file_name: - data._data_source = self._ansible_file_name + data_source = self._ansible_file_name else: - data._data_source = node.__datasource__ + data_source = node.__datasource__ + ret.ansible_pos = (data_source, node.__line__, node.__column__) - return data + return ret AnsibleConstructor.add_constructor( u'tag:yaml.org,2002:map', diff --git 
a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index 69f8c0968d17ec..15850dd4f8749d 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -29,22 +29,19 @@ class AnsibleBaseYAMLObject: _line_number = 0 _column_number = 0 - def get_position_info(self): + def _get_ansible_position(self): return (self._data_source, self._line_number, self._column_number) - def set_position_info(self, src, line, col): + def _set_ansible_position(self, obj): + try: + (src, line, col) = obj + except (TypeError, ValueError): + raise AssertionError('ansible_pos can only be set with a tuple/list of three values: source, line number, column number') self._data_source = src self._line_number = line self._column_number = col - def copy_position_info(self, obj): - ''' copies the position info from another object ''' - assert isinstance(obj, AnsibleBaseYAMLObject) - - (src, line, col) = obj.get_position_info() - self._data_source = src - self._line_number = line - self._column_number = col + ansible_pos = property(_get_ansible_position, _set_ansible_position) class AnsibleMapping(AnsibleBaseYAMLObject, dict): ''' sub class for dictionaries ''' diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py index 0e147205578406..4277e201b7bb66 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -74,7 +74,7 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler #if 'include' in task: # cur_basedir = None # if isinstance(task, AnsibleBaseYAMLObject) and loader: - # pos_info = task.get_position_info() + # pos_info = task.ansible_pos # new_basedir = os.path.dirname(pos_info[0]) # cur_basedir = loader.get_basedir() # loader.set_basedir(new_basedir) diff --git a/v2/ansible/playbook/playbook_include.py b/v2/ansible/playbook/playbook_include.py index f7eae230f7c0e8..2e4964fce9617b 100644 --- a/v2/ansible/playbook/playbook_include.py +++ 
b/v2/ansible/playbook/playbook_include.py @@ -80,7 +80,7 @@ def preprocess_data(self, ds): # items reduced to a standard structure new_ds = AnsibleMapping() if isinstance(ds, AnsibleBaseYAMLObject): - new_ds.copy_position_info(ds) + new_ds.ansible_pos = ds.ansible_pos for (k,v) in ds.iteritems(): if k == 'include': diff --git a/v2/ansible/playbook/role/definition.py b/v2/ansible/playbook/role/definition.py index fb96a0e55f9c83..0cb1e45760dfa8 100644 --- a/v2/ansible/playbook/role/definition.py +++ b/v2/ansible/playbook/role/definition.py @@ -66,7 +66,7 @@ def preprocess_data(self, ds): # can preserve file:line:column information if it exists new_ds = AnsibleMapping() if isinstance(ds, AnsibleBaseYAMLObject): - new_ds.copy_position_info(ds) + new_ds.ansible_pos = ds.ansible_pos # first we pull the role name out of the data structure, # and then use that to determine the role path (which may diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 0f5e7674866bbd..b36c24167a426f 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -159,7 +159,7 @@ def preprocess_data(self, ds): # attributes of the task class new_ds = AnsibleMapping() if isinstance(ds, AnsibleBaseYAMLObject): - new_ds.copy_position_info(ds) + new_ds.ansible_pos = ds.ansible_pos # use the args parsing class to determine the action, args, # and the delegate_to value from the various possible forms diff --git a/v2/test/errors/test_errors.py b/v2/test/errors/test_errors.py index 3e8e0dd7bacad4..3993ea5061b73f 100644 --- a/v2/test/errors/test_errors.py +++ b/v2/test/errors/test_errors.py @@ -44,9 +44,7 @@ def test_basic_error(self): @patch.object(AnsibleError, '_get_error_lines_from_file') def test_error_with_object(self, mock_method): - self.obj._data_source = 'foo.yml' - self.obj._line_number = 1 - self.obj._column_number = 1 + self.obj.ansible_pos = ('foo.yml', 1, 1) mock_method.return_value = ('this is line 1\n', '') e = AnsibleError(self.message, self.obj) 
@@ -59,16 +57,12 @@ def test_get_error_lines_from_file(self): with patch('{0}.open'.format(BUILTINS), m): # this line will be found in the file - self.obj._data_source = 'foo.yml' - self.obj._line_number = 1 - self.obj._column_number = 1 + self.obj.ansible_pos = ('foo.yml', 1, 1) e = AnsibleError(self.message, self.obj) self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n") # this line will not be found, as it is out of the index range - self.obj._data_source = 'foo.yml' - self.obj._line_number = 2 - self.obj._column_number = 1 + self.obj.ansible_pos = ('foo.yml', 2, 1) e = AnsibleError(self.message, self.obj) self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)") - + diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py index aba103d37f6757..f9144fb2925400 100644 --- a/v2/test/parsing/yaml/test_loader.py +++ b/v2/test/parsing/yaml/test_loader.py @@ -54,9 +54,7 @@ def test_parse_string(self): self.assertEqual(data, u'Ansible') self.assertIsInstance(data, unicode) - self.assertEqual(data._line_number, 2) - self.assertEqual(data._column_number, 17) - self.assertEqual(data._data_source, 'myfile.yml') + self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) def test_parse_utf8_string(self): stream = StringIO(""" @@ -67,9 +65,7 @@ def test_parse_utf8_string(self): self.assertEqual(data, u'Cafè Eñyei') self.assertIsInstance(data, unicode) - self.assertEqual(data._line_number, 2) - self.assertEqual(data._column_number, 17) - self.assertEqual(data._data_source, 'myfile.yml') + self.assertEqual(data.ansible_pos, ('myfile.yml', 
2, 17)) def test_parse_dict(self): stream = StringIO(""" @@ -84,17 +80,10 @@ def test_parse_dict(self): self.assertIsInstance(data.values()[0], unicode) # Beginning of the first key - self.assertEqual(data._line_number, 2) - self.assertEqual(data._column_number, 17) - self.assertEqual(data._data_source, 'myfile.yml') + self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) - self.assertEqual(data[u'webster']._line_number, 2) - self.assertEqual(data[u'webster']._column_number, 26) - self.assertEqual(data[u'webster']._data_source, 'myfile.yml') - - self.assertEqual(data[u'oed']._line_number, 3) - self.assertEqual(data[u'oed']._column_number, 22) - self.assertEqual(data[u'oed']._data_source, 'myfile.yml') + self.assertEqual(data[u'webster'].ansible_pos, ('myfile.yml', 2, 26)) + self.assertEqual(data[u'oed'].ansible_pos, ('myfile.yml', 3, 22)) def test_parse_list(self): stream = StringIO(""" @@ -191,109 +180,51 @@ def test_no_str_in_data(self): def check_vars(self): # Numbers don't have line/col information yet - #self.assertEqual(self.data[0][u'vars'][u'number']._line_number, 4) - #self.assertEqual(self.data[0][u'vars'][u'number']._column_number, 21) - #self.assertEqual(self.data[0][u'vars'][u'number']._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'vars'][u'string']._line_number, 5) - self.assertEqual(self.data[0][u'vars'][u'string']._column_number, 29) - self.assertEqual(self.data[0][u'vars'][u'string']._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'vars'][u'utf8_string']._line_number, 6) - self.assertEqual(self.data[0][u'vars'][u'utf8_string']._column_number, 34) - self.assertEqual(self.data[0][u'vars'][u'utf8_string']._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'vars'][u'dictionary']._line_number, 8) - self.assertEqual(self.data[0][u'vars'][u'dictionary']._column_number, 23) - self.assertEqual(self.data[0][u'vars'][u'dictionary']._data_source, self.play_filename) + 
#self.assertEqual(self.data[0][u'vars'][u'number'].ansible_pos, (self.play_filename, 4, 21)) - self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._line_number, 8) - self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._column_number, 32) - self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster']._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._line_number, 9) - self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._column_number, 28) - self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed']._data_source, self.play_filename) + self.assertEqual(self.data[0][u'vars'][u'string'].ansible_pos, (self.play_filename, 5, 29)) + self.assertEqual(self.data[0][u'vars'][u'utf8_string'].ansible_pos, (self.play_filename, 6, 34)) + self.assertEqual(self.data[0][u'vars'][u'dictionary'].ansible_pos, (self.play_filename, 8, 23)) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster'].ansible_pos, (self.play_filename, 8, 32)) + self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed'].ansible_pos, (self.play_filename, 9, 28)) # Lists don't yet have line/col information - #self.assertEqual(self.data[0][u'vars'][u'list']._line_number, 10) - #self.assertEqual(self.data[0][u'vars'][u'list']._column_number, 21) - #self.assertEqual(self.data[0][u'vars'][u'list']._data_source, self.play_filename) + #self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 10, 21)) def check_tasks(self): # # First Task # - self.assertEqual(self.data[0][u'tasks'][0]._line_number, 16) - self.assertEqual(self.data[0][u'tasks'][0]._column_number, 23) - self.assertEqual(self.data[0][u'tasks'][0]._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'tasks'][0][u'name']._line_number, 16) - self.assertEqual(self.data[0][u'tasks'][0][u'name']._column_number, 29) - self.assertEqual(self.data[0][u'tasks'][0][u'name']._data_source, self.play_filename) - - 
self.assertEqual(self.data[0][u'tasks'][0][u'ping']._line_number, 18) - self.assertEqual(self.data[0][u'tasks'][0][u'ping']._column_number, 25) - self.assertEqual(self.data[0][u'tasks'][0][u'ping']._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._line_number, 18) - self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._column_number, 31) - self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data']._data_source, self.play_filename) + self.assertEqual(self.data[0][u'tasks'][0].ansible_pos, (self.play_filename, 16, 23)) + self.assertEqual(self.data[0][u'tasks'][0][u'name'].ansible_pos, (self.play_filename, 16, 29)) + self.assertEqual(self.data[0][u'tasks'][0][u'ping'].ansible_pos, (self.play_filename, 18, 25)) + self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data'].ansible_pos, (self.play_filename, 18, 31)) # # Second Task # - self.assertEqual(self.data[0][u'tasks'][1]._line_number, 20) - self.assertEqual(self.data[0][u'tasks'][1]._column_number, 23) - self.assertEqual(self.data[0][u'tasks'][1]._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'tasks'][1][u'name']._line_number, 20) - self.assertEqual(self.data[0][u'tasks'][1][u'name']._column_number, 29) - self.assertEqual(self.data[0][u'tasks'][1][u'name']._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'tasks'][1][u'ping']._line_number, 22) - self.assertEqual(self.data[0][u'tasks'][1][u'ping']._column_number, 25) - self.assertEqual(self.data[0][u'tasks'][1][u'ping']._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._line_number, 22) - self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._column_number, 31) - self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data']._data_source, self.play_filename) + self.assertEqual(self.data[0][u'tasks'][1].ansible_pos, (self.play_filename, 20, 23)) + self.assertEqual(self.data[0][u'tasks'][1][u'name'].ansible_pos, 
(self.play_filename, 20, 29)) + self.assertEqual(self.data[0][u'tasks'][1][u'ping'].ansible_pos, (self.play_filename, 22, 25)) + self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data'].ansible_pos, (self.play_filename, 22, 31)) # # Third Task # - self.assertEqual(self.data[0][u'tasks'][2]._line_number, 24) - self.assertEqual(self.data[0][u'tasks'][2]._column_number, 23) - self.assertEqual(self.data[0][u'tasks'][2]._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'tasks'][2][u'name']._line_number, 24) - self.assertEqual(self.data[0][u'tasks'][2][u'name']._column_number, 29) - self.assertEqual(self.data[0][u'tasks'][2][u'name']._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'tasks'][2][u'command']._line_number, 25) - self.assertEqual(self.data[0][u'tasks'][2][u'command']._column_number, 32) - self.assertEqual(self.data[0][u'tasks'][2][u'command']._data_source, self.play_filename) + self.assertEqual(self.data[0][u'tasks'][2].ansible_pos, (self.play_filename, 24, 23)) + self.assertEqual(self.data[0][u'tasks'][2][u'name'].ansible_pos, (self.play_filename, 24, 29)) + self.assertEqual(self.data[0][u'tasks'][2][u'command'].ansible_pos, (self.play_filename, 25, 32)) def test_line_numbers(self): # Check the line/column numbers are correct - # Note: Remember, currently dicts begin at the start of their first entry's value - self.assertEqual(self.data[0]._line_number, 2) - self.assertEqual(self.data[0]._column_number, 19) - self.assertEqual(self.data[0]._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'hosts']._line_number, 2) - self.assertEqual(self.data[0][u'hosts']._column_number, 26) - self.assertEqual(self.data[0][u'hosts']._data_source, self.play_filename) - - self.assertEqual(self.data[0][u'vars']._line_number, 4) - self.assertEqual(self.data[0][u'vars']._column_number, 21) - self.assertEqual(self.data[0][u'vars']._data_source, self.play_filename) + # Note: Remember, currently dicts begin at the start of 
their first entry + self.assertEqual(self.data[0].ansible_pos, (self.play_filename, 2, 19)) + self.assertEqual(self.data[0][u'hosts'].ansible_pos, (self.play_filename, 2, 26)) + self.assertEqual(self.data[0][u'vars'].ansible_pos, (self.play_filename, 4, 21)) self.check_vars() # Lists don't yet have line/col info - #self.assertEqual(self.data[0][u'tasks']._line_number, 17) - #self.assertEqual(self.data[0][u'tasks']._column_number, 28) - #self.assertEqual(self.data[0][u'tasks']._data_source, self.play_filename) + #self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 17, 28)) self.check_tasks() From 69cf95bd0e969af247d74365c6edc5564113beaa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Apr 2015 15:00:50 -0700 Subject: [PATCH 0222/3617] Add __init__ to the yaml test dir --- v2/test/parsing/yaml/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 v2/test/parsing/yaml/__init__.py diff --git a/v2/test/parsing/yaml/__init__.py b/v2/test/parsing/yaml/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 From 785c0c0c8ca8d90f3bccc7206f0c267977f77882 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 25 Mar 2015 13:51:40 -0500 Subject: [PATCH 0223/3617] V2 fixing bugs --- v2/ansible/executor/host_log.py | 43 ----- v2/ansible/executor/host_log_manager.py | 29 --- v2/ansible/executor/play_iterator.py | 206 +++++++++++++-------- v2/ansible/executor/playbook_executor.py | 35 +++- v2/ansible/executor/stats.py | 51 +++++ v2/ansible/executor/task_executor.py | 15 +- v2/ansible/executor/task_queue_manager.py | 47 +++-- v2/ansible/executor/task_queue_manager.py: | 0 v2/ansible/parsing/__init__.py | 7 +- v2/ansible/playbook/block.py | 20 +- v2/ansible/playbook/helpers.py | 46 ++--- v2/ansible/playbook/play.py | 2 +- v2/ansible/playbook/role/__init__.py | 2 +- v2/ansible/playbook/task.py | 5 +- v2/ansible/plugins/__init__.py | 5 +- v2/ansible/plugins/action/copy.py | 2 +- 
v2/ansible/plugins/callback/__init__.py | 6 +- v2/ansible/plugins/callback/default.py | 77 ++++---- v2/ansible/plugins/callback/minimal.py | 2 + v2/ansible/plugins/strategies/__init__.py | 107 +++++------ v2/ansible/plugins/strategies/linear.py | 35 ++-- v2/ansible/utils/cli.py | 2 + v2/ansible/utils/color.py | 17 ++ v2/ansible/utils/display.py | 12 ++ v2/ansible/vars/__init__.py | 15 +- v2/samples/include.yml | 4 +- v2/samples/localhost_include.yml | 3 + v2/samples/test_blocks_of_blocks.yml | 5 + v2/samples/test_include.yml | 2 +- v2/test/mock/loader.py | 3 + v2/test/playbook/test_block.py | 6 - v2/test/playbook/test_playbook.py | 9 +- v2/test/playbook/test_task_include.py | 64 ------- v2/test/vars/test_variable_manager.py | 53 ++++-- 34 files changed, 508 insertions(+), 429 deletions(-) delete mode 100644 v2/ansible/executor/host_log.py delete mode 100644 v2/ansible/executor/host_log_manager.py create mode 100644 v2/ansible/executor/stats.py create mode 100644 v2/ansible/executor/task_queue_manager.py: create mode 100644 v2/samples/localhost_include.yml delete mode 100644 v2/test/playbook/test_task_include.py diff --git a/v2/ansible/executor/host_log.py b/v2/ansible/executor/host_log.py deleted file mode 100644 index 495ad79f7d4185..00000000000000 --- a/v2/ansible/executor/host_log.py +++ /dev/null @@ -1,43 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. 
If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -class HostLog: - - def __init__(self, host): - self.host = host - - def add_task_result(self, task_result): - pass - - def has_failures(self): - assert False - - def has_changes(self): - assert False - - def get_tasks(self, are_executed=None, are_changed=None, are_successful=None): - assert False - - def get_current_running_task(self) - # atomic decorator likely required? - assert False - - diff --git a/v2/ansible/executor/host_log_manager.py b/v2/ansible/executor/host_log_manager.py deleted file mode 100644 index 727d06ce5912a7..00000000000000 --- a/v2/ansible/executor/host_log_manager.py +++ /dev/null @@ -1,29 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -class HostLogManager: - - def __init__(self): - pass - - def get_log_for_host(self, host): - assert False - diff --git a/v2/ansible/executor/play_iterator.py b/v2/ansible/executor/play_iterator.py index 4a149243d9118d..d6fe3750955943 100644 --- a/v2/ansible/executor/play_iterator.py +++ b/v2/ansible/executor/play_iterator.py @@ -20,6 +20,7 @@ __metaclass__ = type from ansible.errors import * +from ansible.playbook.block import Block from ansible.playbook.task import Task from ansible.utils.boolean import boolean @@ -38,9 +39,10 @@ def __init__(self, blocks): self.run_state = PlayIterator.ITERATING_SETUP self.fail_state = PlayIterator.FAILED_NONE self.pending_setup = False + self.child_state = None def __repr__(self): - return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s" % ( + return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, child state? 
%s" % ( self.cur_block, self.cur_regular_task, self.cur_rescue_task, @@ -49,6 +51,7 @@ def __repr__(self): self.run_state, self.fail_state, self.pending_setup, + self.child_state, ) def get_current_block(self): @@ -64,6 +67,7 @@ def copy(self): new_state.run_state = self.run_state new_state.fail_state = self.fail_state new_state.pending_setup = self.pending_setup + new_state.child_state = self.child_state return new_state class PlayIterator: @@ -104,75 +108,35 @@ def get_host_state(self, host): except KeyError: raise AnsibleError("invalid host (%s) specified for playbook iteration" % host) - def get_next_task_for_host(self, host, peek=False, lock_step=True): + def get_next_task_for_host(self, host, peek=False): + s = self.get_host_state(host) task = None if s.run_state == self.ITERATING_COMPLETE: return None - else: - while True: - try: - cur_block = s._blocks[s.cur_block] - except IndexError: - s.run_state = self.ITERATING_COMPLETE - break - - if s.run_state == self.ITERATING_SETUP: - s.run_state = self.ITERATING_TASKS - if self._play._gather_facts == 'smart' and not host.gathered_facts or boolean(self._play._gather_facts): - # mark the host as having gathered facts - host.set_gathered_facts(True) - - task = Task() - task.action = 'setup' - task.set_loader(self._play._loader) - - elif s.run_state == self.ITERATING_TASKS: - # clear the pending setup flag, since we're past that and it didn't fail - if s.pending_setup: - s.pending_setup = False - - if s.fail_state & self.FAILED_TASKS == self.FAILED_TASKS: - s.run_state = self.ITERATING_RESCUE - elif s.cur_regular_task >= len(cur_block.block): - s.run_state = self.ITERATING_ALWAYS - else: - task = cur_block.block[s.cur_regular_task] - s.cur_regular_task += 1 - break - elif s.run_state == self.ITERATING_RESCUE: - if s.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE: - s.run_state = self.ITERATING_ALWAYS - elif s.cur_rescue_task >= len(cur_block.rescue): - if len(cur_block.rescue) > 0: - s.fail_state = 
self.FAILED_NONE - s.run_state = self.ITERATING_ALWAYS - else: - task = cur_block.rescue[s.cur_rescue_task] - s.cur_rescue_task += 1 - break - elif s.run_state == self.ITERATING_ALWAYS: - if s.cur_always_task >= len(cur_block.always): - if s.fail_state != self.FAILED_NONE: - s.run_state = self.ITERATING_COMPLETE - break - else: - s.cur_block += 1 - s.cur_regular_task = 0 - s.cur_rescue_task = 0 - s.cur_always_task = 0 - s.run_state = self.ITERATING_TASKS - else: - task= cur_block.always[s.cur_always_task] - s.cur_always_task += 1 - break + elif s.run_state == self.ITERATING_SETUP: + s.run_state = self.ITERATING_TASKS + s.pending_setup = True + if self._play.gather_facts == 'smart' and not host._gathered_facts or boolean(self._play.gather_facts): + if not peek: + # mark the host as having gathered facts + host.set_gathered_facts(True) + + task = Task() + task.action = 'setup' + task.args = {} + task.set_loader(self._play._loader) + else: + s.pending_setup = False + + if not task: + (s, task) = self._get_next_task_from_state(s, peek=peek) if task and task._role: # if we had a current role, mark that role as completed if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek: s.cur_role._completed = True - s.cur_role = task._role if not peek: @@ -180,6 +144,86 @@ def get_next_task_for_host(self, host, peek=False, lock_step=True): return (s, task) + + def _get_next_task_from_state(self, state, peek): + + task = None + + # if we previously encountered a child block and we have a + # saved child state, try and get the next task from there + if state.child_state: + (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek) + if task: + return (state.child_state, task) + else: + state.child_state = None + + # try and find the next task, given the current state. 
+ while True: + # try to get the current block from the list of blocks, and + # if we run past the end of the list we know we're done with + # this block + try: + block = state._blocks[state.cur_block] + except IndexError: + state.run_state = self.ITERATING_COMPLETE + return (state, None) + + if state.run_state == self.ITERATING_TASKS: + # clear the pending setup flag, since we're past that and it didn't fail + if state.pending_setup: + state.pending_setup = False + + if state.fail_state & self.FAILED_TASKS == self.FAILED_TASKS: + state.run_state = self.ITERATING_RESCUE + elif state.cur_regular_task >= len(block.block): + state.run_state = self.ITERATING_ALWAYS + else: + task = block.block[state.cur_regular_task] + state.cur_regular_task += 1 + + elif state.run_state == self.ITERATING_RESCUE: + if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE: + state.run_state = self.ITERATING_ALWAYS + elif state.cur_rescue_task >= len(block.rescue): + if len(block.rescue) > 0: + state.fail_state = self.FAILED_NONE + state.run_state = self.ITERATING_ALWAYS + else: + task = block.rescue[state.cur_rescue_task] + state.cur_rescue_task += 1 + + elif state.run_state == self.ITERATING_ALWAYS: + if state.cur_always_task >= len(block.always): + if state.fail_state != self.FAILED_NONE: + state.run_state = self.ITERATING_COMPLETE + else: + state.cur_block += 1 + state.cur_regular_task = 0 + state.cur_rescue_task = 0 + state.cur_always_task = 0 + state.run_state = self.ITERATING_TASKS + state.child_state = None + else: + task = block.always[state.cur_always_task] + state.cur_always_task += 1 + + elif state.run_state == self.ITERATING_COMPLETE: + return (state, None) + + # if the current task is actually a child block, we dive into it + if isinstance(task, Block): + state.child_state = HostState(blocks=[task]) + state.child_state.run_state = self.ITERATING_TASKS + state.child_state.cur_role = state.cur_role + (state.child_state, task) = 
self._get_next_task_from_state(state.child_state, peek=peek) + + # if something above set the task, break out of the loop now + if task: + break + + return (state, task) + def mark_host_failed(self, host): s = self.get_host_state(host) if s.pending_setup: @@ -206,25 +250,41 @@ def get_original_task(self, host, task): the different processes, and not all data structures are preserved. This method allows us to find the original task passed into the executor engine. ''' + def _search_block(block, task): + for t in block.block: + if isinstance(t, Block): + res = _search_block(t, task) + if res: + return res + elif t._uuid == task._uuid: + return t + for t in block.rescue: + if isinstance(t, Block): + res = _search_block(t, task) + if res: + return res + elif t._uuid == task._uuid: + return t + for t in block.always: + if isinstance(t, Block): + res = _search_block(t, task) + if res: + return res + elif t._uuid == task._uuid: + return t + return None + s = self.get_host_state(host) for block in s._blocks: - if block.block: - for t in block.block: - if t._uuid == task._uuid: - return t - if block.rescue: - for t in block.rescue: - if t._uuid == task._uuid: - return t - if block.always: - for t in block.always: - if t._uuid == task._uuid: - return t + res = _search_block(block, task) + if res: + return res + return None def add_tasks(self, host, task_list): s = self.get_host_state(host) - target_block = s._blocks[s.cur_block].copy() + target_block = s._blocks[s.cur_block].copy(exclude_parent=True) if s.run_state == self.ITERATING_TASKS: before = target_block.block[:s.cur_regular_task] diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 88ec05b9e8679e..324e6b01af9dfb 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -26,6 +26,7 @@ from ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook +from ansible.utils.color import 
colorize, hostcolor from ansible.utils.debug import debug class PlaybookExecutor: @@ -70,8 +71,8 @@ def run(self): for batch in self._get_serialized_batches(new_play): if len(batch) == 0: - self._tqm._callback.playbook_on_play_start(new_play.name) - self._tqm._callback.playbook_on_no_hosts_matched() + self._tqm.send_callback('v2_playbook_on_play_start', new_play) + self._tqm.send_callback('v2_playbook_on_no_hosts_matched') result = 0 break # restrict the inventory to the hosts in the serialized batch @@ -90,6 +91,36 @@ def run(self): raise self._cleanup() + + # FIXME: this stat summary stuff should be cleaned up and moved + # to a new method, if it even belongs here... + self._tqm._display.banner("PLAY RECAP") + + hosts = sorted(self._tqm._stats.processed.keys()) + for h in hosts: + t = self._tqm._stats.summarize(h) + + self._tqm._display.display("%s : %s %s %s %s" % ( + hostcolor(h, t), + colorize('ok', t['ok'], 'green'), + colorize('changed', t['changed'], 'yellow'), + colorize('unreachable', t['unreachable'], 'red'), + colorize('failed', t['failures'], 'red')), + screen_only=True + ) + + self._tqm._display.display("%s : %s %s %s %s" % ( + hostcolor(h, t, False), + colorize('ok', t['ok'], None), + colorize('changed', t['changed'], None), + colorize('unreachable', t['unreachable'], None), + colorize('failed', t['failures'], None)), + log_only=True + ) + + self._tqm._display.display("", screen_only=True) + # END STATS STUFF + return result def _cleanup(self, signum=None, framenum=None): diff --git a/v2/ansible/executor/stats.py b/v2/ansible/executor/stats.py new file mode 100644 index 00000000000000..626b2959a4721e --- /dev/null +++ b/v2/ansible/executor/stats.py @@ -0,0 +1,51 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your 
option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +class AggregateStats: + ''' holds stats about per-host activity during playbook runs ''' + + def __init__(self): + + self.processed = {} + self.failures = {} + self.ok = {} + self.dark = {} + self.changed = {} + self.skipped = {} + + def increment(self, what, host): + ''' helper function to bump a statistic ''' + + self.processed[host] = 1 + prev = (getattr(self, what)).get(host, 0) + getattr(self, what)[host] = prev+1 + + def summarize(self, host): + ''' return information about a particular host ''' + + return dict( + ok = self.ok.get(host, 0), + failures = self.failures.get(host, 0), + unreachable = self.dark.get(host,0), + changed = self.changed.get(host, 0), + skipped = self.skipped.get(host, 0) + ) + diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index 7eaba0061ef29e..3a3aa73f1080c6 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -237,10 +237,14 @@ def _execute(self, variables=None): if self._task.poll > 0: result = self._poll_async_result(result=result) - # update the local copy of vars with the registered value, if specified + # update the local copy of vars with the registered value, if specified, + # or any facts which may have been generated by the module execution if self._task.register: vars_copy[self._task.register] = result + if 'ansible_facts' in result: + vars_copy.update(result['ansible_facts']) + # create a conditional object to evaluate task conditions cond = 
Conditional(loader=self._loader) @@ -266,6 +270,15 @@ def _execute(self, variables=None): if attempt < retries - 1: time.sleep(delay) + # do the final update of the local variables here, for both registered + # values and any facts which may have been created + if self._task.register: + variables[self._task.register] = result + + if 'ansible_facts' in result: + variables.update(result['ansible_facts']) + + # and return debug("attempt loop complete, returning result") return result diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index 7c77f8e3a70941..0693e9dc56ccaa 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -29,9 +29,11 @@ from ansible.executor.play_iterator import PlayIterator from ansible.executor.process.worker import WorkerProcess from ansible.executor.process.result import ResultProcess +from ansible.executor.stats import AggregateStats from ansible.plugins import callback_loader, strategy_loader from ansible.utils.debug import debug +from ansible.utils.display import Display __all__ = ['TaskQueueManager'] @@ -53,6 +55,9 @@ def __init__(self, inventory, callback, variable_manager, loader, options): self._variable_manager = variable_manager self._loader = loader self._options = options + self._stats = AggregateStats() + + self._display = Display() # a special flag to help us exit cleanly self._terminated = False @@ -66,9 +71,14 @@ def __init__(self, inventory, callback, variable_manager, loader, options): self._final_q = multiprocessing.Queue() - # FIXME: hard-coded the default callback plugin here, which - # should be configurable. 
- self._callback = callback_loader.get(callback) + # load all available callback plugins + # FIXME: we need an option to white-list callback plugins + self._callback_plugins = [] + for callback_plugin in callback_loader.all(class_only=True): + if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0: + self._callback_plugins.append(callback_plugin(self._display)) + else: + self._callback_plugins.append(callback_plugin()) # create the pool of worker threads, based on the number of forks specified try: @@ -131,16 +141,11 @@ def run(self, play): ''' connection_info = ConnectionInformation(play, self._options) - self._callback.set_connection_info(connection_info) - - # run final validation on the play now, to make sure fields are templated - # FIXME: is this even required? Everything is validated and merged at the - # task level, so else in the play needs to be templated - #all_vars = self._vmw.get_vars(loader=self._dlw, play=play) - #all_vars = self._vmw.get_vars(loader=self._loader, play=play) - #play.post_validate(all_vars=all_vars) + for callback_plugin in self._callback_plugins: + if hasattr(callback_plugin, 'set_connection_info'): + callback_plugin.set_connection_info(connection_info) - self._callback.playbook_on_play_start(play.name) + self.send_callback('v2_playbook_on_play_start', play) # initialize the shared dictionary containing the notified handlers self._initialize_notified_handlers(play.handlers) @@ -172,9 +177,6 @@ def cleanup(self): def get_inventory(self): return self._inventory - def get_callback(self): - return self._callback - def get_variable_manager(self): return self._variable_manager @@ -201,3 +203,18 @@ def get_workers(self): def terminate(self): self._terminated = True + + def send_callback(self, method_name, *args, **kwargs): + for callback_plugin in self._callback_plugins: + # a plugin that set self.disabled to True will not be called + # see osx_say.py example for such a plugin + if 
getattr(callback_plugin, 'disabled', False): + continue + methods = [ + getattr(callback_plugin, method_name, None), + getattr(callback_plugin, 'on_any', None) + ] + for method in methods: + if method is not None: + method(*args, **kwargs) + diff --git a/v2/ansible/executor/task_queue_manager.py: b/v2/ansible/executor/task_queue_manager.py: new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index f8a3e967465da7..673fa95a551b37 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -99,11 +99,14 @@ def load_from_file(self, file_name): def path_exists(self, path): return os.path.exists(path) + def is_file(self, path): + return os.path.isfile(path) + def is_directory(self, path): return os.path.isdir(path) - def is_file(self, path): - return os.path.isfile(path) + def list_directory(self, path): + return os.path.listdir(path) def _safe_load(self, stream, file_name=None): ''' Implements yaml.safe_load(), except using our custom loader class. 
''' diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 03957bfe2f6691..7341e4d71471d8 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -43,6 +43,7 @@ def __init__(self, parent_block=None, role=None, task_include=None, use_handlers self._task_include = task_include self._use_handlers = use_handlers self._dep_chain = [] + self._vars = dict() super(Block, self).__init__() @@ -56,9 +57,12 @@ def get_vars(self): if self._role: all_vars.update(self._role.get_vars()) + if self._parent_block: + all_vars.update(self._parent_block.get_vars()) if self._task_include: all_vars.update(self._task_include.get_vars()) + all_vars.update(self._vars) return all_vars @staticmethod @@ -131,25 +135,29 @@ def _load_always(self, attr, ds): # use_handlers=self._use_handlers, # ) - def copy(self): + def copy(self, exclude_parent=False): def _dupe_task_list(task_list, new_block): new_task_list = [] for task in task_list: - new_task = task.copy(exclude_block=True) - new_task._block = new_block + if isinstance(task, Block): + new_task = task.copy(exclude_parent=True) + new_task._parent_block = new_block + else: + new_task = task.copy(exclude_block=True) + new_task._block = new_block new_task_list.append(new_task) return new_task_list new_me = super(Block, self).copy() new_me._use_handlers = self._use_handlers - new_me._dep_chain = self._dep_chain[:] + new_me._dep_chain = self._dep_chain[:] new_me.block = _dupe_task_list(self.block or [], new_me) new_me.rescue = _dupe_task_list(self.rescue or [], new_me) new_me.always = _dupe_task_list(self.always or [], new_me) new_me._parent_block = None - if self._parent_block: + if self._parent_block and not exclude_parent: new_me._parent_block = self._parent_block.copy() new_me._role = None @@ -260,7 +268,7 @@ def _get_parent_attribute(self, attr): value = self._attributes[attr] if not value: if self._parent_block: - value = getattr(self._block, attr) + value = getattr(self._parent_block, attr) elif 
self._role: value = getattr(self._role, attr) if not value and len(self._dep_chain): diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py index 0e147205578406..cc262b4fb51b94 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -60,9 +60,9 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler ''' # we import here to prevent a circular dependency with imports + from ansible.playbook.block import Block from ansible.playbook.handler import Handler from ansible.playbook.task import Task - #from ansible.playbook.task_include import TaskInclude assert type(ds) == list @@ -71,27 +71,17 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler if not isinstance(task, dict): raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds) - #if 'include' in task: - # cur_basedir = None - # if isinstance(task, AnsibleBaseYAMLObject) and loader: - # pos_info = task.get_position_info() - # new_basedir = os.path.dirname(pos_info[0]) - # cur_basedir = loader.get_basedir() - # loader.set_basedir(new_basedir) - - # t = TaskInclude.load( - # task, - # block=block, - # role=role, - # task_include=task_include, - # use_handlers=use_handlers, - # loader=loader - # ) - - # if cur_basedir and loader: - # loader.set_basedir(cur_basedir) - #else: - if True: + if 'block' in task: + t = Block.load( + task, + parent_block=block, + role=role, + task_include=task_include, + use_handlers=use_handlers, + variable_manager=variable_manager, + loader=loader, + ) + else: if use_handlers: t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader) else: @@ -120,15 +110,3 @@ def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader return roles -def compile_block_list(block_list): - ''' - Given a list of blocks, compile them into a flat list of tasks - ''' - - task_list 
= [] - - for block in block_list: - task_list.extend(block.compile()) - - return task_list - diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index cbe4e038617a82..5814650adb6810 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -24,7 +24,7 @@ from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.base import Base from ansible.playbook.become import Become -from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles, compile_block_list +from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles from ansible.playbook.role import Role from ansible.playbook.taggable import Taggable diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index 21bcd21803e423..72dd2a27d3f311 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -32,7 +32,7 @@ from ansible.playbook.base import Base from ansible.playbook.become import Become from ansible.playbook.conditional import Conditional -from ansible.playbook.helpers import load_list_of_blocks, compile_block_list +from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.role.include import RoleInclude from ansible.playbook.role.metadata import RoleMetadata from ansible.playbook.taggable import Taggable diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index ab66898242cb5a..66afbec7a3e70c 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -78,7 +78,7 @@ class Task(Base, Conditional, Taggable, Become): # FIXME: this should not be a Task _meta = FieldAttribute(isa='string') - _name = FieldAttribute(isa='string') + _name = FieldAttribute(isa='string', default='') _no_log = FieldAttribute(isa='bool') _notify = FieldAttribute(isa='list') @@ -167,7 +167,6 @@ def munge(self, ds): args_parser = ModuleArgsParser(task_ds=ds) (action, args, delegate_to) = args_parser.parse() - 
new_ds['action'] = action new_ds['args'] = args new_ds['delegate_to'] = delegate_to @@ -199,6 +198,8 @@ def post_validate(self, all_vars=dict(), fail_on_undefined=True): def get_vars(self): all_vars = self.vars.copy() + if self._block: + all_vars.update(self._block.get_vars()) if self._task_include: all_vars.update(self._task_include.get_vars()) diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index 31b684e70dd1e4..bf074b78978ca2 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -240,7 +240,10 @@ def all(self, *args, **kwargs): continue if path not in self._module_cache: self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) - yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + if kwargs.get('class_only', False): + yield getattr(self._module_cache[path], self.class_name) + else: + yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs) action_loader = PluginLoader( 'ActionModule', diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py index 088a806b61b0ae..a9a078b28964ba 100644 --- a/v2/ansible/plugins/action/copy.py +++ b/v2/ansible/plugins/action/copy.py @@ -231,7 +231,7 @@ def run(self, tmp=None, task_vars=dict()): self._remove_tempfile_if_content_defined(content, content_tempfile) # fix file permissions when the copy is done as a different user - if (self._connection_info.become and self._connection_info.become_user != 'root': + if self._connection_info.become and self._connection_info.become_user != 'root': self._remote_chmod('a+r', tmp_src, tmp) if raw: diff --git a/v2/ansible/plugins/callback/__init__.py b/v2/ansible/plugins/callback/__init__.py index c6905229f934fd..2c2e7e74c65779 100644 --- a/v2/ansible/plugins/callback/__init__.py +++ b/v2/ansible/plugins/callback/__init__.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from 
ansible.utils.display import Display +#from ansible.utils.display import Display __all__ = ["CallbackBase"] @@ -34,8 +34,8 @@ class CallbackBase: # FIXME: the list of functions here needs to be updated once we have # finalized the list of callback methods used in the default callback - def __init__(self): - self._display = Display() + def __init__(self, display): + self._display = display def set_connection_info(self, conn_info): # FIXME: this is a temporary hack, as the connection info object diff --git a/v2/ansible/plugins/callback/default.py b/v2/ansible/plugins/callback/default.py index 6200aee7d43f58..bb87dc4a942c41 100644 --- a/v2/ansible/plugins/callback/default.py +++ b/v2/ansible/plugins/callback/default.py @@ -30,25 +30,15 @@ class CallbackModule(CallbackBase): to stdout when new callback events are received. ''' - def _print_banner(self, msg, color=None): - ''' - Prints a header-looking line with stars taking up to 80 columns - of width (3 columns, minimum) - ''' - msg = msg.strip() - star_len = (80 - len(msg)) - if star_len < 0: - star_len = 3 - stars = "*" * star_len - self._display.display("\n%s %s" % (msg, stars), color=color) - - def on_any(self, *args, **kwargs): + CALLBACK_VERSION = 2.0 + + def v2_on_any(self, *args, **kwargs): pass - def runner_on_failed(self, task, result, ignore_errors=False): + def v2_runner_on_failed(self, result, ignore_errors=False): self._display.display("fatal: [%s]: FAILED! 
=> %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red') - def runner_on_ok(self, task, result): + def v2_runner_on_ok(self, result): if result._task.action == 'include': msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name) @@ -68,7 +58,7 @@ def runner_on_ok(self, task, result): msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False) self._display.display(msg, color=color) - def runner_on_skipped(self, task, result): + def v2_runner_on_skipped(self, result): msg = "skipping: [%s]" % result._host.get_name() if self._display._verbosity > 0 or 'verbose_always' in result._result: indent = None @@ -78,57 +68,66 @@ def runner_on_skipped(self, task, result): msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False) self._display.display(msg, color='cyan') - def runner_on_unreachable(self, task, result): + def v2_runner_on_unreachable(self, result): self._display.display("fatal: [%s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), result._result), color='red') - def runner_on_no_hosts(self, task): + def v2_runner_on_no_hosts(self, task): + pass + + def v2_runner_on_async_poll(self, result): pass - def runner_on_async_poll(self, host, res, jid, clock): + def v2_runner_on_async_ok(self, result): pass - def runner_on_async_ok(self, host, res, jid): + def v2_runner_on_async_failed(self, result): pass - def runner_on_async_failed(self, host, res, jid): + def v2_runner_on_file_diff(self, result, diff): pass - def playbook_on_start(self): + def v2_playbook_on_start(self): pass - def playbook_on_notify(self, host, handler): + def v2_playbook_on_notify(self, result, handler): pass - def playbook_on_no_hosts_matched(self): + def v2_playbook_on_no_hosts_matched(self): self._display.display("skipping: no hosts matched", color='cyan') - def playbook_on_no_hosts_remaining(self): - self._print_banner("NO MORE HOSTS LEFT") + def v2_playbook_on_no_hosts_remaining(self): + self._display.banner("NO MORE HOSTS LEFT") - def playbook_on_task_start(self, name, is_conditional): - self._print_banner("TASK [%s]" % name.strip()) + def v2_playbook_on_task_start(self, task, is_conditional): + self._display.banner("TASK [%s]" % task.get_name().strip()) - def playbook_on_cleanup_task_start(self, name): - self._print_banner("CLEANUP TASK [%s]" % name.strip()) + def v2_playbook_on_cleanup_task_start(self, task): + self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip()) - def playbook_on_handler_task_start(self, name): - self._print_banner("RUNNING HANDLER [%s]" % name.strip()) + def v2_playbook_on_handler_task_start(self, task): + self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip()) - def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, 
default=None): pass - def playbook_on_setup(self): + def v2_playbook_on_setup(self): pass - def playbook_on_import_for_host(self, host, imported_file): + def v2_playbook_on_import_for_host(self, result, imported_file): pass - def playbook_on_not_import_for_host(self, host, missing_file): + def v2_playbook_on_not_import_for_host(self, result, missing_file): pass - def playbook_on_play_start(self, name): - self._print_banner("PLAY [%s]" % name.strip()) + def v2_playbook_on_play_start(self, play): + name = play.get_name().strip() + if not name: + msg = "PLAY" + else: + msg = "PLAY [%s]" % name + + self._display.banner(msg) - def playbook_on_stats(self, stats): + def v2_playbook_on_stats(self, stats): pass diff --git a/v2/ansible/plugins/callback/minimal.py b/v2/ansible/plugins/callback/minimal.py index 0b20eee64d5c0d..8ba883307b89f6 100644 --- a/v2/ansible/plugins/callback/minimal.py +++ b/v2/ansible/plugins/callback/minimal.py @@ -31,6 +31,8 @@ class CallbackModule(CallbackBase): to stdout when new callback events are received. 
''' + CALLBACK_VERSION = 2.0 + def _print_banner(self, msg): ''' Prints a header-looking line with stars taking up to 80 columns diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py index 196868ba96c723..59c0b9b84eef6a 100644 --- a/v2/ansible/plugins/strategies/__init__.py +++ b/v2/ansible/plugins/strategies/__init__.py @@ -28,7 +28,7 @@ from ansible.inventory.group import Group from ansible.playbook.handler import Handler -from ansible.playbook.helpers import load_list_of_blocks, compile_block_list +from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.role import ROLE_CACHE, hash_params from ansible.plugins import module_loader from ansible.utils.debug import debug @@ -49,7 +49,7 @@ def __init__(self, tqm): self._inventory = tqm.get_inventory() self._workers = tqm.get_workers() self._notified_handlers = tqm.get_notified_handlers() - self._callback = tqm.get_callback() + #self._callback = tqm.get_callback() self._variable_manager = tqm.get_variable_manager() self._loader = tqm.get_loader() self._final_q = tqm._final_q @@ -73,6 +73,9 @@ def run(self, iterator, connection_info, result=True): debug("running handlers") result &= self.run_handlers(iterator, connection_info) + # send the stats callback + self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) + if not result: if num_unreachable > 0: return 3 @@ -84,7 +87,7 @@ def run(self, iterator, connection_info, result=True): return 0 def get_hosts_remaining(self, play): - return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.get_name() not in self._tqm._unreachable_hosts] + return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts] def get_failed_hosts(self, play): return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts] @@ -132,17 
+135,23 @@ def _process_pending_results(self, iterator): task = task_result._task if result[0] == 'host_task_failed': if not task.ignore_errors: - debug("marking %s as failed" % host.get_name()) + debug("marking %s as failed" % host.name) iterator.mark_host_failed(host) - self._tqm._failed_hosts[host.get_name()] = True - self._callback.runner_on_failed(task, task_result) + self._tqm._failed_hosts[host.name] = True + self._tqm._stats.increment('failures', host.name) + self._tqm.send_callback('v2_runner_on_failed', task_result) elif result[0] == 'host_unreachable': - self._tqm._unreachable_hosts[host.get_name()] = True - self._callback.runner_on_unreachable(task, task_result) + self._tqm._unreachable_hosts[host.name] = True + self._tqm._stats.increment('dark', host.name) + self._tqm.send_callback('v2_runner_on_unreachable', task_result) elif result[0] == 'host_task_skipped': - self._callback.runner_on_skipped(task, task_result) + self._tqm._stats.increment('skipped', host.name) + self._tqm.send_callback('v2_runner_on_skipped', task_result) elif result[0] == 'host_task_ok': - self._callback.runner_on_ok(task, task_result) + self._tqm._stats.increment('ok', host.name) + if 'changed' in task_result._result and task_result._result['changed']: + self._tqm._stats.increment('changed', host.name) + self._tqm.send_callback('v2_runner_on_ok', task_result) self._pending_results -= 1 if host.name in self._blocked_hosts: @@ -160,22 +169,6 @@ def _process_pending_results(self, iterator): ret_results.append(task_result) - #elif result[0] == 'include': - # host = result[1] - # task = result[2] - # include_file = result[3] - # include_vars = result[4] - # - # if isinstance(task, Handler): - # # FIXME: figure out how to make includes work for handlers - # pass - # else: - # original_task = iterator.get_original_task(host, task) - # if original_task and original_task._role: - # include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_file) - # 
new_tasks = self._load_included_file(original_task, include_file, include_vars) - # iterator.add_tasks(host, new_tasks) - elif result[0] == 'add_host': task_result = result[1] new_host_info = task_result.get('add_host', dict()) @@ -322,14 +315,11 @@ def _load_included_file(self, included_file): loader=self._loader ) - - task_list = compile_block_list(block_list) - # set the vars for this task from those specified as params to the include - for t in task_list: - t.vars = included_file._args.copy() + for b in block_list: + b._vars = included_file._args.copy() - return task_list + return block_list def cleanup(self, iterator, connection_info): ''' @@ -361,7 +351,7 @@ def cleanup(self, iterator, connection_info): while work_to_do: work_to_do = False for host in failed_hosts: - host_name = host.get_name() + host_name = host.name if host_name in self._tqm._failed_hosts: iterator.mark_host_failed(host) @@ -377,7 +367,7 @@ def cleanup(self, iterator, connection_info): self._blocked_hosts[host_name] = True task = iterator.get_next_task_for_host(host) task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) - self._callback.playbook_on_cleanup_task_start(task.get_name()) + self._tqm.send_callback('v2_playbook_on_cleanup_task_start', task) self._queue_task(host, task, task_vars, connection_info) self._process_pending_results(iterator) @@ -398,31 +388,28 @@ def run_handlers(self, iterator, connection_info): # FIXME: getting the handlers from the iterators play should be # a method on the iterator, which may also filter the list # of handlers based on the notified list - handlers = compile_block_list(iterator._play.handlers) - - debug("handlers are: %s" % handlers) - for handler in handlers: - handler_name = handler.get_name() - - if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]): - if not len(self.get_hosts_remaining(iterator._play)): - self._callback.playbook_on_no_hosts_remaining() - 
result = False - break - - self._callback.playbook_on_handler_task_start(handler_name) - for host in self._notified_handlers[handler_name]: - if not handler.has_triggered(host): - task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler) - self._queue_task(host, handler, task_vars, connection_info) - handler.flag_for_host(host) - - self._process_pending_results(iterator) - - self._wait_on_pending_results(iterator) - - # wipe the notification list - self._notified_handlers[handler_name] = [] - debug("done running handlers, result is: %s" % result) + for handler_block in iterator._play.handlers: + debug("handlers are: %s" % handler_block) + # FIXME: handlers need to support the rescue/always portions of blocks too, + # but this may take some work in the iterator and gets tricky when + # we consider the ability of meta tasks to flush handlers + for handler in handler_block.block: + handler_name = handler.get_name() + if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]): + if not len(self.get_hosts_remaining(iterator._play)): + self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') + result = False + break + self._tqm.send_callback('v2_playbook_on_handler_task_start', handler) + for host in self._notified_handlers[handler_name]: + if not handler.has_triggered(host): + task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler) + self._queue_task(host, handler, task_vars, connection_info) + handler.flag_for_host(host) + self._process_pending_results(iterator) + self._wait_on_pending_results(iterator) + # wipe the notification list + self._notified_handlers[handler_name] = [] + debug("done running handlers, result is: %s" % result) return result diff --git a/v2/ansible/plugins/strategies/linear.py b/v2/ansible/plugins/strategies/linear.py index b503d6ebd51022..fcda46a7af0686 100644 --- a/v2/ansible/plugins/strategies/linear.py +++ 
b/v2/ansible/plugins/strategies/linear.py @@ -21,6 +21,7 @@ from ansible.errors import AnsibleError from ansible.executor.play_iterator import PlayIterator +from ansible.playbook.block import Block from ansible.playbook.task import Task from ansible.plugins import action_loader from ansible.plugins.strategies import StrategyBase @@ -52,6 +53,9 @@ def _get_next_task_lockstep(self, hosts, iterator): lowest_cur_block = len(iterator._blocks) for (k, v) in host_tasks.iteritems(): + if v is None: + continue + (s, t) = v if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE: lowest_cur_block = s.cur_block @@ -131,7 +135,7 @@ def run(self, iterator, connection_info): debug("done getting the remaining hosts for this loop") if len(hosts_left) == 0: debug("out of hosts to run on") - self._callback.playbook_on_no_hosts_remaining() + self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') result = False break @@ -184,7 +188,6 @@ def run(self, iterator, connection_info): meta_action = task.args.get('_raw_params') if meta_action == 'noop': # FIXME: issue a callback for the noop here? 
- print("%s => NOOP" % host) continue elif meta_action == 'flush_handlers': self.run_handlers(iterator, connection_info) @@ -192,7 +195,7 @@ def run(self, iterator, connection_info): raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds) else: if not callback_sent: - self._callback.playbook_on_task_start(task.get_name(), False) + self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False) callback_sent = True self._blocked_hosts[host.get_name()] = True @@ -234,6 +237,10 @@ def __repr__(self): include_results = [ res._result ] for include_result in include_results: + # if the task result was skipped or failed, continue + if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result: + continue + original_task = iterator.get_original_task(res._host, res._task) if original_task and original_task._role: include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include']) @@ -263,27 +270,31 @@ def __repr__(self): noop_task.args['_raw_params'] = 'noop' noop_task.set_loader(iterator._play._loader) - all_tasks = dict((host, []) for host in hosts_left) + all_blocks = dict((host, []) for host in hosts_left) for included_file in included_files: # included hosts get the task list while those excluded get an equal-length # list of noop tasks, to make sure that they continue running in lock-step try: - new_tasks = self._load_included_file(included_file) + new_blocks = self._load_included_file(included_file) except AnsibleError, e: for host in included_file._hosts: iterator.mark_host_failed(host) # FIXME: callback here? 
print(e) - noop_tasks = [noop_task for t in new_tasks] - for host in hosts_left: - if host in included_file._hosts: - all_tasks[host].extend(new_tasks) - else: - all_tasks[host].extend(noop_tasks) + for new_block in new_blocks: + noop_block = Block(parent_block=task._block) + noop_block.block = [noop_task for t in new_block.block] + noop_block.always = [noop_task for t in new_block.always] + noop_block.rescue = [noop_task for t in new_block.rescue] + for host in hosts_left: + if host in included_file._hosts: + all_blocks[host].append(new_block) + else: + all_blocks[host].append(noop_block) for host in hosts_left: - iterator.add_tasks(host, all_tasks[host]) + iterator.add_tasks(host, all_blocks[host]) debug("results queue empty") except (IOError, EOFError), e: diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py index 09f5ef4a30f9de..f771452a9d3417 100644 --- a/v2/ansible/utils/cli.py +++ b/v2/ansible/utils/cli.py @@ -68,6 +68,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, default=None) parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) + parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', + help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) if subset_opts: parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', diff --git a/v2/ansible/utils/color.py b/v2/ansible/utils/color.py index ebcb4317f7013f..a87717073ebf67 100644 --- a/v2/ansible/utils/color.py +++ b/v2/ansible/utils/color.py @@ -73,3 +73,20 @@ def stringc(text, color): # --- end "pretty" +def colorize(lead, num, color): + """ Print 'lead' = 'num' in 'color' """ + if num != 0 and ANSIBLE_COLOR and color is not None: + return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color)) + else: + return "%s=%-4s" % (lead, str(num)) + +def hostcolor(host, stats, color=True): + if 
ANSIBLE_COLOR and color: + if stats['failures'] != 0 or stats['unreachable'] != 0: + return "%-37s" % stringc(host, 'red') + elif stats['changed'] != 0: + return "%-37s" % stringc(host, 'yellow') + else: + return "%-37s" % stringc(host, 'green') + return "%-26s" % host + diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index 3976198703955a..758a62fceea7b5 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -112,3 +112,15 @@ def system_warning(self, msg): if C.SYSTEM_WARNINGS: self._warning(msg) + def banner(self, msg, color=None): + ''' + Prints a header-looking line with stars taking up to 80 columns + of width (3 columns, minimum) + ''' + msg = msg.strip() + star_len = (80 - len(msg)) + if star_len < 0: + star_len = 3 + stars = "*" * star_len + self.display("\n%s %s" % (msg, stars), color=color) + diff --git a/v2/ansible/vars/__init__.py b/v2/ansible/vars/__init__.py index f9e7cba9cd008d..eb75d9c9929b8a 100644 --- a/v2/ansible/vars/__init__.py +++ b/v2/ansible/vars/__init__.py @@ -162,10 +162,9 @@ def get_vars(self, loader, play=None, host=None, task=None): all_vars = self._combine_vars(all_vars, self._group_vars_files['all']) for group in host.get_groups(): - group_name = group.get_name() all_vars = self._combine_vars(all_vars, group.get_vars()) - if group_name in self._group_vars_files and group_name != 'all': - all_vars = self._combine_vars(all_vars, self._group_vars_files[group_name]) + if group.name in self._group_vars_files and group.name != 'all': + all_vars = self._combine_vars(all_vars, self._group_vars_files[group.name]) host_name = host.get_name() if host_name in self._host_vars_files: @@ -228,7 +227,7 @@ def _get_inventory_basename(self, path): ''' (name, ext) = os.path.splitext(os.path.basename(path)) - if ext not in ('yml', 'yaml'): + if ext not in ('.yml', '.yaml'): return os.path.basename(path) else: return name @@ -239,11 +238,11 @@ def _load_inventory_file(self, path, loader): basename of the file 
without the extension ''' - if os.path.isdir(path): + if loader.is_directory(path): data = dict() try: - names = os.listdir(path) + names = loader.list_directory(path) except os.error, err: raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror)) @@ -270,7 +269,7 @@ def add_host_vars_file(self, path, loader): the extension, for matching against a given inventory host name ''' - if os.path.exists(path): + if loader.path_exists(path): (name, data) = self._load_inventory_file(path, loader) self._host_vars_files[name] = data @@ -281,7 +280,7 @@ def add_group_vars_file(self, path, loader): the extension, for matching against a given inventory host name ''' - if os.path.exists(path): + if loader.path_exists(path): (name, data) = self._load_inventory_file(path, loader) self._group_vars_files[name] = data diff --git a/v2/samples/include.yml b/v2/samples/include.yml index 2ffdc3dd76561d..3a2e88f8985976 100644 --- a/v2/samples/include.yml +++ b/v2/samples/include.yml @@ -1,4 +1,4 @@ - debug: msg="this is the include, a=={{a}}" -- debug: msg="this is the second debug in the include" -- debug: msg="this is the third debug in the include, and a is still {{a}}" +#- debug: msg="this is the second debug in the include" +#- debug: msg="this is the third debug in the include, and a is still {{a}}" diff --git a/v2/samples/localhost_include.yml b/v2/samples/localhost_include.yml new file mode 100644 index 00000000000000..eca8b5716caa5a --- /dev/null +++ b/v2/samples/localhost_include.yml @@ -0,0 +1,3 @@ +- debug: msg="this is the localhost include" +- include: common_include.yml + diff --git a/v2/samples/test_blocks_of_blocks.yml b/v2/samples/test_blocks_of_blocks.yml index 8092a9ad8b3fcd..7933cb61833bf5 100644 --- a/v2/samples/test_blocks_of_blocks.yml +++ b/v2/samples/test_blocks_of_blocks.yml @@ -6,3 +6,8 @@ - block: - block: - debug: msg="are we there yet?" 
+ always: + - debug: msg="a random always block" + - fail: + rescue: + - debug: msg="rescuing from the fail" diff --git a/v2/samples/test_include.yml b/v2/samples/test_include.yml index c81e5ecd5a951d..60befd9911d50d 100644 --- a/v2/samples/test_include.yml +++ b/v2/samples/test_include.yml @@ -19,7 +19,7 @@ always: - include: include.yml a=always - handlers: + #handlers: #- name: foo # include: include.yml a="this is a handler" diff --git a/v2/test/mock/loader.py b/v2/test/mock/loader.py index b79dfa509db405..cf9d7ea72d0fe0 100644 --- a/v2/test/mock/loader.py +++ b/v2/test/mock/loader.py @@ -47,6 +47,9 @@ def is_file(self, path): def is_directory(self, path): return path in self._known_directories + def list_directory(self, path): + return [x for x in self._known_directories] + def _add_known_directory(self, directory): if directory not in self._known_directories: self._known_directories.append(directory) diff --git a/v2/test/playbook/test_block.py b/v2/test/playbook/test_block.py index 9c1d06cbcb8c1a..348681527bb7b4 100644 --- a/v2/test/playbook/test_block.py +++ b/v2/test/playbook/test_block.py @@ -75,9 +75,3 @@ def test_load_implicit_block(self): self.assertEqual(len(b.block), 1) assert isinstance(b.block[0], Task) - def test_block_compile(self): - ds = [dict(action='foo')] - b = Block.load(ds) - tasks = b.compile() - self.assertEqual(len(tasks), 1) - self.assertIsInstance(tasks[0], Task) diff --git a/v2/test/playbook/test_playbook.py b/v2/test/playbook/test_playbook.py index f3ba6785f3f59a..1e72421818be6e 100644 --- a/v2/test/playbook/test_playbook.py +++ b/v2/test/playbook/test_playbook.py @@ -24,6 +24,7 @@ from ansible.errors import AnsibleError, AnsibleParserError from ansible.playbook import Playbook +from ansible.vars import VariableManager from test.mock.loader import DictDataLoader @@ -36,7 +37,8 @@ def tearDown(self): pass def test_empty_playbook(self): - p = Playbook() + fake_loader = DictDataLoader({}) + p = Playbook(loader=fake_loader) def 
test_basic_playbook(self): fake_loader = DictDataLoader({ @@ -61,6 +63,7 @@ def test_bad_playbook_files(self): """, }) - self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", fake_loader) - self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", fake_loader) + vm = VariableManager() + self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader) + self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader) diff --git a/v2/test/playbook/test_task_include.py b/v2/test/playbook/test_task_include.py deleted file mode 100644 index 55f7461f050a2a..00000000000000 --- a/v2/test/playbook/test_task_include.py +++ /dev/null @@ -1,64 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.compat.tests import unittest -from ansible.errors import AnsibleParserError -from ansible.parsing.yaml.objects import AnsibleMapping -from ansible.playbook.task_include import TaskInclude - -from test.mock.loader import DictDataLoader - -class TestTaskInclude(unittest.TestCase): - - def setUp(self): - self._fake_loader = DictDataLoader({ - "foo.yml": """ - - shell: echo "hello world" - """ - }) - - pass - - def tearDown(self): - pass - - def test_empty_task_include(self): - ti = TaskInclude() - - def test_basic_task_include(self): - ti = TaskInclude.load(AnsibleMapping(include='foo.yml'), loader=self._fake_loader) - tasks = ti.compile() - - def test_task_include_with_loop(self): - ti = TaskInclude.load(AnsibleMapping(include='foo.yml', with_items=['a', 'b', 'c']), loader=self._fake_loader) - - def test_task_include_with_conditional(self): - ti = TaskInclude.load(AnsibleMapping(include='foo.yml', when="1 == 1"), loader=self._fake_loader) - - def test_task_include_with_tags(self): - ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags="foo"), loader=self._fake_loader) - ti = TaskInclude.load(AnsibleMapping(include='foo.yml', tags=["foo", "bar"]), loader=self._fake_loader) - - def test_task_include_errors(self): - self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include=''), loader=self._fake_loader) - self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml', vars="1"), loader=self._fake_loader) - self.assertRaises(AnsibleParserError, TaskInclude.load, AnsibleMapping(include='foo.yml a=1', vars=dict(b=2)), loader=self._fake_loader) - diff --git a/v2/test/vars/test_variable_manager.py b/v2/test/vars/test_variable_manager.py index 63a80a7a1c5f59..f8d815eb6f78a7 100644 --- a/v2/test/vars/test_variable_manager.py +++ b/v2/test/vars/test_variable_manager.py @@ -35,8 +35,10 
@@ def tearDown(self): pass def test_basic_manager(self): + fake_loader = DictDataLoader({}) + v = VariableManager() - self.assertEqual(v.get_vars(), dict()) + self.assertEqual(v.get_vars(loader=fake_loader), dict()) self.assertEqual( v._merge_dicts( @@ -52,23 +54,26 @@ def test_basic_manager(self): ) - def test_manager_extra_vars(self): + def test_variable_manager_extra_vars(self): + fake_loader = DictDataLoader({}) + extra_vars = dict(a=1, b=2, c=3) v = VariableManager() v.set_extra_vars(extra_vars) - self.assertEqual(v.get_vars(), extra_vars) - self.assertIsNot(v.extra_vars, extra_vars) + for (key, val) in extra_vars.iteritems(): + self.assertEqual(v.get_vars(loader=fake_loader).get(key), val) + self.assertIsNot(v.extra_vars.get(key), val) - def test_manager_host_vars_file(self): + def test_variable_manager_host_vars_file(self): fake_loader = DictDataLoader({ "host_vars/hostname1.yml": """ foo: bar """ }) - v = VariableManager(loader=fake_loader) - v.add_host_vars_file("host_vars/hostname1.yml") + v = VariableManager() + v.add_host_vars_file("host_vars/hostname1.yml", loader=fake_loader) self.assertIn("hostname1", v._host_vars_files) self.assertEqual(v._host_vars_files["hostname1"], dict(foo="bar")) @@ -77,37 +82,43 @@ def test_manager_host_vars_file(self): mock_host.get_vars.return_value = dict() mock_host.get_groups.return_value = () - self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar")) + self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar") - def test_manager_group_vars_file(self): + def test_variable_manager_group_vars_file(self): fake_loader = DictDataLoader({ "group_vars/somegroup.yml": """ foo: bar """ }) - v = VariableManager(loader=fake_loader) - v.add_group_vars_file("group_vars/somegroup.yml") + v = VariableManager() + v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader) self.assertIn("somegroup", v._group_vars_files) self.assertEqual(v._group_vars_files["somegroup"], dict(foo="bar")) + 
mock_group = MagicMock() + mock_group.name.return_value = "somegroup" + mock_group.get_ancestors.return_value = () + mock_host = MagicMock() mock_host.get_name.return_value = "hostname1" mock_host.get_vars.return_value = dict() - mock_host.get_groups.return_value = ["somegroup"] + mock_host.get_groups.return_value = (mock_group) + + self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar") - self.assertEqual(v.get_vars(host=mock_host), dict(foo="bar")) + def test_variable_manager_play_vars(self): + fake_loader = DictDataLoader({}) - def test_manager_play_vars(self): mock_play = MagicMock() mock_play.get_vars.return_value = dict(foo="bar") mock_play.get_roles.return_value = [] mock_play.get_vars_files.return_value = [] v = VariableManager() - self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar")) + self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar") - def test_manager_play_vars_files(self): + def test_variable_manager_play_vars_files(self): fake_loader = DictDataLoader({ "/path/to/somefile.yml": """ foo: bar @@ -119,13 +130,15 @@ def test_manager_play_vars_files(self): mock_play.get_roles.return_value = [] mock_play.get_vars_files.return_value = ['/path/to/somefile.yml'] - v = VariableManager(loader=fake_loader) - self.assertEqual(v.get_vars(play=mock_play), dict(foo="bar")) + v = VariableManager() + self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar") + + def test_variable_manager_task_vars(self): + fake_loader = DictDataLoader({}) - def test_manager_task_vars(self): mock_task = MagicMock() mock_task.get_vars.return_value = dict(foo="bar") v = VariableManager() - self.assertEqual(v.get_vars(task=mock_task), dict(foo="bar")) + self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task).get("foo"), "bar") From 34aba2dd9a18d8e2cea5c8cdb7eb70b5f9fc0bbd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 2 Apr 2015 11:26:42 -0500 Subject: [PATCH 0224/3617] Fixing dupe 
option for -u in v2 --- v2/ansible/utils/cli.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py index 20998cb43fa504..6500234c74125e 100644 --- a/v2/ansible/utils/cli.py +++ b/v2/ansible/utils/cli.py @@ -70,8 +70,6 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, default=None) parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) - parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', - help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) if subset_opts: parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', From 811a906332eed12e9d3d976032341a6912b56247 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 2 Apr 2015 11:54:45 -0500 Subject: [PATCH 0225/3617] Fixing the synchronize action plugin for v2 --- v2/ansible/executor/task_executor.py | 22 +++++++++ v2/ansible/plugins/action/synchronize.py | 57 ++++++++++-------------- 2 files changed, 46 insertions(+), 33 deletions(-) diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index 6d19349ba4dbd0..256d26f8dcf843 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -73,7 +73,29 @@ def run(self): if items is not None: if len(items) > 0: item_results = self._run_loop(items) + + # loop through the item results, and remember the changed/failed + # result flags based on any item there. 
+ changed = False + failed = False + for item in item_results: + if 'changed' in item: + changed = True + if 'failed' in item: + failed = True + + # create the overall result item, and set the changed/failed + # flags there to reflect the overall result of the loop res = dict(results=item_results) + + if changed: + res['changed'] = True + + if failed: + res['failed'] = True + res['msg'] = 'One or more items failed' + else: + res['msg'] = 'All items completed' else: res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[]) else: diff --git a/v2/ansible/plugins/action/synchronize.py b/v2/ansible/plugins/action/synchronize.py index 298d6a19599399..81e335b0098414 100644 --- a/v2/ansible/plugins/action/synchronize.py +++ b/v2/ansible/plugins/action/synchronize.py @@ -23,20 +23,18 @@ class ActionModule(ActionBase): - def _get_absolute_path(self, path, task_vars): - if 'vars' in task_vars: - if '_original_file' in task_vars['vars']: - # roles - original_path = path - path = self._loader.path_dwim_relative(task_vars['_original_file'], 'files', path, self.runner.basedir) - if original_path and original_path[-1] == '/' and path[-1] != '/': - # make sure the dwim'd path ends in a trailing "/" - # if the original path did - path += '/' + def _get_absolute_path(self, path): + if self._task._role is not None: + original_path = path + path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path) + if original_path and original_path[-1] == '/' and path[-1] != '/': + # make sure the dwim'd path ends in a trailing "/" + # if the original path did + path += '/' return path - def _process_origin(self, host, path, user, task_vars): + def _process_origin(self, host, path, user): if not host in ['127.0.0.1', 'localhost']: if user: @@ -46,10 +44,10 @@ def _process_origin(self, host, path, user, task_vars): else: if not ':' in path: if not path.startswith('/'): - path = self._get_absolute_path(path=path, task_vars=task_vars) + path = 
self._get_absolute_path(path=path) return path - def _process_remote(self, host, path, user, task_vars): + def _process_remote(self, host, task, path, user): transport = self._connection_info.connection return_data = None if not host in ['127.0.0.1', 'localhost'] or transport != "local": @@ -62,7 +60,7 @@ def _process_remote(self, host, path, user, task_vars): if not ':' in return_data: if not return_data.startswith('/'): - return_data = self._get_absolute_path(path=return_data, task_vars=task_vars) + return_data = self._get_absolute_path(path=return_data) return return_data @@ -76,7 +74,7 @@ def run(self, tmp=None, task_vars=dict()): # IF original transport is not local, override transport and disable sudo. if original_transport != 'local': task_vars['ansible_connection'] = 'local' - self.transport_overridden = True + transport_overridden = True self.runner.sudo = False src = self._task.args.get('src', None) @@ -90,8 +88,6 @@ def run(self, tmp=None, task_vars=dict()): dest_host = task_vars.get('ansible_ssh_host', task_vars.get('inventory_hostname')) # allow ansible_ssh_host to be templated - # FIXME: does this still need to be templated? - #dest_host = template.template(self.runner.basedir, dest_host, task_vars, fail_on_undefined=True) dest_is_local = dest_host in ['127.0.0.1', 'localhost'] # CHECK FOR NON-DEFAULT SSH PORT @@ -113,13 +109,13 @@ def run(self, tmp=None, task_vars=dict()): # FIXME: not sure if this is in connection info yet or not... 
#if conn.delegate != conn.host: # if 'hostvars' in task_vars: - # if conn.delegate in task_vars['hostvars'] and self.original_transport != 'local': + # if conn.delegate in task_vars['hostvars'] and original_transport != 'local': # # use a delegate host instead of localhost # use_delegate = True # COMPARE DELEGATE, HOST AND TRANSPORT process_args = False - if not dest_host is src_host and self.original_transport != 'local': + if not dest_host is src_host and original_transport != 'local': # interpret and task_vars remote host info into src or dest process_args = True @@ -127,7 +123,7 @@ def run(self, tmp=None, task_vars=dict()): if process_args or use_delegate: user = None - if boolean(options.get('set_remote_user', 'yes')): + if boolean(task_vars.get('set_remote_user', 'yes')): if use_delegate: user = task_vars['hostvars'][conn.delegate].get('ansible_ssh_user') @@ -146,31 +142,26 @@ def run(self, tmp=None, task_vars=dict()): # use the mode to define src and dest's url if self._task.args.get('mode', 'push') == 'pull': # src is a remote path: @, dest is a local path - src = self._process_remote(src_host, src, user, task_vars) - dest = self._process_origin(dest_host, dest, user, task_vars) + src = self._process_remote(src_host, src, user) + dest = self._process_origin(dest_host, dest, user) else: # src is a local path, dest is a remote path: @ - src = self._process_origin(src_host, src, user, task_vars) - dest = self._process_remote(dest_host, dest, user, task_vars) + src = self._process_origin(src_host, src, user) + dest = self._process_remote(dest_host, dest, user) # Allow custom rsync path argument. rsync_path = self._task.args.get('rsync_path', None) # If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument. 
- if not rsync_path and self.transport_overridden and self._connection_info.sudo and not dest_is_local: - self._task.args['rsync_path'] = 'sudo rsync' + if not rsync_path and transport_overridden and self._connection_info.become and self._connection_info.become_method == 'sudo' and not dest_is_local: + rsync_path = 'sudo rsync' # make sure rsync path is quoted. if rsync_path: - rsync_path = '"%s"' % rsync_path - - # FIXME: noop stuff still needs to be figured out - #module_args = "" - #if self.runner.noop_on_check(task_vars): - # module_args = "CHECKMODE=True" + self._task.args['rsync_path'] = '"%s"' % rsync_path # run the module and store the result - result = self.runner._execute_module('synchronize', module_args=, complex_args=options, task_vars=task_vars) + result = self._execute_module('synchronize') return result From bfae708bbf70a7e9bf1eda5c5983368fed5c9420 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Apr 2015 16:25:37 -0700 Subject: [PATCH 0226/3617] Port v2 to the PyYAML C extension --- v2/ansible/parsing/__init__.py | 22 ++++++++++++--- v2/ansible/parsing/yaml/constructor.py | 36 ++++++++++++------------ v2/ansible/parsing/yaml/loader.py | 38 ++++++++++++++++++-------- 3 files changed, 61 insertions(+), 35 deletions(-) diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index 31a97af5089179..bce5b2b667824a 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -29,7 +29,7 @@ from ansible.parsing.vault import VaultLib from ansible.parsing.splitter import unquote from ansible.parsing.yaml.loader import AnsibleLoader -from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode from ansible.utils.path import unfrackpath class DataLoader(): @@ -70,13 +70,27 @@ def load(self, data, file_name='', show_content=True): # we first try to load this data as JSON return json.loads(data) except: + # if loading JSON 
failed for any reason, we go ahead + # and try to parse it as YAML instead + + if isinstance(data, AnsibleUnicode): + # The PyYAML's libyaml bindings use PyUnicode_CheckExact so + # they are unable to cope with our subclass. + # Unwrap and re-wrap the unicode so we can keep track of line + # numbers + new_data = unicode(data) + else: + new_data = data try: - # if loading JSON failed for any reason, we go ahead - # and try to parse it as YAML instead - return self._safe_load(data, file_name=file_name) + new_data = self._safe_load(new_data, file_name=file_name) except YAMLError as yaml_exc: self._handle_error(yaml_exc, file_name, show_content) + if isinstance(data, AnsibleUnicode): + new_data = AnsibleUnicode(new_data) + new_data.ansible_pos = data.ansible_pos + return new_data + def load_from_file(self, file_name): ''' Loads data from a file, which can contain either JSON or YAML. ''' diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py index 0043b8a2f044d5..aed2553c05b9af 100644 --- a/v2/ansible/parsing/yaml/constructor.py +++ b/v2/ansible/parsing/yaml/constructor.py @@ -20,7 +20,6 @@ __metaclass__ = type from yaml.constructor import Constructor -from ansible.utils.unicode import to_unicode from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleUnicode class AnsibleConstructor(Constructor): @@ -33,20 +32,11 @@ def construct_yaml_map(self, node): yield data value = self.construct_mapping(node) data.update(value) - data.ansible_pos = value.ansible_pos + data.ansible_pos = self._node_position_info(node) def construct_mapping(self, node, deep=False): ret = AnsibleMapping(super(Constructor, self).construct_mapping(node, deep)) - - # in some cases, we may have pre-read the data and then - # passed it to the load() call for YAML, in which case we - # want to override the default datasource (which would be - # '') to the actual filename we read in - if self._ansible_file_name: - data_source = self._ansible_file_name - else: 
- data_source = node.__datasource__ - ret.ansible_pos = (data_source, node.__line__, node.__column__) + ret.ansible_pos = self._node_position_info(node) return ret @@ -54,17 +44,25 @@ def construct_yaml_str(self, node): # Override the default string handling function # to always return unicode objects value = self.construct_scalar(node) - value = to_unicode(value) - ret = AnsibleUnicode(self.construct_scalar(node)) + ret = AnsibleUnicode(value) - if self._ansible_file_name: - data_source = self._ansible_file_name - else: - data_source = node.__datasource__ - ret.ansible_pos = (data_source, node.__line__, node.__column__) + ret.ansible_pos = self._node_position_info(node) return ret + def _node_position_info(self, node): + # the line number where the previous token has ended (plus empty lines) + column = node.start_mark.column + 1 + line = node.start_mark.line + 1 + + # in some cases, we may have pre-read the data and then + # passed it to the load() call for YAML, in which case we + # want to override the default datasource (which would be + # '') to the actual filename we read in + datasource = self._ansible_file_name or node.start_mark.name + + return (datasource, line, column) + AnsibleConstructor.add_constructor( u'tag:yaml.org,2002:map', AnsibleConstructor.construct_yaml_map) diff --git a/v2/ansible/parsing/yaml/loader.py b/v2/ansible/parsing/yaml/loader.py index 0d1300781901b7..4e0049ed2a8f3e 100644 --- a/v2/ansible/parsing/yaml/loader.py +++ b/v2/ansible/parsing/yaml/loader.py @@ -19,20 +19,34 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from yaml.reader import Reader -from yaml.scanner import Scanner -from yaml.parser import Parser +try: + from _yaml import CParser, CEmitter + HAVE_PYYAML_C = True +except ImportError: + HAVE_PYYAML_C = False + from yaml.resolver import Resolver -from ansible.parsing.yaml.composer import AnsibleComposer from ansible.parsing.yaml.constructor import AnsibleConstructor -class 
AnsibleLoader(Reader, Scanner, Parser, AnsibleComposer, AnsibleConstructor, Resolver): - def __init__(self, stream, file_name=None): - Reader.__init__(self, stream) - Scanner.__init__(self) - Parser.__init__(self) - AnsibleComposer.__init__(self) - AnsibleConstructor.__init__(self, file_name=file_name) - Resolver.__init__(self) +if HAVE_PYYAML_C: + class AnsibleLoader(CParser, AnsibleConstructor, Resolver): + def __init__(self, stream, file_name=None): + CParser.__init__(self, stream) + AnsibleConstructor.__init__(self, file_name=file_name) + Resolver.__init__(self) +else: + from yaml.reader import Reader + from yaml.scanner import Scanner + from yaml.parser import Parser + + from ansible.parsing.yaml.composer import AnsibleComposer + class AnsibleLoader(Reader, Scanner, Parser, AnsibleComposer, AnsibleConstructor, Resolver): + def __init__(self, stream, file_name=None): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + AnsibleComposer.__init__(self) + AnsibleConstructor.__init__(self, file_name=file_name) + Resolver.__init__(self) From ac6b7045dbc45b7d6f42bf46a2df3a6c9a8c1aaf Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 2 Apr 2015 11:09:44 -0700 Subject: [PATCH 0227/3617] A little py3 compat, side effect of making this work under profile --- v2/bin/ansible-playbook | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index f1b590958b343b..8e80966ed7fe19 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import print_function import os import stat @@ -19,7 +20,8 @@ from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager # Implement an ansible.utils.warning() function later -warning = getattr(__builtins__, 'print') +def warning(*args, **kwargs): + print(*args, **kwargs) 
#--------------------------------------------------------------------------------------------------- @@ -136,10 +138,10 @@ if __name__ == "__main__": sys.exit(main(sys.argv[1:])) except AnsibleError, e: #display("ERROR: %s" % e, color='red', stderr=True) - print e + print(e) sys.exit(1) except KeyboardInterrupt, ke: #display("ERROR: interrupted", color='red', stderr=True) - print "keyboard interrupt" + print("keyboard interrupt") sys.exit(1) From d277c6b82187a8cdbb23fec4467a00069681c646 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 2 Apr 2015 11:38:37 -0700 Subject: [PATCH 0228/3617] Few more py3 cleanups --- v2/bin/ansible-playbook | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index 8e80966ed7fe19..d9771249794fd0 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -1,5 +1,6 @@ #!/usr/bin/env python -from __future__ import print_function +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import stat @@ -136,11 +137,11 @@ if __name__ == "__main__": #display(" ", log_only=True) try: sys.exit(main(sys.argv[1:])) - except AnsibleError, e: + except AnsibleError as e: #display("ERROR: %s" % e, color='red', stderr=True) print(e) sys.exit(1) - except KeyboardInterrupt, ke: + except KeyboardInterrupt: #display("ERROR: interrupted", color='red', stderr=True) print("keyboard interrupt") sys.exit(1) From 369bf0d214095fd02614702ecf25ebc0cb712f98 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 2 Apr 2015 12:35:50 -0700 Subject: [PATCH 0229/3617] No longer need AnsibleComposer --- v2/ansible/parsing/yaml/composer.py | 38 ----------------------------- v2/ansible/parsing/yaml/loader.py | 7 +++--- 2 files changed, 3 insertions(+), 42 deletions(-) delete mode 100644 v2/ansible/parsing/yaml/composer.py diff --git a/v2/ansible/parsing/yaml/composer.py b/v2/ansible/parsing/yaml/composer.py deleted file mode 100644 index 
6bdee92fc38180..00000000000000 --- a/v2/ansible/parsing/yaml/composer.py +++ /dev/null @@ -1,38 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from yaml.composer import Composer -from yaml.nodes import MappingNode, ScalarNode - -class AnsibleComposer(Composer): - def __init__(self): - super(Composer, self).__init__() - - def compose_node(self, parent, index): - # the line number where the previous token has ended (plus empty lines) - node = Composer.compose_node(self, parent, index) - if isinstance(node, (ScalarNode, MappingNode)): - node.__datasource__ = self.name - node.__line__ = self.line - node.__column__ = node.start_mark.column + 1 - node.__line__ = node.start_mark.line + 1 - - return node diff --git a/v2/ansible/parsing/yaml/loader.py b/v2/ansible/parsing/yaml/loader.py index 4e0049ed2a8f3e..e8547ff0d141fe 100644 --- a/v2/ansible/parsing/yaml/loader.py +++ b/v2/ansible/parsing/yaml/loader.py @@ -36,17 +36,16 @@ def __init__(self, stream, file_name=None): AnsibleConstructor.__init__(self, file_name=file_name) Resolver.__init__(self) else: + from yaml.composer import Composer from yaml.reader import Reader from yaml.scanner import Scanner from yaml.parser import Parser - from ansible.parsing.yaml.composer import 
AnsibleComposer - - class AnsibleLoader(Reader, Scanner, Parser, AnsibleComposer, AnsibleConstructor, Resolver): + class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver): def __init__(self, stream, file_name=None): Reader.__init__(self, stream) Scanner.__init__(self) Parser.__init__(self) - AnsibleComposer.__init__(self) + Composer.__init__(self) AnsibleConstructor.__init__(self, file_name=file_name) Resolver.__init__(self) From 2cddb093f5b245474514c2137684d67a37fde1e7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 2 Apr 2015 12:37:02 -0700 Subject: [PATCH 0230/3617] Add AnsibleList for keeping track of line numbers in lists parsed from yaml --- v2/ansible/parsing/yaml/constructor.py | 11 ++++++++++- v2/ansible/parsing/yaml/objects.py | 4 ++++ v2/test/parsing/yaml/test_loader.py | 18 +++++++++++++----- 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py index aed2553c05b9af..97f9c71ef8bd44 100644 --- a/v2/ansible/parsing/yaml/constructor.py +++ b/v2/ansible/parsing/yaml/constructor.py @@ -20,7 +20,7 @@ __metaclass__ = type from yaml.constructor import Constructor -from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleUnicode +from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode class AnsibleConstructor(Constructor): def __init__(self, file_name=None): @@ -50,6 +50,12 @@ def construct_yaml_str(self, node): return ret + def construct_yaml_seq(self, node): + data = AnsibleSequence() + yield data + data.extend(self.construct_sequence(node)) + data.ansible_pos = self._node_position_info(node) + def _node_position_info(self, node): # the line number where the previous token has ended (plus empty lines) column = node.start_mark.column + 1 @@ -79,3 +85,6 @@ def _node_position_info(self, node): u'tag:yaml.org,2002:python/unicode', AnsibleConstructor.construct_yaml_str) 
+AnsibleConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + AnsibleConstructor.construct_yaml_seq) diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index 15850dd4f8749d..fe37eaab94a8df 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -50,3 +50,7 @@ class AnsibleMapping(AnsibleBaseYAMLObject, dict): class AnsibleUnicode(AnsibleBaseYAMLObject, unicode): ''' sub class for unicode objects ''' pass + +class AnsibleSequence(AnsibleBaseYAMLObject, list): + ''' sub class for lists ''' + pass diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py index f9144fb2925400..4c56962610099d 100644 --- a/v2/test/parsing/yaml/test_loader.py +++ b/v2/test/parsing/yaml/test_loader.py @@ -95,7 +95,11 @@ def test_parse_list(self): self.assertEqual(data, [u'a', u'b']) self.assertEqual(len(data), 2) self.assertIsInstance(data[0], unicode) - # No line/column info saved yet + + self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) + + self.assertEqual(data[0].ansible_pos, ('myfile.yml', 2, 19)) + self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19)) class TestAnsibleLoaderPlay(unittest.TestCase): @@ -184,12 +188,17 @@ def check_vars(self): self.assertEqual(self.data[0][u'vars'][u'string'].ansible_pos, (self.play_filename, 5, 29)) self.assertEqual(self.data[0][u'vars'][u'utf8_string'].ansible_pos, (self.play_filename, 6, 34)) + self.assertEqual(self.data[0][u'vars'][u'dictionary'].ansible_pos, (self.play_filename, 8, 23)) self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster'].ansible_pos, (self.play_filename, 8, 32)) self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed'].ansible_pos, (self.play_filename, 9, 28)) - # Lists don't yet have line/col information - #self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 10, 21)) + self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 11, 23)) + 
self.assertEqual(self.data[0][u'vars'][u'list'][0].ansible_pos, (self.play_filename, 11, 25)) + self.assertEqual(self.data[0][u'vars'][u'list'][1].ansible_pos, (self.play_filename, 12, 25)) + # Numbers don't have line/col info yet + #self.assertEqual(self.data[0][u'vars'][u'list'][2].ansible_pos, (self.play_filename, 13, 25)) + #self.assertEqual(self.data[0][u'vars'][u'list'][3].ansible_pos, (self.play_filename, 14, 25)) def check_tasks(self): # @@ -224,7 +233,6 @@ def test_line_numbers(self): self.check_vars() - # Lists don't yet have line/col info - #self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 17, 28)) + self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 16, 21)) self.check_tasks() From 5808b68d35e19762b34cc8aad1557fa2f482381b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 2 Apr 2015 12:41:30 -0700 Subject: [PATCH 0231/3617] Update module pointers --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- v2/ansible/modules/extras | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 613961c592ed23..04c34cfa02185a 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 613961c592ed23ded2d7e3771ad45b01de5a95f3 +Subproject commit 04c34cfa02185a8d74165f5bdc96371ec6df37a8 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index eb04e45311683d..21fce8ac730346 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit eb04e45311683dba1d54c8e5db293a2d3877eb68 +Subproject commit 21fce8ac730346b4e77427e3582553f2dc93c675 diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras index 46e316a20a92b5..21fce8ac730346 160000 --- a/v2/ansible/modules/extras +++ b/v2/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 46e316a20a92b5a54b982eddb301eb3d57da397e +Subproject commit 21fce8ac730346b4e77427e3582553f2dc93c675 
From fa076591c97ea922fef16495d9e9be46b39a7ad8 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 2 Apr 2015 15:30:37 -0500 Subject: [PATCH 0232/3617] Don't recommend installing ansible via homebrew --- docsite/rst/intro_installation.rst | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index bad6ea068eff07..4a4504388a56fe 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -242,17 +242,14 @@ You may also wish to install from ports, run: $ sudo make -C /usr/ports/sysutils/ansible install -.. _from_brew: +.. _on_macos: -Latest Releases Via Homebrew (Mac OSX) +Latest Releases on Mac OSX ++++++++++++++++++++++++++++++++++++++ -To install on a Mac, make sure you have Homebrew, then run: +The preferred way to install ansible on a Mac is via pip. -.. code-block:: bash - - $ brew update - $ brew install ansible +The instructions can be found in `Latest Releases Via Pip`_ section. .. _from_pkgutil: From 469a1250b6a487fbe9f1df35a9cf02a3292518cd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 2 Apr 2015 16:21:45 -0500 Subject: [PATCH 0233/3617] Moving new patch action plugin over to v2 --- v2/ansible/plugins/action/patch.py | 66 ++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 v2/ansible/plugins/action/patch.py diff --git a/v2/ansible/plugins/action/patch.py b/v2/ansible/plugins/action/patch.py new file mode 100644 index 00000000000000..717cc359f4e806 --- /dev/null +++ b/v2/ansible/plugins/action/patch.py @@ -0,0 +1,66 @@ +# (c) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible.plugins.action import ActionBase +from ansible.utils.boolean import boolean + +class ActionModule(ActionBase): + + def run(self, tmp=None, task_vars=dict()): + + src = self._task.args.get('src', None) + dest = self._task.args.get('dest', None) + remote_src = boolean(self._task.args.get('remote_src', 'no')) + + if src is None: + return dict(failed=True, msg="src is required") + elif remote_src: + # everyting is remote, so we just execute the module + # without changing any of the module arguments + return self._execute_module() + + if self._task._role is not None: + src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src) + else: + src = self._loader.path_dwim(src) + + # create the remote tmp dir if needed, and put the source file there + if tmp is None or "-tmp-" not in tmp: + tmp = self._make_tmp_path() + + tmp_src = self._shell.join_path(tmp, os.path.basename(src)) + self._connection.put_file(src, tmp_src) + + if self._connection_info.become and self._connection_info.become_user != 'root': + # FIXME: noop stuff here + #if not self.runner.noop_on_check(inject): + # self._remote_chmod('a+r', tmp_src, tmp) + self._remote_chmod('a+r', tmp_src, tmp) + + new_module_args = self._task.args.copy() + new_module_args.update( + dict( + src=tmp_src, + ) + ) + + return self._execute_module('patch', module_args=new_module_args) From 92e400eb6d8063711e090722b9a2e3bd0bd39c43 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 2 Apr 2015 21:08:17 -0400 Subject: [PATCH 0234/3617] fixed minor issues 
with openstack docs not being valid yaml --- lib/ansible/utils/module_docs_fragments/openstack.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index 519ad785b9b9dc..f989b3dcb80f8e 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -24,7 +24,7 @@ class ModuleDocFragment(object): cloud: description: - Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin) - required: false + required: false auth: description: - Dictionary containing auth information as needed by the cloud's auth @@ -87,12 +87,11 @@ class ModuleDocFragment(object): required: false endpoint_type: description: - - Endpoint URL type to fetch from the service catalog. + - Endpoint URL type to fetch from the service catalog. choices: [public, internal, admin] required: false default: public -requirements: - - shade +requirements: [shade] notes: - The standard OpenStack environment variables, such as C(OS_USERNAME) may be user instead of providing explicit values. 
From 7a81167b0697ad261c5b98f5b31c2c5842a96ad8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 2 Apr 2015 23:59:48 -0400 Subject: [PATCH 0235/3617] brought v2 find plugins up 2 date with v1, also added exception handling for whne there is a permissions issue --- v2/ansible/plugins/__init__.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index bf074b78978ca2..7da575162ad3a7 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -26,6 +26,7 @@ import glob import imp from ansible import constants as C +from ansible.utils import warnings from ansible import errors MODULE_CACHE = {} @@ -160,17 +161,14 @@ def add_directory(self, directory, with_subdir=False): self._extra_dirs.append(directory) self._paths = None - def find_plugin(self, name, suffixes=None, transport=''): + def find_plugin(self, name, suffixes=None): ''' Find a plugin named name ''' if not suffixes: if self.class_name: suffixes = ['.py'] else: - if transport == 'winrm': - suffixes = ['.ps1', ''] - else: - suffixes = ['.py', ''] + suffixes = ['.py', ''] potential_names = frozenset('%s%s' % (name, s) for s in suffixes) for full_name in potential_names: @@ -180,18 +178,21 @@ def find_plugin(self, name, suffixes=None, transport=''): found = None for path in [p for p in self._get_paths() if p not in self._searched_paths]: if os.path.isdir(path): - for potential_file in os.listdir(path): + try: + full_paths = (os.path.join(path, f) for f in os.listdir(path)) + except OSError,e: + warnings("Error accessing plugin paths: %s" % str(e)) + for full_path in (f for f in full_paths if os.path.isfile(f)): for suffix in suffixes: - if potential_file.endswith(suffix): - full_path = os.path.join(path, potential_file) + if full_path.endswith(suffix): full_name = os.path.basename(full_path) break else: # Yes, this is a for-else: http://bit.ly/1ElPkyg continue - + if full_name not in 
self._plugin_path_cache: self._plugin_path_cache[full_name] = full_path - + self._searched_paths.add(path) for full_name in potential_names: if full_name in self._plugin_path_cache: From 25f071b64c11a2142723fa698adba46e297fcbe7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Apr 2015 00:01:32 -0400 Subject: [PATCH 0236/3617] fixed called to find plugin, transport is not needed as suffixes are passed --- v2/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index e56003021588bf..2d258dd5250a9b 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -83,7 +83,7 @@ def _configure_module(self, module_name, module_args): # Search module path(s) for named module. module_suffixes = getattr(self._connection, 'default_suffixes', None) - module_path = self._module_loader.find_plugin(module_name, module_suffixes, transport=self._connection.get_transport()) + module_path = self._module_loader.find_plugin(module_name, module_suffixes) if module_path is None: module_path2 = self._module_loader.find_plugin('ping', module_suffixes) if module_path2 is not None: From 0f8bc038ec57ab93dddb4a748b38f4c054acc6e3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Apr 2015 00:25:09 -0400 Subject: [PATCH 0237/3617] changed to use display as utils.warning doesnt exist in v2 --- v2/ansible/plugins/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index 7da575162ad3a7..a55059f1b7b7bc 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -26,7 +26,7 @@ import glob import imp from ansible import constants as C -from ansible.utils import warnings +from ansible.utils.display import Display from ansible import errors MODULE_CACHE = {} @@ -181,7 +181,8 @@ def find_plugin(self, name, 
suffixes=None): try: full_paths = (os.path.join(path, f) for f in os.listdir(path)) except OSError,e: - warnings("Error accessing plugin paths: %s" % str(e)) + d = Display() + d.warning("Error accessing plugin paths: %s" % str(e)) for full_path in (f for f in full_paths if os.path.isfile(f)): for suffix in suffixes: if full_path.endswith(suffix): From 2ade17e2f5b9ac48f3e4330617a64adbd04adca4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Apr 2015 04:50:44 -0400 Subject: [PATCH 0238/3617] v2 changed empty inventory to warning that only localhost is available --- v2/bin/ansible | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/v2/bin/ansible b/v2/bin/ansible index 8eb5c97a6f5568..2b2df3df8f2d84 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -29,6 +29,7 @@ from ansible.inventory import Inventory from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play +from ansible.utils.display import Display from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager @@ -98,7 +99,8 @@ class Cli(object): hosts = inventory.list_hosts(pattern) if len(hosts) == 0: - raise AnsibleError("provided hosts list is empty") + d = Display() + d.warning("provided hosts list is empty, only localhost is available") if options.listhosts: for host in hosts: From 20b4492704450c11036476b8ab651fe57e97b11c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Apr 2015 04:51:16 -0400 Subject: [PATCH 0239/3617] started implementing 'list options' --- v2/bin/ansible-playbook | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index d9771249794fd0..3a3793affc6795 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -58,13 +58,16 @@ def main(args): 
validate_conflicts(parser,options) + # Note: slightly wrong, this is written so that implicit localhost # Manage passwords sshpass = None becomepass = None vault_pass = None - normalize_become_options(options) - (sshpass, becomepass, vault_pass) = ask_passwords(options) + # don't deal with privilege escalation when we don't need to + if not options.listhosts and not options.listtasks and not options.listtags: + normalize_become_options(options) + (sshpass, becomepass, vault_pass) = ask_passwords(options) if options.vault_password_file: # read vault_pass from a file @@ -109,7 +112,6 @@ def main(args): inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory) variable_manager.set_inventory(inventory) - # Note: slightly wrong, this is written so that implicit localhost # (which is not returned in list_hosts()) is taken into account for # warning if inventory is empty. But it can't be taken into account for # checking if limit doesn't match any hosts. 
Instead we don't worry about @@ -129,7 +131,18 @@ def main(args): # create the playbook executor, which manages running the plays # via a task queue manager pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=options) - return pbex.run() + + if options.listhosts: + print('TODO: implement') + sys.exit(0) + elif options.listtasks: + print('TODO: implement') + sys.exit(0) + elif options.listtags: + print('TODO: implement') + sys.exit(0) + else: + return pbex.run() if __name__ == "__main__": #display(" ", log_only=True) From e719bf765d49ac7ac14ae056bfe0605756651259 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Apr 2015 09:20:19 -0400 Subject: [PATCH 0240/3617] switched to use cross platform os.sep, added diff output to copy --- v2/ansible/plugins/action/copy.py | 12 +++++------- v2/ansible/plugins/action/fetch.py | 2 +- v2/ansible/plugins/action/template.py | 2 +- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py index 89c2fde7b3f1e9..ece8b5b11b0973 100644 --- a/v2/ansible/plugins/action/copy.py +++ b/v2/ansible/plugins/action/copy.py @@ -31,7 +31,7 @@ from ansible.utils.boolean import boolean from ansible.utils.hashing import checksum from ansible.utils.unicode import to_bytes - +from ansible.parsing.vault import VaultLib class ActionModule(ActionBase): @@ -55,7 +55,7 @@ def run(self, tmp=None, task_vars=dict()): # Check if the source ends with a "/" source_trailing_slash = False if source: - source_trailing_slash = source.endswith("/") + source_trailing_slash = source.endswith(os.sep) # Define content_tempfile in case we set it after finding content populated. content_tempfile = None @@ -145,6 +145,7 @@ def run(self, tmp=None, task_vars=dict()): dest = self._remote_expand_user(dest, tmp) for source_full, source_rel in source_files: + # Generate a hash of the local file. 
local_checksum = checksum(source_full) @@ -284,11 +285,8 @@ def run(self, tmp=None, task_vars=dict()): else: result = dict(dest=dest, src=source, changed=changed) - # FIXME: move diffs into the result? - #if len(diffs) == 1: - # return ReturnData(conn=conn, result=result, diff=diffs[0]) - #else: - # return ReturnData(conn=conn, result=result) + if len(diffs) == 1: + result['diff']=diffs[0] return result diff --git a/v2/ansible/plugins/action/fetch.py b/v2/ansible/plugins/action/fetch.py index e63fd88ea5c3c4..7b549f5ecbce48 100644 --- a/v2/ansible/plugins/action/fetch.py +++ b/v2/ansible/plugins/action/fetch.py @@ -82,7 +82,7 @@ def run(self, tmp=None, task_vars=dict()): dest = os.path.expanduser(dest) if flat: - if dest.endswith("/"): + if dest.endswith(os.sep): # if the path ends with "/", we'll use the source filename as the # destination filename base = os.path.basename(source_local) diff --git a/v2/ansible/plugins/action/template.py b/v2/ansible/plugins/action/template.py index 1f7a6955a3220b..76b2e78a737d62 100644 --- a/v2/ansible/plugins/action/template.py +++ b/v2/ansible/plugins/action/template.py @@ -91,7 +91,7 @@ def run(self, tmp=None, task_vars=dict()): dest = self._remote_expand_user(dest, tmp) directory_prepended = False - if dest.endswith("/"): # CCTODO: Fix path for Windows hosts. 
+ if dest.endswith(os.sep): directory_prepended = True base = os.path.basename(source) dest = os.path.join(dest, base) From d5eb4df23ee7fd8086eae988a85c42204832777d Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 3 Apr 2015 09:42:20 -0500 Subject: [PATCH 0241/3617] Add ability to specify using ssh_args in synchronize --- lib/ansible/runner/action_plugins/synchronize.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py index f8e57ae314e395..fb82194b00a1b2 100644 --- a/lib/ansible/runner/action_plugins/synchronize.py +++ b/lib/ansible/runner/action_plugins/synchronize.py @@ -19,6 +19,7 @@ import os.path from ansible import utils +from ansible import constants from ansible.runner.return_data import ReturnData import ansible.utils.template as template @@ -104,9 +105,11 @@ def run(self, conn, tmp, module_name, module_args, src = options.get('src', None) dest = options.get('dest', None) + use_ssh_args = options.pop('use_ssh_args', None) src = template.template(self.runner.basedir, src, inject) dest = template.template(self.runner.basedir, dest, inject) + use_ssh_args = template.template(self.runner.basedir, use_ssh_args, inject) try: options['local_rsync_path'] = inject['ansible_rsync_path'] @@ -187,6 +190,8 @@ def run(self, conn, tmp, module_name, module_args, options['dest'] = dest if 'mode' in options: del options['mode'] + if use_ssh_args: + options['ssh_args'] = constants.ANSIBLE_SSH_ARGS # Allow custom rsync path argument. 
rsync_path = options.get('rsync_path', None) From ada86dafaf5b4ee7f5d5b6cb203f982bcb1f9d19 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Apr 2015 13:02:42 -0400 Subject: [PATCH 0242/3617] added listhosts draft fixed assert from list to new yaml ansible object taskqueue is now None when just listing --- v2/ansible/executor/playbook_executor.py | 95 +++++++++++++++--------- v2/ansible/playbook/helpers.py | 8 +- v2/bin/ansible-playbook | 7 +- 3 files changed, 68 insertions(+), 42 deletions(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 324e6b01af9dfb..64f3f676210711 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -43,7 +43,10 @@ def __init__(self, playbooks, inventory, variable_manager, loader, options): self._loader = loader self._options = options - self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, options=options) + if options.listhosts or options.listtasks or options.listtags: + self._tqm = None + else: + self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, options=options) def run(self): @@ -58,7 +61,7 @@ def run(self): try: for playbook_path in self._playbooks: pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) - + # FIXME: playbook entries are just plays, so we should rename them for play in pb.get_entries(): self._inventory.remove_restriction() @@ -83,43 +86,40 @@ def run(self): break if result != 0: - # FIXME: do something here, to signify the playbook execution failed - self._cleanup() - return result - except: + raise AnsibleError("Play failed!: %d" % result) + finally: self._cleanup() - raise - self._cleanup() - - # FIXME: this stat summary stuff should be cleaned up and moved - # to a new method, if it even belongs here... 
- self._tqm._display.banner("PLAY RECAP") - - hosts = sorted(self._tqm._stats.processed.keys()) - for h in hosts: - t = self._tqm._stats.summarize(h) - - self._tqm._display.display("%s : %s %s %s %s" % ( - hostcolor(h, t), - colorize('ok', t['ok'], 'green'), - colorize('changed', t['changed'], 'yellow'), - colorize('unreachable', t['unreachable'], 'red'), - colorize('failed', t['failures'], 'red')), - screen_only=True - ) - - self._tqm._display.display("%s : %s %s %s %s" % ( - hostcolor(h, t, False), - colorize('ok', t['ok'], None), - colorize('changed', t['changed'], None), - colorize('unreachable', t['unreachable'], None), - colorize('failed', t['failures'], None)), - log_only=True - ) - - self._tqm._display.display("", screen_only=True) - # END STATS STUFF + if result == 0: + #TODO: move to callback + # FIXME: this stat summary stuff should be cleaned up and moved + # to a new method, if it even belongs here... + self._tqm._display.banner("PLAY RECAP") + + hosts = sorted(self._tqm._stats.processed.keys()) + for h in hosts: + t = self._tqm._stats.summarize(h) + + self._tqm._display.display("%s : %s %s %s %s" % ( + hostcolor(h, t), + colorize('ok', t['ok'], 'green'), + colorize('changed', t['changed'], 'yellow'), + colorize('unreachable', t['unreachable'], 'red'), + colorize('failed', t['failures'], 'red')), + screen_only=True + ) + + self._tqm._display.display("%s : %s %s %s %s" % ( + hostcolor(h, t, False), + colorize('ok', t['ok'], None), + colorize('changed', t['changed'], None), + colorize('unreachable', t['unreachable'], None), + colorize('failed', t['failures'], None)), + log_only=True + ) + + self._tqm._display.display("", screen_only=True) + # END STATS STUFF return result @@ -160,3 +160,24 @@ def _get_serialized_batches(self, play): serialized_batches.append(play_hosts) return serialized_batches + + def listhosts(self): + + playlist = [] + try: + for playbook_path in self._playbooks: + pb = Playbook.load(playbook_path, 
variable_manager=self._variable_manager, loader=self._loader) + for play in pb.get_entries(): + + # Use templated copies in case hosts: depends on variables + all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) + new_play = play.copy() + new_play.post_validate(all_vars, fail_on_undefined=False) + + playlist.append(set(self._inventory.get_hosts(new_play.hosts))) + except AnsibleError: + raise + except Exception, e: + raise AnsibleParserError("Failed to process plays: %s" % str(e)) + + return playlist diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py index cc262b4fb51b94..dd346c636f03c0 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -21,7 +21,7 @@ from types import NoneType from ansible.errors import AnsibleParserError -from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): @@ -34,7 +34,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use # we import here to prevent a circular dependency with imports from ansible.playbook.block import Block - assert type(ds) in (list, NoneType) + assert ds is None or isinstance(ds, AnsibleSequence), 'block has bad type: %s' % type(ds) block_list = [] if ds: @@ -64,7 +64,7 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler from ansible.playbook.handler import Handler from ansible.playbook.task import Task - assert type(ds) == list + assert isinstance(ds, list), 'task has bad type: %s' % type(ds) task_list = [] for task in ds: @@ -101,7 +101,7 @@ def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader # we import here to prevent a circular dependency with imports from ansible.playbook.role.include import RoleInclude - assert isinstance(ds, 
list) + assert isinstance(ds, list), 'roles has bad type: %s' % type(ds) roles = [] for role_def in ds: diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index 3a3793affc6795..57380590c472a5 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -133,7 +133,12 @@ def main(args): pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=options) if options.listhosts: - print('TODO: implement') + i = 1 + for play in pbex.listhosts(): + print("\nplay #%d" % i) + for host in sorted(play): + print(" %s" % host) + i = i + 1 sys.exit(0) elif options.listtasks: print('TODO: implement') From 41d9bfde07853a6b2113ea1ec2fe154a189ce693 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 3 Apr 2015 12:17:01 -0500 Subject: [PATCH 0243/3617] Moving the Display() instantiation outside of v2 classes --- v2/ansible/executor/playbook_executor.py | 13 +++++++------ v2/ansible/executor/task_queue_manager.py | 6 ++---- v2/ansible/playbook/helpers.py | 2 +- v2/bin/ansible | 4 +++- v2/bin/ansible-playbook | 4 +++- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 64f3f676210711..97232cefe8f1fb 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -36,17 +36,18 @@ class PlaybookExecutor: basis for bin/ansible-playbook operation. 
''' - def __init__(self, playbooks, inventory, variable_manager, loader, options): + def __init__(self, playbooks, inventory, variable_manager, loader, display, options): self._playbooks = playbooks self._inventory = inventory self._variable_manager = variable_manager self._loader = loader + self._display = display self._options = options if options.listhosts or options.listtasks or options.listtags: self._tqm = None else: - self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, options=options) + self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, display=display, options=options) def run(self): @@ -94,13 +95,13 @@ def run(self): #TODO: move to callback # FIXME: this stat summary stuff should be cleaned up and moved # to a new method, if it even belongs here... - self._tqm._display.banner("PLAY RECAP") + self._display.banner("PLAY RECAP") hosts = sorted(self._tqm._stats.processed.keys()) for h in hosts: t = self._tqm._stats.summarize(h) - self._tqm._display.display("%s : %s %s %s %s" % ( + self._display.display("%s : %s %s %s %s" % ( hostcolor(h, t), colorize('ok', t['ok'], 'green'), colorize('changed', t['changed'], 'yellow'), @@ -109,7 +110,7 @@ def run(self): screen_only=True ) - self._tqm._display.display("%s : %s %s %s %s" % ( + self._display.display("%s : %s %s %s %s" % ( hostcolor(h, t, False), colorize('ok', t['ok'], None), colorize('changed', t['changed'], None), @@ -118,7 +119,7 @@ def run(self): log_only=True ) - self._tqm._display.display("", screen_only=True) + self._display.display("", screen_only=True) # END STATS STUFF return result diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index 0693e9dc56ccaa..28904676eb28d4 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -33,7 +33,6 @@ from ansible.plugins import 
callback_loader, strategy_loader from ansible.utils.debug import debug -from ansible.utils.display import Display __all__ = ['TaskQueueManager'] @@ -49,16 +48,15 @@ class TaskQueueManager: which dispatches the Play's tasks to hosts. ''' - def __init__(self, inventory, callback, variable_manager, loader, options): + def __init__(self, inventory, callback, variable_manager, loader, display, options): self._inventory = inventory self._variable_manager = variable_manager self._loader = loader + self._display = display self._options = options self._stats = AggregateStats() - self._display = Display() - # a special flag to help us exit cleanly self._terminated = False diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py index dd346c636f03c0..7242322b88faf2 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -34,7 +34,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use # we import here to prevent a circular dependency with imports from ansible.playbook.block import Block - assert ds is None or isinstance(ds, AnsibleSequence), 'block has bad type: %s' % type(ds) + assert ds is None or isinstance(ds, list), 'block has bad type: %s' % type(ds) block_list = [] if ds: diff --git a/v2/bin/ansible b/v2/bin/ansible index 2b2df3df8f2d84..79d5f0a28b34ee 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -31,6 +31,7 @@ from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play from ansible.utils.display import Display from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords +from ansible.utils.display import Display from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager @@ -131,7 +132,8 @@ class Cli(object): # now create a task queue manager to execute the play try: - tqm = TaskQueueManager(inventory=inventory, callback='minimal', variable_manager=variable_manager, loader=loader, 
options=options) + display = Display() + tqm = TaskQueueManager(inventory=inventory, callback='minimal', variable_manager=variable_manager, loader=loader, display=display, options=options) result = tqm.run(play) tqm.cleanup() except AnsibleError: diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index 57380590c472a5..c1ee70d059c701 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -15,6 +15,7 @@ from ansible.parsing.splitter import parse_kv from ansible.playbook import Playbook from ansible.playbook.task import Task from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords +from ansible.utils.display import Display from ansible.utils.unicode import to_unicode from ansible.utils.vars import combine_vars from ansible.utils.vault import read_vault_file @@ -130,7 +131,8 @@ def main(args): # create the playbook executor, which manages running the plays # via a task queue manager - pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=options) + display = Display() + pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options) if options.listhosts: i = 1 From a811c8841e2e0da5de6b6df056e6c84b6166a432 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Apr 2015 13:41:39 -0400 Subject: [PATCH 0244/3617] now listhosts shows the same info as v1 --- v2/ansible/executor/playbook_executor.py | 16 ++++++++++++++-- v2/bin/ansible-playbook | 8 +++----- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 97232cefe8f1fb..bab6ea4e05d4b5 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -162,10 +162,11 @@ def _get_serialized_batches(self, play): return serialized_batches - def listhosts(self): + def 
list_hosts_per_play(self): playlist = [] try: + i = 1 for playbook_path in self._playbooks: pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) for play in pb.get_entries(): @@ -175,10 +176,21 @@ def listhosts(self): new_play = play.copy() new_play.post_validate(all_vars, fail_on_undefined=False) - playlist.append(set(self._inventory.get_hosts(new_play.hosts))) + pname = play.get_name().strip() + if pname == 'PLAY: ': + pname = 'play #%d' % i + + playlist.append( { + 'name': pname, + 'pattern': play.hosts, + 'hosts': set(self._inventory.get_hosts(new_play.hosts)), + } ) + i = i + 1 + except AnsibleError: raise except Exception, e: + #TODO: log exception raise AnsibleParserError("Failed to process plays: %s" % str(e)) return playlist diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index c1ee70d059c701..4dc6d6bad94a4f 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -135,12 +135,10 @@ def main(args): pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options) if options.listhosts: - i = 1 - for play in pbex.listhosts(): - print("\nplay #%d" % i) - for host in sorted(play): + for p in pbex.list_hosts_per_play(): + print("\n %s (%s): host count=%d" % (p['name'], p['pattern'], len(p['hosts']))) + for host in p['hosts']: print(" %s" % host) - i = i + 1 sys.exit(0) elif options.listtasks: print('TODO: implement') From 3c6fdebfe38d3b3d6c4a33e251fd6de3333f50ba Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Apr 2015 13:49:00 -0400 Subject: [PATCH 0245/3617] made listhosts play output name more consistent internally --- v2/ansible/executor/playbook_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index bab6ea4e05d4b5..24b9f8c17baf40 100644 --- a/v2/ansible/executor/playbook_executor.py +++ 
b/v2/ansible/executor/playbook_executor.py @@ -178,7 +178,7 @@ def list_hosts_per_play(self): pname = play.get_name().strip() if pname == 'PLAY: ': - pname = 'play #%d' % i + pname = 'PLAY: #%d' % i playlist.append( { 'name': pname, From 22608939eb918504faf25850f71d568756256847 Mon Sep 17 00:00:00 2001 From: Bill Nottingham Date: Fri, 3 Apr 2015 14:23:04 -0400 Subject: [PATCH 0246/3617] Update intro_windows.rst Refer to PowerShell consistently. --- docsite/rst/intro_windows.rst | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index b5e6be8234046e..544c6fba754b81 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -11,7 +11,7 @@ Windows: How Does It Work As you may have already read, Ansible manages Linux/Unix machines using SSH by default. Starting in version 1.7, Ansible also contains support for managing Windows machines. This uses -native powershell remoting, rather than SSH. +native PowerShell remoting, rather than SSH. Ansible will still be run from a Linux control machine, and uses the "winrm" Python module to talk to remote hosts. @@ -67,7 +67,7 @@ communication channel that leverages Windows remoting:: ansible windows [-i inventory] -m win_ping --ask-vault-pass If you haven't done anything to prep your systems yet, this won't work yet. This is covered in a later -section about how to enable powershell remoting - and if necessary - how to upgrade powershell to +section about how to enable PowerShell remoting - and if necessary - how to upgrade PowerShell to a version that is 3 or higher. You'll run this command again later though, to make sure everything is working. @@ -77,21 +77,21 @@ You'll run this command again later though, to make sure everything is working. Windows System Prep ``````````````````` -In order for Ansible to manage your windows machines, you will have to enable Powershell remoting configured. 
+In order for Ansible to manage your windows machines, you will have to enable PowerShell remoting configured. -To automate setup of WinRM, you can run `this powershell script `_ on the remote machine. +To automate setup of WinRM, you can run `this PowerShell script `_ on the remote machine. Admins may wish to modify this setup slightly, for instance to increase the timeframe of the certificate. .. _getting_to_powershell_three_or_higher: -Getting to Powershell 3.0 or higher +Getting to PowerShell 3.0 or higher ``````````````````````````````````` -Powershell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script. +PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script. -Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a powershell console as an administrator. You will now be running Powershell 3 and can try connectivity again using the win_ping technique referenced above. +Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above. .. _what_windows_modules_are_available: @@ -105,7 +105,7 @@ Browse this index to see what is available. In many cases, it may not be necessary to even write or use an Ansible module. 
-In particular, the "script" module can be used to run arbitrary powershell scripts, allowing Windows administrators familiar with powershell a very native way to do things, as in the following playbook:: +In particular, the "script" module can be used to run arbitrary PowerShell scripts, allowing Windows administrators familiar with PowerShell a very native way to do things, as in the following playbook:: - hosts: windows tasks: @@ -121,10 +121,10 @@ Developers: Supported modules and how it works Developing ansible modules are covered in a `later section of the documentation `_, with a focus on Linux/Unix. What if you want to write Windows modules for ansible though? -For Windows, ansible modules are implemented in Powershell. Skim those Linux/Unix module development chapters before proceeding. +For Windows, ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. Windows modules live in a "windows/" subfolder in the Ansible "library/" subtree. For example, if a module is named -"library/windows/win_ping", there will be embedded documentation in the "win_ping" file, and the actual powershell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense. +"library/windows/win_ping", there will be embedded documentation in the "win_ping" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense. Modules (ps1 files) should start as follows:: @@ -169,7 +169,7 @@ Windows Playbook Examples Look to the list of windows modules for most of what is possible, though also some modules like "raw" and "script" also work on Windows, as do "fetch" and "slurp". 
-Here is an example of pushing and running a powershell script:: +Here is an example of pushing and running a PowerShell script:: - name: test script module hosts: windows @@ -223,7 +223,7 @@ form of new modules, tweaks to existing modules, documentation, or something els :doc:`playbooks` Learning ansible's configuration management language `List of Windows Modules `_ - Windows specific module list, all implemented in powershell + Windows specific module list, all implemented in PowerShell `Mailing List `_ Questions? Help? Ideas? Stop by the list on Google Groups `irc.freenode.net `_ From 7e3b3b6ebe79b56ed2f56347bf7842cb2a9c52d9 Mon Sep 17 00:00:00 2001 From: Bill Nottingham Date: Fri, 3 Apr 2015 14:26:45 -0400 Subject: [PATCH 0247/3617] Update intro_windows.rst Add a bit about what Windows versions PS3 is actually available for. --- docsite/rst/intro_windows.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index 544c6fba754b81..d96478b0a267f0 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -89,7 +89,7 @@ the certificate. Getting to PowerShell 3.0 or higher ``````````````````````````````````` -PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script. +PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script. Note that PowerShell 3.0 is only supported on Windows 7 SP1, Windows Server 2008 SP1, and later releases of Windows. Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above. 
From 349ecf6efe54e9144285d1f4170ef0d8ef241ff2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 3 Apr 2015 11:35:01 -0700 Subject: [PATCH 0248/3617] Add a vault test to data_loader test and some additional yaml tests to parsing/yaml/test_loader --- v2/test/parsing/test_data_loader.py | 22 ++++++++++++++- v2/test/parsing/yaml/test_loader.py | 43 +++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/v2/test/parsing/test_data_loader.py b/v2/test/parsing/test_data_loader.py index 370046dbf34655..75ceb662f7327c 100644 --- a/v2/test/parsing/test_data_loader.py +++ b/v2/test/parsing/test_data_loader.py @@ -22,7 +22,7 @@ from yaml.scanner import ScannerError from ansible.compat.tests import unittest -from ansible.compat.tests.mock import patch +from ansible.compat.tests.mock import patch, mock_open from ansible.errors import AnsibleParserError from ansible.parsing import DataLoader @@ -62,3 +62,23 @@ def test_parse_fail_from_file(self, mock_def): """, True) self.assertRaises(AnsibleParserError, self._loader.load_from_file, 'dummy_yaml_bad.txt') +class TestDataLoaderWithVault(unittest.TestCase): + + def setUp(self): + self._loader = DataLoader(vault_password='ansible') + + def tearDown(self): + pass + + @patch.multiple(DataLoader, path_exists=lambda s, x: True, is_file=lambda s, x: True) + def test_parse_from_vault_1_1_file(self): + vaulted_data = """$ANSIBLE_VAULT;1.1;AES256 +33343734386261666161626433386662623039356366656637303939306563376130623138626165 +6436333766346533353463636566313332623130383662340a393835656134633665333861393331 +37666233346464636263636530626332623035633135363732623332313534306438393366323966 +3135306561356164310a343937653834643433343734653137383339323330626437313562306630 +3035 +""" + with patch('__builtin__.open', mock_open(read_data=vaulted_data)): + output = self._loader.load_from_file('dummy_vault.txt') + self.assertEqual(output, dict(foo='bar')) diff --git a/v2/test/parsing/yaml/test_loader.py 
b/v2/test/parsing/yaml/test_loader.py index 4c56962610099d..9a4746b99dfeab 100644 --- a/v2/test/parsing/yaml/test_loader.py +++ b/v2/test/parsing/yaml/test_loader.py @@ -101,6 +101,49 @@ def test_parse_list(self): self.assertEqual(data[0].ansible_pos, ('myfile.yml', 2, 19)) self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19)) + def test_parse_short_dict(self): + stream = StringIO("""{"foo": "bar"}""") + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, dict(foo=u'bar')) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1)) + self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 9)) + + stream = StringIO("""foo: bar""") + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, dict(foo=u'bar')) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1)) + self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 6)) + + def test_error_conditions(self): + stream = StringIO("""{""") + loader = AnsibleLoader(stream, 'myfile.yml') + self.assertRaises(loader.get_single_data) + + def test_front_matter(self): + stream = StringIO("""---\nfoo: bar""") + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, dict(foo=u'bar')) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 1)) + self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 2, 6)) + + # Initial indent (See: #6348) + stream = StringIO(""" - foo: bar\n baz: qux""") + loader = AnsibleLoader(stream, 'myfile.yml') + data = loader.get_single_data() + self.assertEqual(data, [{u'foo': u'bar', u'baz': u'qux'}]) + + self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 2)) + self.assertEqual(data[0].ansible_pos, ('myfile.yml', 1, 4)) + self.assertEqual(data[0][u'foo'].ansible_pos, ('myfile.yml', 1, 9)) + self.assertEqual(data[0][u'baz'].ansible_pos, ('myfile.yml', 2, 9)) + + class TestAnsibleLoaderPlay(unittest.TestCase): def setUp(self): 
From 2eb2a41d059f5c025055ba5795825fc8f422ea96 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 4 Apr 2015 10:24:03 -0400 Subject: [PATCH 0249/3617] renamed get_entries to get_plays --- v2/ansible/playbook/__init__.py | 2 +- v2/test/playbook/test_playbook.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py index 1c033559075404..40e6638f23921e 100644 --- a/v2/ansible/playbook/__init__.py +++ b/v2/ansible/playbook/__init__.py @@ -81,5 +81,5 @@ def _load_playbook_data(self, file_name, variable_manager): def get_loader(self): return self._loader - def get_entries(self): + def get_plays(self): return self._entries[:] diff --git a/v2/test/playbook/test_playbook.py b/v2/test/playbook/test_playbook.py index 1e72421818be6e..dfb52dc7b12726 100644 --- a/v2/test/playbook/test_playbook.py +++ b/v2/test/playbook/test_playbook.py @@ -47,7 +47,7 @@ def test_basic_playbook(self): """, }) p = Playbook.load("test_file.yml", loader=fake_loader) - entries = p.get_entries() + plays = p.get_plays() def test_bad_playbook_files(self): fake_loader = DictDataLoader({ From e6e69c089414835d448bbffffd21c4775f2b23f0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 4 Apr 2015 10:25:55 -0400 Subject: [PATCH 0250/3617] finished implementing list-hosts, started adding list-tasks/list-tags but getting just task names and have to adjust for having blocks. 
--- v2/ansible/executor/playbook_executor.py | 170 ++++++++++++----------- v2/ansible/playbook/play.py | 10 ++ v2/bin/ansible-playbook | 36 +++-- 3 files changed, 123 insertions(+), 93 deletions(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 24b9f8c17baf40..865b06f1088dcc 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -59,12 +59,18 @@ def run(self): signal.signal(signal.SIGINT, self._cleanup) result = 0 + entrylist = [] + entry = {} try: for playbook_path in self._playbooks: pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) - # FIXME: playbook entries are just plays, so we should rename them - for play in pb.get_entries(): + if self._tqm is None: # we are doing a listing + entry = {'playbook': playbook_path} + entry['plays'] = [] + + i = 1 + for play in pb.get_plays(): self._inventory.remove_restriction() # Create a temporary copy of the play here, so we can run post_validate @@ -73,54 +79,91 @@ def run(self): new_play = play.copy() new_play.post_validate(all_vars, fail_on_undefined=False) - for batch in self._get_serialized_batches(new_play): - if len(batch) == 0: - self._tqm.send_callback('v2_playbook_on_play_start', new_play) - self._tqm.send_callback('v2_playbook_on_no_hosts_matched') - result = 0 - break - # restrict the inventory to the hosts in the serialized batch - self._inventory.restrict_to_hosts(batch) - # and run it... 
- result = self._tqm.run(play=play) + if self._tqm is None: + # we are just doing a listing + + pname = new_play.get_name().strip() + if pname == 'PLAY: ': + pname = 'PLAY: #%d' % i + p = { 'name': pname } + + if self._options.listhosts: + p['pattern']=play.hosts + p['hosts']=set(self._inventory.get_hosts(new_play.hosts)) + + #TODO: play tasks are really blocks, need to figure out how to get task objects from them + elif self._options.listtasks: + p['tasks'] = [] + for task in play.get_tasks(): + p['tasks'].append(task) + #p['tasks'].append({'name': task.get_name().strip(), 'tags': task.tags}) + + elif self._options.listtags: + p['tags'] = set(new_play.tags) + for task in play.get_tasks(): + p['tags'].update(task) + #p['tags'].update(task.tags) + entry['plays'].append(p) + + else: + # we are actually running plays + for batch in self._get_serialized_batches(new_play): + if len(batch) == 0: + self._tqm.send_callback('v2_playbook_on_play_start', new_play) + self._tqm.send_callback('v2_playbook_on_no_hosts_matched') + result = 0 + break + # restrict the inventory to the hosts in the serialized batch + self._inventory.restrict_to_hosts(batch) + # and run it... + result = self._tqm.run(play=play) + if result != 0: + break + if result != 0: - break + raise AnsibleError("Play failed!: %d" % result) + + i = i + 1 # per play + + if entry: + entrylist.append(entry) # per playbook + + if entrylist: + return entrylist - if result != 0: - raise AnsibleError("Play failed!: %d" % result) finally: - self._cleanup() - - if result == 0: - #TODO: move to callback - # FIXME: this stat summary stuff should be cleaned up and moved - # to a new method, if it even belongs here... 
- self._display.banner("PLAY RECAP") - - hosts = sorted(self._tqm._stats.processed.keys()) - for h in hosts: - t = self._tqm._stats.summarize(h) - - self._display.display("%s : %s %s %s %s" % ( - hostcolor(h, t), - colorize('ok', t['ok'], 'green'), - colorize('changed', t['changed'], 'yellow'), - colorize('unreachable', t['unreachable'], 'red'), - colorize('failed', t['failures'], 'red')), - screen_only=True - ) - - self._display.display("%s : %s %s %s %s" % ( - hostcolor(h, t, False), - colorize('ok', t['ok'], None), - colorize('changed', t['changed'], None), - colorize('unreachable', t['unreachable'], None), - colorize('failed', t['failures'], None)), - log_only=True - ) - - self._display.display("", screen_only=True) - # END STATS STUFF + if self._tqm is not None: + self._cleanup() + + #TODO: move to callback + # FIXME: this stat summary stuff should be cleaned up and moved + # to a new method, if it even belongs here... + self._display.banner("PLAY RECAP") + + hosts = sorted(self._tqm._stats.processed.keys()) + for h in hosts: + t = self._tqm._stats.summarize(h) + + self._display.display("%s : %s %s %s %s" % ( + hostcolor(h, t), + colorize('ok', t['ok'], 'green'), + colorize('changed', t['changed'], 'yellow'), + colorize('unreachable', t['unreachable'], 'red'), + colorize('failed', t['failures'], 'red')), + screen_only=True + ) + + self._display.display("%s : %s %s %s %s" % ( + hostcolor(h, t, False), + colorize('ok', t['ok'], None), + colorize('changed', t['changed'], None), + colorize('unreachable', t['unreachable'], None), + colorize('failed', t['failures'], None)), + log_only=True + ) + + self._display.display("", screen_only=True) + # END STATS STUFF return result @@ -161,36 +204,3 @@ def _get_serialized_batches(self, play): serialized_batches.append(play_hosts) return serialized_batches - - def list_hosts_per_play(self): - - playlist = [] - try: - i = 1 - for playbook_path in self._playbooks: - pb = Playbook.load(playbook_path, 
variable_manager=self._variable_manager, loader=self._loader) - for play in pb.get_entries(): - - # Use templated copies in case hosts: depends on variables - all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) - new_play = play.copy() - new_play.post_validate(all_vars, fail_on_undefined=False) - - pname = play.get_name().strip() - if pname == 'PLAY: ': - pname = 'PLAY: #%d' % i - - playlist.append( { - 'name': pname, - 'pattern': play.hosts, - 'hosts': set(self._inventory.get_hosts(new_play.hosts)), - } ) - i = i + 1 - - except AnsibleError: - raise - except Exception, e: - #TODO: log exception - raise AnsibleParserError("Failed to process plays: %s" % str(e)) - - return playlist diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index a472d070899b3e..34c4d3e5608fde 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -27,6 +27,7 @@ from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles from ansible.playbook.role import Role from ansible.playbook.taggable import Taggable +from ansible.playbook.block import Block from ansible.utils.vars import combine_vars @@ -233,6 +234,15 @@ def get_handlers(self): def get_roles(self): return self.roles[:] + def get_tasks(self): + tasklist = [] + for task in self.pre_tasks + self.tasks + self.post_tasks: + if isinstance(task, Block): + tasklist.append(task.block + task.rescue + task.always) + else: + tasklist.append(task) + return tasklist + def serialize(self): data = super(Play, self).serialize() diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index 4dc6d6bad94a4f..e2cca104844be2 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -134,20 +134,30 @@ def main(args): display = Display() pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options) - if options.listhosts: - for p in pbex.list_hosts_per_play(): - print("\n %s 
(%s): host count=%d" % (p['name'], p['pattern'], len(p['hosts']))) - for host in p['hosts']: - print(" %s" % host) - sys.exit(0) - elif options.listtasks: - print('TODO: implement') - sys.exit(0) - elif options.listtags: - print('TODO: implement') - sys.exit(0) + results = pbex.run() + + if isinstance(results, list): + for p in results: + + print('') + print('playbook: %s' % p['playbook']) + print('') + + for play in p['plays']: + if options.listhosts: + print("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts']))) + for host in play['hosts']: + print(" %s" % host) + if options.listtasks: #TODO: do we want to display block info? + print("\n %s: task count=%d" % (play['name'], len(play['tasks']))) + for task in play['tasks']: + print(" %s" % task) + if options.listtags: + print("\n %s: tags count=%d" % (play['name'], len(play['tags']))) + for tag in play['tags']: + print(" %s" % tag) else: - return pbex.run() + return results if __name__ == "__main__": #display(" ", log_only=True) From af97e732a07cb5fc24f314894dbfe9f7b47e5c90 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 4 Apr 2015 15:14:40 -0400 Subject: [PATCH 0251/3617] updated ansible-playbook to use display, fixed issues breaking display class --- v2/ansible/executor/playbook_executor.py | 2 +- v2/ansible/playbook/play.py | 2 +- v2/ansible/utils/display.py | 7 ++-- v2/bin/ansible-playbook | 48 ++++++++++-------------- 4 files changed, 25 insertions(+), 34 deletions(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 865b06f1088dcc..94bdbf01e1f1ff 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -16,7 +16,7 @@ # along with Ansible. If not, see . 
# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import (absolute_import, division) __metaclass__ = type import signal diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index 34c4d3e5608fde..eeabfce062a4d6 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -16,7 +16,7 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import (absolute_import, division) __metaclass__ = type from ansible.errors import AnsibleError, AnsibleParserError diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index 758a62fceea7b5..dd44d61dd30eca 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -18,6 +18,7 @@ # FIXME: copied mostly from old code, needs py3 improvements import textwrap +import sys from ansible import constants as C from ansible.errors import * @@ -97,15 +98,15 @@ def deprecated(self, msg, version, removed=False): new_msg = "\n".join(wrapped) + "\n" if new_msg not in deprecations: - self._display(new_msg, color='purple', stderr=True) + self.display(new_msg, color='purple', stderr=True) self._deprecations[new_msg] = 1 def warning(self, msg): new_msg = "\n[WARNING]: %s" % msg wrapped = textwrap.wrap(new_msg, 79) new_msg = "\n".join(wrapped) + "\n" - if new_msg not in warns: - self._display(new_msg, color='bright purple', stderr=True) + if new_msg not in self._warns: + self.display(new_msg, color='bright purple', stderr=True) self._warns[new_msg] = 1 def system_warning(self, msg): diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index e2cca104844be2..49748129e125a2 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -21,13 +21,9 @@ from ansible.utils.vars import combine_vars from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager -# Implement an 
ansible.utils.warning() function later -def warning(*args, **kwargs): - print(*args, **kwargs) - #--------------------------------------------------------------------------------------------------- -def main(args): +def main(display, args): ''' run ansible-playbook operations ''' # create parser for CLI options @@ -122,16 +118,14 @@ def main(args): no_hosts = False if len(inventory.list_hosts()) == 0: # Empty inventory - warning("provided hosts list is empty, only localhost is available") + display.warning("provided hosts list is empty, only localhost is available") no_hosts = True inventory.subset(options.subset) if len(inventory.list_hosts()) == 0 and no_hosts is False: # Invalid limit raise errors.AnsibleError("Specified --limit does not match any hosts") - # create the playbook executor, which manages running the plays - # via a task queue manager - display = Display() + # create the playbook executor, which manages running the plays via a task queue manager pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options) results = pbex.run() @@ -139,38 +133,34 @@ def main(args): if isinstance(results, list): for p in results: - print('') - print('playbook: %s' % p['playbook']) - print('') - + display.display('\nplaybook: %s\n' % p['playbook']) for play in p['plays']: if options.listhosts: - print("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts']))) + display.display("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts']))) for host in play['hosts']: - print(" %s" % host) + display.display(" %s" % host) if options.listtasks: #TODO: do we want to display block info? 
- print("\n %s: task count=%d" % (play['name'], len(play['tasks']))) + display.display("\n %s" % (play['name'])) for task in play['tasks']: - print(" %s" % task) - if options.listtags: - print("\n %s: tags count=%d" % (play['name'], len(play['tags']))) + display.display(" %s" % task) + if options.listtags: #TODO: fix once we figure out block handling above + display.display("\n %s: tags count=%d" % (play['name'], len(play['tags']))) for tag in play['tags']: - print(" %s" % tag) + display.display(" %s" % tag) + return 0 else: return results if __name__ == "__main__": - #display(" ", log_only=True) - #display(" ".join(sys.argv), log_only=True) - #display(" ", log_only=True) + + display = Display() + display.display(" ".join(sys.argv), log_only=True) + try: - sys.exit(main(sys.argv[1:])) + sys.exit(main(display, sys.argv[1:])) except AnsibleError as e: - #display("ERROR: %s" % e, color='red', stderr=True) - print(e) + display.display("[ERROR]: %s" % e, color='red', stderr=True) sys.exit(1) except KeyboardInterrupt: - #display("ERROR: interrupted", color='red', stderr=True) - print("keyboard interrupt") + display.display("[ERROR]: interrupted", color='red', stderr=True) sys.exit(1) - From 5531b843602d04c95c2d5aed7bf5bb1580f93889 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 4 Apr 2015 15:21:42 -0400 Subject: [PATCH 0252/3617] moved ad-hoc to use display --- v2/bin/ansible | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/v2/bin/ansible b/v2/bin/ansible index 79d5f0a28b34ee..415a12af2c2359 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -40,8 +40,12 @@ from ansible.vars import VariableManager class Cli(object): ''' code behind bin/ansible ''' - def __init__(self): - pass + def __init__(self, display=None): + + if display is None: + self.display = Display() + else: + self.display = display def parse(self): ''' create an options parser for bin/ansible ''' @@ -105,7 +109,7 @@ class Cli(object): if 
options.listhosts: for host in hosts: - print(' %s' % host.name) + self.display(' %s' % host.name) sys.exit(0) if ((options.module_name == 'command' or options.module_name == 'shell') and not options.module_args): @@ -157,22 +161,17 @@ class Cli(object): ######################################################## if __name__ == '__main__': - #callbacks.display("", log_only=True) - #callbacks.display(" ".join(sys.argv), log_only=True) - #callbacks.display("", log_only=True) + + display = Display() + #display.display(" ".join(sys.argv), log_only=True) try: - cli = Cli() + cli = Cli(display=display) (options, args) = cli.parse() - result = cli.run(options, args) - - except AnsibleError, e: - print(e) + sys.exit(cli.run(options, args)) except AnsibleError as e: + display.display("[ERROR]: %s" % e, color='red', stderr=True) sys.exit(1) - - except Exception, e: - # Generic handler for errors - print("ERROR: %s" % str(e)) + except KeyboardInterrupt: + display.display("[ERROR]: interrupted", color='red', stderr=True) sys.exit(1) - - sys.exit(result) From b1e6aaa7903c01b5839af9e7aad4ae1ca0fbc681 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 4 Apr 2015 15:54:54 -0400 Subject: [PATCH 0253/3617] implemented verbosity, added 5th level and now can see how many plays per playbook if -vvvvv --- v2/ansible/executor/playbook_executor.py | 1 + v2/ansible/utils/display.py | 14 +++++++------- v2/bin/ansible | 5 +++-- v2/bin/ansible-playbook | 3 ++- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 94bdbf01e1f1ff..ad9570963aad80 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -127,6 +127,7 @@ def run(self): if entry: entrylist.append(entry) # per playbook + self._display.vvvvv('%d plays in %s' % (i, playbook_path)) if entrylist: return entrylist diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index
dd44d61dd30eca..62dbeabca51f69 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -26,11 +26,9 @@ class Display: - def __init__(self, conn_info=None): - if conn_info: - self._verbosity = conn_info.verbosity - else: - self._verbosity = 0 + def __init__(self, verbosity=0): + + self.verbosity = verbosity # list of all deprecation messages to prevent duplicate display self._deprecations = {} @@ -70,10 +68,13 @@ def vvv(self, msg, host=None): def vvvv(self, msg, host=None): return self.verbose(msg, host=host, caplevel=3) + def vvvvv(self, msg, host=None): + return self.verbose(msg, host=host, caplevel=4) + def verbose(self, msg, host=None, caplevel=2): # FIXME: this needs to be implemented #msg = utils.sanitize_output(msg) - if self._verbosity > caplevel: + if self.verbosity > caplevel: if host is None: self.display(msg, color='blue') else: @@ -124,4 +125,3 @@ def banner(self, msg, color=None): star_len = 3 stars = "*" * star_len self.display("\n%s %s" % (msg, stars), color=color) - diff --git a/v2/bin/ansible b/v2/bin/ansible index 415a12af2c2359..7d2f01bc5c5e7c 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -72,6 +72,7 @@ class Cli(object): parser.print_help() sys.exit(1) + display.verbosity = options.verbosity validate_conflicts(parser,options) return (options, args) @@ -109,7 +110,7 @@ class Cli(object): if options.listhosts: for host in hosts: - self.display(' %s' % host.name) + self.display.display(' %s' % host.name) sys.exit(0) if ((options.module_name == 'command' or options.module_name == 'shell') and not options.module_args): @@ -163,7 +164,7 @@ class Cli(object): if __name__ == '__main__': display = Display() - #display.display(" ".join(sys.argv), log_only=True) + #display.display(" ".join(sys.argv)) try: cli = Cli(display=display) diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index 49748129e125a2..79c2eed785df52 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -53,6 +53,7 @@ def 
main(display, args): parser.print_help(file=sys.stderr) return 1 + display.verbosity = options.verbosity validate_conflicts(parser,options) # Note: slightly wrong, this is written so that implicit localhost @@ -154,7 +155,7 @@ def main(display, args): if __name__ == "__main__": display = Display() - display.display(" ".join(sys.argv), log_only=True) + #display.display(" ".join(sys.argv), log_only=True) try: sys.exit(main(display, sys.argv[1:])) From 4bc79a746ad6f1f9841b6f637d45f69155babf69 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 4 Apr 2015 16:26:05 -0400 Subject: [PATCH 0254/3617] more fine tuning on verbosity --- v2/ansible/executor/playbook_executor.py | 6 ++++-- v2/ansible/plugins/connections/__init__.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index ad9570963aad80..9f02cddddb6b10 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -70,7 +70,10 @@ def run(self): entry['plays'] = [] i = 1 - for play in pb.get_plays(): + plays = pb.get_plays() + self._display.vv('%d plays in %s' % (len(plays), playbook_path)) + + for play in plays: self._inventory.remove_restriction() # Create a temporary copy of the play here, so we can run post_validate @@ -127,7 +130,6 @@ def run(self): if entry: entrylist.append(entry) # per playbook - self._display.vvvvv('%d plays in %s' % (i, playbook_path)) if entrylist: return entrylist diff --git a/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py index 11015d7431338d..74ff693a331944 100644 --- a/v2/ansible/plugins/connections/__init__.py +++ b/v2/ansible/plugins/connections/__init__.py @@ -39,7 +39,7 @@ class ConnectionBase: def __init__(self, connection_info, *args, **kwargs): self._connection_info = connection_info - self._display = Display(connection_info) + self._display = Display(verbosity=connection_info.verbosity) def
_become_method_supported(self, become_method): From e82ba723e2a8c1dd1b7b4eb218ed15cc3235f0bc Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 5 Apr 2015 01:05:17 -0500 Subject: [PATCH 0255/3617] Fixing multiple v2 bugs --- v2/ansible/executor/play_iterator.py | 11 +- v2/ansible/executor/playbook_executor.py | 3 +- v2/ansible/executor/task_queue_manager.py | 20 ++-- v2/ansible/plugins/action/assemble.py | 2 +- v2/ansible/plugins/strategies/__init__.py | 1 - v2/ansible/plugins/strategies/free.py | 135 ++++++++++++++-------- v2/samples/test_free.yml | 10 ++ v2/samples/test_pb.yml | 44 ++----- 8 files changed, 123 insertions(+), 103 deletions(-) create mode 100644 v2/samples/test_free.yml diff --git a/v2/ansible/executor/play_iterator.py b/v2/ansible/executor/play_iterator.py index d6fe3750955943..38bebb21132c9c 100644 --- a/v2/ansible/executor/play_iterator.py +++ b/v2/ansible/executor/play_iterator.py @@ -88,18 +88,11 @@ class PlayIterator: FAILED_ALWAYS = 8 def __init__(self, inventory, play): - # FIXME: should we save the post_validated play from below here instead? 
self._play = play - # post validate the play, as we need some fields to be finalized now - # so that we can use them to setup the iterator properly - all_vars = inventory._variable_manager.get_vars(loader=inventory._loader, play=play) - new_play = play.copy() - new_play.post_validate(all_vars, fail_on_undefined=False) - - self._blocks = new_play.compile() + self._blocks = self._play.compile() self._host_states = {} - for host in inventory.get_hosts(new_play.hosts): + for host in inventory.get_hosts(self._play.hosts): self._host_states[host.name] = HostState(blocks=self._blocks) def get_host_state(self, host): diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 9f02cddddb6b10..6504fddfc8217a 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -124,7 +124,7 @@ def run(self): break if result != 0: - raise AnsibleError("Play failed!: %d" % result) + break i = i + 1 # per play @@ -138,7 +138,6 @@ def run(self): if self._tqm is not None: self._cleanup() - #TODO: move to callback # FIXME: this stat summary stuff should be cleaned up and moved # to a new method, if it even belongs here... self._display.banner("PLAY RECAP") diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index 28904676eb28d4..d0354786da9b3b 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -123,7 +123,8 @@ def _initialize_notified_handlers(self, handlers): # FIXME: there is a block compile helper for this... handler_list = [] for handler_block in handlers: - handler_list.extend(handler_block.compile()) + for handler in handler_block.block: + handler_list.append(handler) # then initalize it with the handler names from the handler list for handler in handler_list: @@ -138,23 +139,28 @@ def run(self, play): are done with the current task). 
''' - connection_info = ConnectionInformation(play, self._options) + all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) + + new_play = play.copy() + new_play.post_validate(all_vars, fail_on_undefined=False) + + connection_info = ConnectionInformation(new_play, self._options) for callback_plugin in self._callback_plugins: if hasattr(callback_plugin, 'set_connection_info'): callback_plugin.set_connection_info(connection_info) - self.send_callback('v2_playbook_on_play_start', play) + self.send_callback('v2_playbook_on_play_start', new_play) # initialize the shared dictionary containing the notified handlers - self._initialize_notified_handlers(play.handlers) + self._initialize_notified_handlers(new_play.handlers) # load the specified strategy (or the default linear one) - strategy = strategy_loader.get(play.strategy, self) + strategy = strategy_loader.get(new_play.strategy, self) if strategy is None: - raise AnsibleError("Invalid play strategy specified: %s" % play.strategy, obj=play._ds) + raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds) # build the iterator - iterator = PlayIterator(inventory=self._inventory, play=play) + iterator = PlayIterator(inventory=self._inventory, play=new_play) # and run the play using the strategy return strategy.run(iterator, connection_info) diff --git a/v2/ansible/plugins/action/assemble.py b/v2/ansible/plugins/action/assemble.py index b1bdc06c6d3873..638d4b92bb5568 100644 --- a/v2/ansible/plugins/action/assemble.py +++ b/v2/ansible/plugins/action/assemble.py @@ -90,7 +90,7 @@ def run(self, tmp=None, task_vars=dict()): src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src) else: # the source is local, so expand it here - src = os.path.expanduser(src) + src = self._loader.path_dwim(os.path.expanduser(src)) _re = None if regexp is not None: diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py index 
59c0b9b84eef6a..afbc373f4f3332 100644 --- a/v2/ansible/plugins/strategies/__init__.py +++ b/v2/ansible/plugins/strategies/__init__.py @@ -390,7 +390,6 @@ def run_handlers(self, iterator, connection_info): # of handlers based on the notified list for handler_block in iterator._play.handlers: - debug("handlers are: %s" % handlers) # FIXME: handlers need to support the rescue/always portions of blocks too, # but this may take some work in the iterator and gets tricky when # we consider the ability of meta tasks to flush handlers diff --git a/v2/ansible/plugins/strategies/free.py b/v2/ansible/plugins/strategies/free.py index 6aab495fec3ed1..4fd8a132018ca3 100644 --- a/v2/ansible/plugins/strategies/free.py +++ b/v2/ansible/plugins/strategies/free.py @@ -22,6 +22,7 @@ import time from ansible.plugins.strategies import StrategyBase +from ansible.utils.debug import debug class StrategyModule(StrategyBase): @@ -42,66 +43,106 @@ def run(self, iterator, connection_info): # the last host to be given a task last_host = 0 + result = True + work_to_do = True while work_to_do and not self._tqm._terminated: - hosts_left = self.get_hosts_remaining() + hosts_left = self.get_hosts_remaining(iterator._play) if len(hosts_left) == 0: - self._callback.playbook_on_no_hosts_remaining() + self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') + result = False break - # using .qsize() is a best estimate anyway, due to the - # multiprocessing/threading concerns (per the python docs) - if 1: #if self._job_queue.qsize() < len(hosts_left): - - work_to_do = False # assume we have no more work to do - starting_host = last_host # save current position so we know when we've - # looped back around and need to break - - # try and find an unblocked host with a task to run - while True: - host = hosts_left[last_host] - host_name = host.get_name() - - # peek at the next task for the host, to see if there's - # anything to do do for this host - if host_name not in self._tqm._failed_hosts and 
host_name not in self._tqm._unreachable_hosts and iterator.get_next_task_for_host(host, peek=True): - - # FIXME: check task tags, etc. here as we do in linear - # FIXME: handle meta tasks here, which will require a tweak - # to run_handlers so that only the handlers on this host - # are flushed and not all - - # set the flag so the outer loop knows we've still found - # some work which needs to be done - work_to_do = True - - # check to see if this host is blocked (still executing a previous task) - if not host_name in self._blocked_hosts: - # pop the task, mark the host blocked, and queue it - self._blocked_hosts[host_name] = True - task = iterator.get_next_task_for_host(host) - #self._callback.playbook_on_task_start(task.get_name(), False) - self._queue_task(iterator._play, host, task, connection_info) - - # move on to the next host and make sure we - # haven't gone past the end of our hosts list - last_host += 1 - if last_host > len(hosts_left) - 1: - last_host = 0 - - # if we've looped around back to the start, break out - if last_host == starting_host: - break + work_to_do = False # assume we have no more work to do + starting_host = last_host # save current position so we know when we've + # looped back around and need to break + + # try and find an unblocked host with a task to run + host_results = [] + while True: + host = hosts_left[last_host] + debug("next free host: %s" % host) + host_name = host.get_name() + + # peek at the next task for the host, to see if there's + # anything to do do for this host + (state, task) = iterator.get_next_task_for_host(host, peek=True) + debug("free host state: %s" % state) + debug("free host task: %s" % task) + if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task: + + # set the flag so the outer loop knows we've still found + # some work which needs to be done + work_to_do = True + + debug("this host has work to do") + + # check to see if this host is blocked (still 
executing a previous task) + if not host_name in self._blocked_hosts: + # pop the task, mark the host blocked, and queue it + self._blocked_hosts[host_name] = True + (state, task) = iterator.get_next_task_for_host(host) + + debug("getting variables") + task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) + debug("done getting variables") + + # check to see if this task should be skipped, due to it being a member of a + # role which has already run (and whether that role allows duplicate execution) + if task._role and task._role.has_run(): + # If there is no metadata, the default behavior is to not allow duplicates, + # if there is metadata, check to see if the allow_duplicates flag was set to true + if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates: + debug("'%s' skipped because role has already run" % task) + continue + + if not task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, task_vars) and task.action != 'setup': + debug("'%s' failed tag evaluation" % task) + continue + + if task.action == 'meta': + # meta tasks store their args in the _raw_params field of args, + # since they do not use k=v pairs, so get that + meta_action = task.args.get('_raw_params') + if meta_action == 'noop': + # FIXME: issue a callback for the noop here? 
+ continue + elif meta_action == 'flush_handlers': + # FIXME: in the 'free' mode, flushing handlers should result in + # only those handlers notified for the host doing the flush + self.run_handlers(iterator, connection_info) + else: + raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds) + + self._blocked_hosts[host_name] = False + else: + self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False) + self._queue_task(host, task, task_vars, connection_info) + + # move on to the next host and make sure we + # haven't gone past the end of our hosts list + last_host += 1 + if last_host > len(hosts_left) - 1: + last_host = 0 + + # if we've looped around back to the start, break out + if last_host == starting_host: + break + + results = self._process_pending_results(iterator) + host_results.extend(results) # pause briefly so we don't spin lock time.sleep(0.05) try: - self._wait_for_pending_results() - except: + results = self._wait_on_pending_results(iterator) + host_results.extend(results) + except Exception, e: # FIXME: ctrl+c can cause some failures here, so catch them # with the appropriate error type + print("wtf: %s" % e) pass # run the base class run() method, which executes the cleanup function diff --git a/v2/samples/test_free.yml b/v2/samples/test_free.yml new file mode 100644 index 00000000000000..d5f8bcaac944c3 --- /dev/null +++ b/v2/samples/test_free.yml @@ -0,0 +1,10 @@ +- hosts: all + strategy: free + gather_facts: no + tasks: + - debug: msg="all hosts should print this" + - pause: seconds=5 + when: inventory_hostname == 'l2' + - pause: seconds=10 + when: inventory_hostname == 'l3' + - debug: msg="and we're done" diff --git a/v2/samples/test_pb.yml b/v2/samples/test_pb.yml index 3912d4566b293b..ab5b7ab2954f87 100644 --- a/v2/samples/test_pb.yml +++ b/v2/samples/test_pb.yml @@ -1,12 +1,7 @@ # will use linear strategy by default -- hosts: - - "{{hosts|default('all')}}" - #- ubuntu1404 - #- awxlocal - 
connection: ssh +- hosts: "{{hosts|default('all')}}" #gather_facts: false - #strategy: free - #serial: 3 + strategy: "{{strategy|default('linear')}}" vars: play_var: foo test_dict: @@ -15,14 +10,9 @@ vars_files: - testing/vars.yml tasks: - - block: - - debug: var=ansible_nodename - when: ansible_nodename == "ubuntu1404" - block: - debug: msg="in block for {{inventory_hostname}} ({{ansible_nodename}}), group_var is {{group_var}}, host var is {{host_var}}" notify: foo - - debug: msg="test dictionary is {{test_dict}}" - when: asdf is defined - command: hostname register: hostname_result - debug: msg="registered result is {{hostname_result.stdout}}" @@ -31,26 +21,18 @@ sudo_user: testing - assemble: src=./testing/ dest=/tmp/output.txt remote_src=no - copy: content="hello world\n" dest=/tmp/copy_content.out mode=600 - - command: /bin/false - retries: "{{num_retries|default(5)}}" - delay: 1 - - debug: msg="you shouldn't see me" + #- command: /bin/false + # retries: "{{num_retries|default(5)}}" + # delay: 1 + #- debug: msg="you shouldn't see me" rescue: - debug: msg="this is the rescue" - command: /bin/false - debug: msg="you should not see this rescue message" always: - debug: msg="this is the always block, it should always be seen" - - command: /bin/false - - debug: msg="you should not see this always message" - - #- debug: msg="linear task 01" - #- debug: msg="linear task 02" - #- debug: msg="linear task 03" - # with_items: - # - a - # - b - # - c + #- command: /bin/false + #- debug: msg="you should not see this always message" handlers: - name: foo @@ -58,13 +40,3 @@ - name: bar debug: msg="this is the bar handler, you should not see this" -#- hosts: all -# connection: local -# strategy: free -# tasks: -# - ping: -# - command: /bin/false -# - debug: msg="free task 01" -# - debug: msg="free task 02" -# - debug: msg="free task 03" - From bb3f50361e4c616e57550b15ed609738a7d00ae8 Mon Sep 17 00:00:00 2001 From: Mohamed Hazem Date: Sun, 5 Apr 2015 20:47:22 +0300 Subject: 
[PATCH 0256/3617] Replaced --start-at with --start-at-task --- docsite/rst/playbooks_startnstep.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_startnstep.rst b/docsite/rst/playbooks_startnstep.rst index 1067c3e121452b..106fd2d5de4109 100644 --- a/docsite/rst/playbooks_startnstep.rst +++ b/docsite/rst/playbooks_startnstep.rst @@ -8,9 +8,9 @@ This shows a few alternative ways to run playbooks. These modes are very useful Start-at-task ````````````` -If you want to start executing your playbook at a particular task, you can do so with the ``--start-at`` option:: +If you want to start executing your playbook at a particular task, you can do so with the ``--start-at-task`` option:: - ansible-playbook playbook.yml --start-at="install packages" + ansible-playbook playbook.yml --start-at-task="install packages" The above will start executing your playbook at a task named "install packages". From e79c9202602f123375dbbdeaef205ec10b74f597 Mon Sep 17 00:00:00 2001 From: Joost Molenaar Date: Tue, 19 Aug 2014 12:04:27 +0200 Subject: [PATCH 0257/3617] Add support for Arch to module_utils.basic.py Fixes ansible/ansible#8653 --- lib/ansible/module_utils/basic.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index aaaf85e5e057e5..eeb64d972485ed 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -181,7 +181,8 @@ def get_distribution(): ''' return the distribution name ''' if platform.system() == 'Linux': try: - distribution = platform.linux_distribution()[0].capitalize() + supported_dists = platform._supported_dists + ('arch',) + distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize() if not distribution and os.path.isfile('/etc/system-release'): distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize() if 'Amazon' in distribution: From 
c7f33627950d352cdca46f71c93b0783981b8c89 Mon Sep 17 00:00:00 2001 From: Johannes 'fish' Ziemke Date: Mon, 6 Apr 2015 14:43:39 +0200 Subject: [PATCH 0258/3617] Replace - in ec2 inventory as well Dash (-) is not a variable ansible group name, so it needs to be replaced as well. --- plugins/inventory/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index e93df1053d1e53..76871b0266dba0 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -787,7 +787,7 @@ def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - return re.sub("[^A-Za-z0-9\-]", "_", word) + return re.sub("[^A-Za-z0-9\_]", "_", word) def json_format_dict(self, data, pretty=False): From 5150d83d01166b498af050c6806b83c94ed5e906 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 6 Apr 2015 12:15:07 -0500 Subject: [PATCH 0259/3617] Fixing the version in lib/ --- lib/ansible/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py index 27e79a41cadf52..200ecb79e361d6 100644 --- a/lib/ansible/__init__.py +++ b/lib/ansible/__init__.py @@ -14,5 +14,5 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-__version__ = '1.9' +__version__ = '2.0' __author__ = 'Michael DeHaan' From 2244178c6da5faa5a235b1dfcf292521e8f6823c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 6 Apr 2015 14:09:49 -0500 Subject: [PATCH 0260/3617] Updating debian packaging changelog for devel 2.0 version --- packaging/debian/changelog | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/packaging/debian/changelog b/packaging/debian/changelog index 843ca7f6f5ef50..aa03e724d07e3f 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -1,8 +1,14 @@ -ansible (1.9) unstable; urgency=low +ansible (2.0.0) unstable; urgency=low - * 1.9 release (PENDING) + * 2.0.0 (in progress) - -- Ansible, Inc. Wed, 21 Oct 2015 04:29:00 -0500 + -- Ansible, Inc. Fri, 01 Jan 2016 00:00:00 -0500 + +ansible (1.9.0.1) unstable; urgency=low + + * 1.9 release + + -- Ansible, Inc. Wed, 25 Mar 2015 17:00:00 -0500 ansible (1.8.4) unstable; urgency=low From 43775daa4bbcf6c02cfefa491250b1619701f1bf Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Mon, 6 Apr 2015 16:47:52 -0400 Subject: [PATCH 0261/3617] Fix indentation --- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 40be989241f6d2..628d1dd267833c 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2452,7 +2452,7 @@ def get_virtual_facts(self): self.facts['virtualization_role'] = 'guest' return - if sys_vendor == 'oVirt': + if sys_vendor == 'oVirt': self.facts['virtualization_type'] = 'kvm' self.facts['virtualization_role'] = 'guest' return From f6c116a81fc19ed1470901b153a72b411b0e8cef Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 6 Apr 2015 18:30:38 -0500 Subject: [PATCH 0262/3617] Updating version to contain the full major/release --- lib/ansible/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/lib/ansible/__init__.py b/lib/ansible/__init__.py index 200ecb79e361d6..ba5ca83b7231d1 100644 --- a/lib/ansible/__init__.py +++ b/lib/ansible/__init__.py @@ -14,5 +14,5 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -__version__ = '2.0' +__version__ = '2.0.0' __author__ = 'Michael DeHaan' From d732c94ac23be49e71df1410027b3f39f9d86b68 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 6 Apr 2015 22:31:55 -0400 Subject: [PATCH 0263/3617] a bunch of updates to connection info and related, to pass down passwords also now options populate required fields in required order allowing play to override added capture of debug in action plugins when stdout is not json --- v2/ansible/executor/connection_info.py | 77 +++++++++++++++-------- v2/ansible/executor/playbook_executor.py | 5 +- v2/ansible/executor/task_queue_manager.py | 5 +- v2/ansible/playbook/play.py | 2 +- v2/ansible/plugins/action/__init__.py | 6 +- v2/ansible/plugins/connections/local.py | 3 + v2/ansible/plugins/connections/ssh.py | 4 +- v2/bin/ansible | 3 +- v2/bin/ansible-playbook | 2 +- 9 files changed, 72 insertions(+), 35 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index 165cd1245fb44c..19c8b130c72f66 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -38,34 +38,40 @@ class ConnectionInformation: connection/authentication information. ''' - def __init__(self, play=None, options=None): - # FIXME: implement the new methodology here for supporting - # various different auth escalation methods (becomes, etc.) 
+ def __init__(self, play=None, options=None, passwords=None): - self.connection = C.DEFAULT_TRANSPORT + if passwords is None: + passwords = {} + + # connection + self.connection = None self.remote_addr = None - self.remote_user = 'root' - self.password = '' - self.port = 22 + self.remote_user = None + self.password = passwords.get('conn_pass','') + self.port = None self.private_key_file = None - self.verbosity = 0 - self.only_tags = set() - self.skip_tags = set() # privilege escalation - self.become = False - self.become_method = C.DEFAULT_BECOME_METHOD - self.become_user = '' - self.become_pass = '' + self.become = None + self.become_method = None + self.become_user = None + self.become_pass = passwords.get('become_pass','') + # general flags (should we move out?) + self.verbosity = 0 + self.only_tags = set() + self.skip_tags = set() self.no_log = False self.check_mode = False + #TODO: just pull options setup to above? + # set options before play to allow play to override them + if options: + self.set_options(options) + if play: self.set_play(play) - if options: - self.set_options(options) def __repr__(self): value = "CONNECTION INFO:\n" @@ -84,12 +90,18 @@ def set_play(self, play): if play.connection: self.connection = play.connection - self.remote_user = play.remote_user - self.password = '' - self.port = int(play.port) if play.port else 22 - self.become = play.become - self.become_method = play.become_method - self.become_user = play.become_user + if play.remote_user: + self.remote_user = play.remote_user + + if play.port: + self.port = int(play.port) + + if play.become is not None: + self.become = play.become + if play.become_method: + self.become_method = play.become_method + if play.become_user: + self.become_user = play.become_user self.become_pass = play.become_pass # non connection related @@ -103,15 +115,30 @@ def set_options(self, options): higher precedence than those set on the play or host. ''' - # FIXME: set other values from options here? 
- - self.verbosity = options.verbosity if options.connection: self.connection = options.connection + self.remote_user = options.remote_user + #if 'port' in options and options.port is not None: + # self.port = options.port + self.private_key_file = None + + # privilege escalation + self.become = options.become + self.become_method = options.become_method + self.become_user = options.become_user + self.become_pass = '' + + # general flags (should we move out?) + if options.verbosity: + self.verbosity = options.verbosity + #if options.no_log: + # self.no_log = boolean(options.no_log) if options.check: self.check_mode = boolean(options.check) + + # get the tag info from options, converting a comma-separated list # of values into a proper list if need be. We check to see if the # options have the attribute, as it is not always added via the CLI diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 6504fddfc8217a..40c0798b0034b4 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -36,18 +36,19 @@ class PlaybookExecutor: basis for bin/ansible-playbook operation. 
''' - def __init__(self, playbooks, inventory, variable_manager, loader, display, options): + def __init__(self, playbooks, inventory, variable_manager, loader, display, options, conn_pass, become_pass): self._playbooks = playbooks self._inventory = inventory self._variable_manager = variable_manager self._loader = loader self._display = display self._options = options + self.passwords = {'conn_pass': conn_pass, 'become_pass': become_pass} if options.listhosts or options.listtasks or options.listtags: self._tqm = None else: - self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, display=display, options=options) + self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords) def run(self): diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index d0354786da9b3b..026726b3d8e02d 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -48,7 +48,7 @@ class TaskQueueManager: which dispatches the Play's tasks to hosts. 
''' - def __init__(self, inventory, callback, variable_manager, loader, display, options): + def __init__(self, inventory, callback, variable_manager, loader, display, options, passwords): self._inventory = inventory self._variable_manager = variable_manager @@ -56,6 +56,7 @@ def __init__(self, inventory, callback, variable_manager, loader, display, optio self._display = display self._options = options self._stats = AggregateStats() + self.passwords = passwords # a special flag to help us exit cleanly self._terminated = False @@ -144,7 +145,7 @@ def run(self, play): new_play = play.copy() new_play.post_validate(all_vars, fail_on_undefined=False) - connection_info = ConnectionInformation(new_play, self._options) + connection_info = ConnectionInformation(new_play, self._options, self.passwords) for callback_plugin in self._callback_plugins: if hasattr(callback_plugin, 'set_connection_info'): callback_plugin.set_connection_info(connection_info) diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index eeabfce062a4d6..33fd5efd9fa417 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -61,7 +61,7 @@ class Play(Base, Taggable, Become): _hosts = FieldAttribute(isa='list', default=[], required=True) _name = FieldAttribute(isa='string', default='') _port = FieldAttribute(isa='int', default=22) - _remote_user = FieldAttribute(isa='string', default='root') + _remote_user = FieldAttribute(isa='string') # Variable Attributes _vars = FieldAttribute(isa='dict', default=dict()) diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index 2d258dd5250a9b..2f56c4df582eb6 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -415,7 +415,11 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_ # FIXME: in error situations, the stdout may not contain valid data, so we # should check for bad rc codes better to catch this here if 
'stdout' in res and res['stdout'].strip(): - data = json.loads(self._filter_leading_non_json_lines(res['stdout'])) + try: + data = json.loads(self._filter_leading_non_json_lines(res['stdout'])) + except ValueError: + # not valid json, lets try to capture error + data = {'traceback': res['stdout']} if 'parsed' in data and data['parsed'] == False: data['msg'] += res['stderr'] # pre-split stdout into lines, if stdout is in the data and there diff --git a/v2/ansible/plugins/connections/local.py b/v2/ansible/plugins/connections/local.py index c847ee79d5d0ef..31d0b296e4aee1 100644 --- a/v2/ansible/plugins/connections/local.py +++ b/v2/ansible/plugins/connections/local.py @@ -37,6 +37,9 @@ def get_transport(self): def connect(self, port=None): ''' connect to the local host; nothing to do here ''' + + self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._connection_info.remote_addr) + return self def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py index e233a704f987a4..e59311ead96df6 100644 --- a/v2/ansible/plugins/connections/ssh.py +++ b/v2/ansible/plugins/connections/ssh.py @@ -57,7 +57,7 @@ def get_transport(self): def connect(self): ''' connect to the remote host ''' - self._display.vvv("ESTABLISH CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._connection_info.remote_addr) + self._display.vvv("ESTABLISH SSH CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._connection_info.remote_addr) self._common_args = [] extra_args = C.ANSIBLE_SSH_ARGS @@ -99,7 +99,7 @@ def connect(self): self._common_args += ["-o", "KbdInteractiveAuthentication=no", "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey", "-o", "PasswordAuthentication=no"] - if self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]: + if 
self._connection_info.remote_user is not None and self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]: self._common_args += ["-o", "User="+self._connection_info.remote_user] # FIXME: figure out where this goes #self._common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout] diff --git a/v2/bin/ansible b/v2/bin/ansible index 7d2f01bc5c5e7c..9b3ccd38be673b 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -93,6 +93,7 @@ class Cli(object): normalize_become_options(options) (sshpass, becomepass, vault_pass) = ask_passwords(options) + passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } if options.vault_password_file: # read vault_pass from a file @@ -138,7 +139,7 @@ class Cli(object): # now create a task queue manager to execute the play try: display = Display() - tqm = TaskQueueManager(inventory=inventory, callback='minimal', variable_manager=variable_manager, loader=loader, display=display, options=options) + tqm = TaskQueueManager(inventory=inventory, callback='minimal', variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords) result = tqm.run(play) tqm.cleanup() except AnsibleError: diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index 79c2eed785df52..000a0b74c7a84c 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -127,7 +127,7 @@ def main(display, args): raise errors.AnsibleError("Specified --limit does not match any hosts") # create the playbook executor, which manages running the plays via a task queue manager - pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options) + pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, conn_pass=sshpass, become_pass=becomepass) results = pbex.run() From 7076298dc1eb03fbf6bea1fe5f58fcdc2a6b54e0 Mon Sep 17 00:00:00 2001 From: James 
Cammarata Date: Mon, 6 Apr 2015 22:27:14 -0500 Subject: [PATCH 0264/3617] Adding FIXME note to playbook executor code regarding password params --- v2/ansible/executor/playbook_executor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 40c0798b0034b4..20aad364766ba5 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -36,6 +36,8 @@ class PlaybookExecutor: basis for bin/ansible-playbook operation. ''' + # FIXME: passwords should not be passed in piecemeal like this, + # if they're just going to be stuck in a dict later. def __init__(self, playbooks, inventory, variable_manager, loader, display, options, conn_pass, become_pass): self._playbooks = playbooks self._inventory = inventory From faadb6830899138de2dfcfca3973a898c5ace3a2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 6 Apr 2015 23:37:32 -0400 Subject: [PATCH 0265/3617] backup_local now only tries to back up exising files, returns '' otherwise --- lib/ansible/module_utils/basic.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index aaaf85e5e057e5..54a1a9cfff7f88 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1297,14 +1297,18 @@ def sha256(self, filename): def backup_local(self, fn): '''make a date-marked backup of the specified file, return True or False on success or failure''' - # backups named basename-YYYY-MM-DD@HH:MM:SS~ - ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time())) - backupdest = '%s.%s' % (fn, ext) - try: - shutil.copy2(fn, backupdest) - except (shutil.Error, IOError), e: - self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e)) + backupdest = '' + if os.path.exists(fn): + # backups named basename-YYYY-MM-DD@HH:MM:SS~ + ext = 
time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time())) + backupdest = '%s.%s' % (fn, ext) + + try: + shutil.copy2(fn, backupdest) + except (shutil.Error, IOError), e: + self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e)) + return backupdest def cleanup(self, tmpfile): From 9409cc74432e4841b469481ffb250ee4459ef2cc Mon Sep 17 00:00:00 2001 From: Kimmo Koskinen Date: Tue, 7 Apr 2015 14:26:42 +0300 Subject: [PATCH 0266/3617] Use codecs module while reading & writing json cache file --- lib/ansible/cache/jsonfile.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py index b7d72c8d2e865d..93ee69903beff6 100644 --- a/lib/ansible/cache/jsonfile.py +++ b/lib/ansible/cache/jsonfile.py @@ -18,6 +18,7 @@ import os import time import errno +import codecs try: import simplejson as json @@ -57,7 +58,7 @@ def get(self, key): cachefile = "%s/%s" % (self._cache_dir, key) try: - f = open( cachefile, 'r') + f = codecs.open(cachefile, 'r', encoding='utf-8') except (OSError,IOError), e: utils.warning("error while trying to write to %s : %s" % (cachefile, str(e))) else: @@ -73,7 +74,7 @@ def set(self, key, value): cachefile = "%s/%s" % (self._cache_dir, key) try: - f = open(cachefile, 'w') + f = codecs.open(cachefile, 'w', encoding='utf-8') except (OSError,IOError), e: utils.warning("error while trying to read %s : %s" % (cachefile, str(e))) else: From b8a9d87f30c86b7737b3cf63c4de67fd8547ce0e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Apr 2015 08:22:56 -0500 Subject: [PATCH 0267/3617] Fixing the VERSION file to match the expected "version release" format --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index cd5ac039d67e0b..a4b5d82d9e5211 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0 +2.0.0 0.0.pre From 1cf911d5244bc15640823bfa59acd08c421d7940 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: 
Tue, 7 Apr 2015 09:54:19 -0500 Subject: [PATCH 0268/3617] Back-porting Makefile changes for version/release --- Makefile | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 81e24efab367d5..636986028e8628 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,8 @@ PYTHON=python SITELIB = $(shell $(PYTHON) -c "from distutils.sysconfig import get_python_lib; print get_python_lib()") # VERSION file provides one place to update the software version -VERSION := $(shell cat VERSION) +VERSION := $(shell cat VERSION | cut -f1 -d' ') +RELEASE := $(shell cat VERSION | cut -f2 -d' ') # Get the branch information from git ifneq ($(shell which git),) @@ -53,7 +54,7 @@ DEBUILD_OPTS = --source-option="-I" DPUT_BIN ?= dput DPUT_OPTS ?= ifeq ($(OFFICIAL),yes) - DEB_RELEASE = 1ppa + DEB_RELEASE = $(RELEASE)ppa # Sign OFFICIAL builds using 'DEBSIGN_KEYID' # DEBSIGN_KEYID is required when signing ifneq ($(DEBSIGN_KEYID),) @@ -74,7 +75,7 @@ DEB_DIST ?= unstable RPMSPECDIR= packaging/rpm RPMSPEC = $(RPMSPECDIR)/ansible.spec RPMDIST = $(shell rpm --eval '%{?dist}') -RPMRELEASE = 1 +RPMRELEASE = $(RELEASE) ifneq ($(OFFICIAL),yes) RPMRELEASE = 0.git$(DATE) endif From 72457e4326b51cd6066dbdeea75755de0d1a4caf Mon Sep 17 00:00:00 2001 From: John Galt Date: Tue, 7 Apr 2015 12:19:37 -0400 Subject: [PATCH 0269/3617] Fixed typo --- plugins/inventory/ec2.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini index 523a80ed8334bf..1866f0bf3d6c27 100644 --- a/plugins/inventory/ec2.ini +++ b/plugins/inventory/ec2.ini @@ -33,7 +33,7 @@ destination_variable = public_dns_name # has 'subnet_id' set, this variable is used. If the subnet is public, setting # this to 'ip_address' will return the public IP address. For instances in a # private subnet, this should be set to 'private_ip_address', and Ansible must -# be run from with EC2. 
The key of an EC2 tag may optionally be used; however +# be run from within EC2. The key of an EC2 tag may optionally be used; however # the boto instance variables hold precedence in the event of a collision. vpc_destination_variable = ip_address From 665babdaab7fc5949cf319f66854711b1bc01a60 Mon Sep 17 00:00:00 2001 From: Mengdi Gao Date: Wed, 8 Apr 2015 14:19:45 +0800 Subject: [PATCH 0270/3617] Remove redundant whitespace. --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 4751467b016857..4e10528b8c65cd 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -334,7 +334,7 @@ Here's an example handlers section:: handlers: - name: restart memcached - service: name=memcached state=restarted + service: name=memcached state=restarted - name: restart apache service: name=apache state=restarted From 3c9890a35893f63ff7ba61ba1795d3fa1fbaa8f6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Apr 2015 03:16:13 -0400 Subject: [PATCH 0271/3617] now in v2 everything passes a single passwords hash --- v2/ansible/executor/playbook_executor.py | 6 ++---- v2/bin/ansible-playbook | 3 ++- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 20aad364766ba5..8af19ed378fa3d 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -36,16 +36,14 @@ class PlaybookExecutor: basis for bin/ansible-playbook operation. ''' - # FIXME: passwords should not be passed in piecemeal like this, - # if they're just going to be stuck in a dict later. 
- def __init__(self, playbooks, inventory, variable_manager, loader, display, options, conn_pass, become_pass): + def __init__(self, playbooks, inventory, variable_manager, loader, display, options, passwords): self._playbooks = playbooks self._inventory = inventory self._variable_manager = variable_manager self._loader = loader self._display = display self._options = options - self.passwords = {'conn_pass': conn_pass, 'become_pass': become_pass} + self.passwords = passwords if options.listhosts or options.listtasks or options.listtags: self._tqm = None diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index 000a0b74c7a84c..d663e2e0a3fd4a 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -66,6 +66,7 @@ def main(display, args): if not options.listhosts and not options.listtasks and not options.listtags: normalize_become_options(options) (sshpass, becomepass, vault_pass) = ask_passwords(options) + passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } if options.vault_password_file: # read vault_pass from a file @@ -127,7 +128,7 @@ def main(display, args): raise errors.AnsibleError("Specified --limit does not match any hosts") # create the playbook executor, which manages running the plays via a task queue manager - pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, conn_pass=sshpass, become_pass=becomepass) + pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords) results = pbex.run() From e122236f55d8666a0ad5f9df7833597a1105beec Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Apr 2015 03:18:13 -0400 Subject: [PATCH 0272/3617] updated submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core 
b/lib/ansible/modules/core index 04c34cfa02185a..5f58240d176a74 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 04c34cfa02185a8d74165f5bdc96371ec6df37a8 +Subproject commit 5f58240d176a74b8eb0da0b45cf60e498d11ab34 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 21fce8ac730346..4048de9c1e2333 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 21fce8ac730346b4e77427e3582553f2dc93c675 +Subproject commit 4048de9c1e2333aa7880b61f34af8cbdce5cbcec From 1c796543c9d9e46c0beefb9b3f6d22d4d97f875b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Apr 2015 03:30:21 -0400 Subject: [PATCH 0273/3617] fix for when calling bootinfo throws permission errors (AIX) fixes https://github.com/ansible/ansible-modules-core/issues/1108 --- lib/ansible/module_utils/facts.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 628d1dd267833c..21bbc93d4d102a 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -172,9 +172,12 @@ def get_platform_facts(self): if self.facts['system'] == 'Linux': self.get_distribution_facts() elif self.facts['system'] == 'AIX': - rc, out, err = module.run_command("/usr/sbin/bootinfo -p") - data = out.split('\n') - self.facts['architecture'] = data[0] + try: + rc, out, err = module.run_command("/usr/sbin/bootinfo -p") + data = out.split('\n') + self.facts['architecture'] = data[0] + except: + self.facts['architecture'] = 'Not Available' elif self.facts['system'] == 'OpenBSD': self.facts['architecture'] = platform.uname()[5] From 3ae4ee9c52171d58068d90a6c11ad48ad86a8769 Mon Sep 17 00:00:00 2001 From: Niall Donegan Date: Wed, 8 Apr 2015 14:24:21 +0100 Subject: [PATCH 0274/3617] Updated outdated link to module directory. Core modules link updated and Extras link added.
--- docsite/rst/common_return_values.rst | 4 +++- docsite/rst/developing_modules.rst | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docsite/rst/common_return_values.rst b/docsite/rst/common_return_values.rst index ff2b92b4af0a92..fe147c2dee027a 100644 --- a/docsite/rst/common_return_values.rst +++ b/docsite/rst/common_return_values.rst @@ -40,8 +40,10 @@ a stdout in the results it will append a stdout_lines which is just a list or th :doc:`modules` Learn about available modules - `GitHub modules directory `_ + `GitHub Core modules directory `_ Browse source of core modules + `Github Extras modules directory `_ + Browse source of extras modules. `Mailing List `_ Development mailing list `irc.freenode.net `_ diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 82edea9de894bd..3b563ee755f42f 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -474,8 +474,10 @@ This example allows the stat module to be called with fileinfo, making the follo Learn about developing plugins :doc:`developing_api` Learn about the Python API for playbook and task execution - `GitHub modules directory `_ + `GitHub Core modules directory `_ Browse source of core modules + `Github Extras modules directory `_ + Browse source of extras modules. `Mailing List `_ Development mailing list `irc.freenode.net `_ From a3b35ed1a6e46f2f63f08476400d94026d92e2b8 Mon Sep 17 00:00:00 2001 From: Erinn Looney-Triggs Date: Wed, 8 Apr 2015 20:33:38 -0600 Subject: [PATCH 0275/3617] Small change for FreeIPA < 4.0 compatibility. 
--- plugins/inventory/freeipa.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/freeipa.py b/plugins/inventory/freeipa.py index caf336239ccd8b..05a8dba356ad6f 100755 --- a/plugins/inventory/freeipa.py +++ b/plugins/inventory/freeipa.py @@ -13,7 +13,11 @@ def initialize(): api.bootstrap(context='cli') api.finalize() - api.Backend.xmlclient.connect() + try: + api.Backend.rpcclient.connect() + except AttributeError: + #FreeIPA < 4.0 compatibility + api.Backend.xmlclient.connect() return api From bbc05a2cf5d0c72c51f62d28b4565f6da2796c1d Mon Sep 17 00:00:00 2001 From: James Laska Date: Thu, 9 Apr 2015 09:30:24 -0400 Subject: [PATCH 0276/3617] Improve generation of debian changelog --- Makefile | 3 ++- packaging/debian/changelog | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 636986028e8628..e01e1a9713c6aa 100644 --- a/Makefile +++ b/Makefile @@ -53,6 +53,7 @@ DEBUILD_BIN ?= debuild DEBUILD_OPTS = --source-option="-I" DPUT_BIN ?= dput DPUT_OPTS ?= +DEB_DATE := $(shell date +"%a, %d %b %Y %T %z") ifeq ($(OFFICIAL),yes) DEB_RELEASE = $(RELEASE)ppa # Sign OFFICIAL builds using 'DEBSIGN_KEYID' @@ -217,7 +218,7 @@ debian: sdist mkdir -p deb-build/$${DIST} ; \ tar -C deb-build/$${DIST} -xvf dist/$(NAME)-$(VERSION).tar.gz ; \ cp -a packaging/debian deb-build/$${DIST}/$(NAME)-$(VERSION)/ ; \ - sed -ie "s#^$(NAME) (\([^)]*\)) \([^;]*\);#ansible (\1-$(DEB_RELEASE)~$${DIST}) $${DIST};#" deb-build/$${DIST}/$(NAME)-$(VERSION)/debian/changelog ; \ + sed -ie "s|%VERSION%|$(VERSION)|g;s|%RELEASE%|$(DEB_RELEASE)|;s|%DIST%|$${DIST}|g;s|%DATE%|$(DEB_DATE)|g" deb-build/$${DIST}/$(NAME)-$(VERSION)/debian/changelog ; \ done deb: debian diff --git a/packaging/debian/changelog b/packaging/debian/changelog index aa03e724d07e3f..84bf7e770336c4 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -1,8 +1,9 @@ -ansible (2.0.0) unstable; urgency=low +ansible 
(%VERSION%-%RELEASE%~%DIST%) %DIST%; urgency=low - * 2.0.0 (in progress) + * %VERSION% release - -- Ansible, Inc. Fri, 01 Jan 2016 00:00:00 -0500 + -- Ansible, Inc. %DATE% ansible (1.9.0.1) unstable; urgency=low From 7f034a74d1c71907b407f00c9150850b35dba0d2 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Thu, 9 Apr 2015 13:29:38 -0400 Subject: [PATCH 0277/3617] Add -ExecutionPolicy Unrestricted back, was removed by #9602. --- lib/ansible/runner/shell_plugins/powershell.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/runner/shell_plugins/powershell.py b/lib/ansible/runner/shell_plugins/powershell.py index 50b759ae63389e..850b380eddb997 100644 --- a/lib/ansible/runner/shell_plugins/powershell.py +++ b/lib/ansible/runner/shell_plugins/powershell.py @@ -57,7 +57,7 @@ def _build_file_cmd(cmd_parts, quote_args=True): '''Build command line to run a file, given list of file name plus args.''' if quote_args: cmd_parts = ['"%s"' % x for x in cmd_parts] - return ' '.join(['&'] + cmd_parts) + return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + cmd_parts) class ShellModule(object): From 5675982b0f64cbc3bf01eff63951d1302132c6d2 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Thu, 9 Apr 2015 13:36:58 -0400 Subject: [PATCH 0278/3617] Only try kerberos auth when username contains `@` and pass realm to pywinrm. Alternative to #10644, fixes #10577.
--- lib/ansible/runner/connection_plugins/winrm.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/runner/connection_plugins/winrm.py index 7a2d6d3318ddbb..eb02d743072b0d 100644 --- a/lib/ansible/runner/connection_plugins/winrm.py +++ b/lib/ansible/runner/connection_plugins/winrm.py @@ -90,13 +90,18 @@ def _winrm_connect(self): return _winrm_cache[cache_key] exc = None for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']: - if transport == 'kerberos' and not HAVE_KERBEROS: + if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self.user): continue + if transport == 'kerberos': + realm = self.user.split('@', 1)[1].strip() or None + else: + realm = None endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', '')) vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self.host) protocol = Protocol(endpoint, transport=transport, - username=self.user, password=self.password) + username=self.user, password=self.password, + realm=realm) try: protocol.send_message('') _winrm_cache[cache_key] = protocol From 7ba2950c5ae9c51226276c6da7acac9b99757f87 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Thu, 9 Apr 2015 13:45:21 -0400 Subject: [PATCH 0279/3617] Remove winrm connection cache (only useful when running against one host). Also fixes #10391. 
--- lib/ansible/runner/connection_plugins/winrm.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/runner/connection_plugins/winrm.py index eb02d743072b0d..b41a74c8e1f994 100644 --- a/lib/ansible/runner/connection_plugins/winrm.py +++ b/lib/ansible/runner/connection_plugins/winrm.py @@ -18,8 +18,6 @@ from __future__ import absolute_import import base64 -import hashlib -import imp import os import re import shlex @@ -44,10 +42,6 @@ except ImportError: pass -_winrm_cache = { - # 'user:pwhash@host:port': -} - def vvvvv(msg, host=None): verbose(msg, host=host, caplevel=4) @@ -84,10 +78,6 @@ def _winrm_connect(self): vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \ (self.user, port, self.host), host=self.host) netloc = '%s:%d' % (self.host, port) - cache_key = '%s:%s@%s:%d' % (self.user, hashlib.md5(self.password).hexdigest(), self.host, port) - if cache_key in _winrm_cache: - vvvv('WINRM REUSE EXISTING CONNECTION: %s' % cache_key, host=self.host) - return _winrm_cache[cache_key] exc = None for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']: if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self.user): @@ -104,7 +94,6 @@ def _winrm_connect(self): realm=realm) try: protocol.send_message('') - _winrm_cache[cache_key] = protocol return protocol except WinRMTransportError, exc: err_msg = str(exc) @@ -116,7 +105,6 @@ def _winrm_connect(self): if code == 401: raise errors.AnsibleError("the username/password specified for this server was incorrect") elif code == 411: - _winrm_cache[cache_key] = protocol return protocol vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host) continue From 944690118f824247ef2cb1a7db5c1f6a23f4254e Mon Sep 17 00:00:00 2001 From: Chris Church Date: Thu, 9 Apr 2015 15:51:43 -0400 Subject: [PATCH 0280/3617] Update windows documentation to indicate how to specify kerberos vs. basic auth. 
--- docsite/rst/intro_windows.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index d96478b0a267f0..00cd8af404f038 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -57,7 +57,7 @@ In group_vars/windows.yml, define the following inventory variables:: Notice that the ssh_port is not actually for SSH, but this is a holdover variable name from how Ansible is mostly an SSH-oriented system. Again, Windows management will not happen over SSH. -If you have installed the ``kerberos`` module, Ansible will first attempt Kerberos authentication. *This uses the principal you are authenticated to Kerberos with on the control machine and not the ``ansible_ssh_user`` specified above*. If that fails, either because you are not signed into Kerberos on the control machine or because the corresponding domain account on the remote host is not available, then Ansible will fall back to "plain" username/password authentication. +If you have installed the ``kerberos`` module and ``ansible_ssh_user`` contains ``@`` (e.g. ``username@realm``), Ansible will first attempt Kerberos authentication. *This method uses the principal you are authenticated to Kerberos with on the control machine and not ``ansible_ssh_user``*. If that fails, either because you are not signed into Kerberos on the control machine or because the corresponding domain account on the remote host is not available, then Ansible will fall back to "plain" username/password authentication. When using your playbook, don't forget to specify --ask-vault-pass to provide the password to unlock the file. 
From 79f9fbd50efc23217ef28184a09d685b51c39aee Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 9 Apr 2015 10:40:04 -0700 Subject: [PATCH 0281/3617] Reverse the error messages from jsonfile get and set --- lib/ansible/cache/jsonfile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py index 93ee69903beff6..9c45dc22fd7912 100644 --- a/lib/ansible/cache/jsonfile.py +++ b/lib/ansible/cache/jsonfile.py @@ -60,7 +60,7 @@ def get(self, key): try: f = codecs.open(cachefile, 'r', encoding='utf-8') except (OSError,IOError), e: - utils.warning("error while trying to write to %s : %s" % (cachefile, str(e))) + utils.warning("error while trying to read %s : %s" % (cachefile, str(e))) else: value = json.load(f) self._cache[key] = value @@ -76,7 +76,7 @@ def set(self, key, value): try: f = codecs.open(cachefile, 'w', encoding='utf-8') except (OSError,IOError), e: - utils.warning("error while trying to read %s : %s" % (cachefile, str(e))) + utils.warning("error while trying to write to %s : %s" % (cachefile, str(e))) else: f.write(utils.jsonify(value)) finally: From 2af6314f57676b88895ed88996cd71d6c33cb162 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 10 Apr 2015 04:01:18 -0700 Subject: [PATCH 0282/3617] Comment to clarify why we add one to the line and column recording --- v2/ansible/parsing/yaml/constructor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/v2/ansible/parsing/yaml/constructor.py b/v2/ansible/parsing/yaml/constructor.py index 97f9c71ef8bd44..d1a2a01bc28649 100644 --- a/v2/ansible/parsing/yaml/constructor.py +++ b/v2/ansible/parsing/yaml/constructor.py @@ -58,6 +58,7 @@ def construct_yaml_seq(self, node): def _node_position_info(self, node): # the line number where the previous token has ended (plus empty lines) + # Add one so that the first line is line 1 rather than line 0 column = node.start_mark.column + 1 line = node.start_mark.line + 1 From 
652cd6cd5e60879cac3e74088930de1fc603cdda Mon Sep 17 00:00:00 2001 From: Jesse Rusak Date: Sat, 4 Apr 2015 16:37:14 -0400 Subject: [PATCH 0283/3617] Fix --force-handlers, and allow it in plays and ansible.cfg The --force-handlers command line argument was not correctly running handlers on hosts which had tasks that later failed. This corrects that, and also allows you to specify force_handlers in ansible.cfg or in a play. --- bin/ansible-playbook | 3 +- docsite/rst/intro_configuration.rst | 14 ++++++++++ docsite/rst/playbooks_error_handling.rst | 20 +++++++++++++ lib/ansible/constants.py | 2 ++ lib/ansible/playbook/__init__.py | 17 +++++------ lib/ansible/playbook/play.py | 8 ++++-- test/integration/Makefile | 14 ++++++++++ .../test_force_handlers/handlers/main.yml | 2 ++ .../roles/test_force_handlers/tasks/main.yml | 26 +++++++++++++++++ test/integration/test_force_handlers.yml | 28 +++++++++++++++++++ test/units/TestPlayVarsFiles.py | 1 + 11 files changed, 123 insertions(+), 12 deletions(-) create mode 100644 test/integration/roles/test_force_handlers/handlers/main.yml create mode 100644 test/integration/roles/test_force_handlers/tasks/main.yml create mode 100644 test/integration/test_force_handlers.yml diff --git a/bin/ansible-playbook b/bin/ansible-playbook index 118a0198e4293f..3d6e1f9f4029de 100755 --- a/bin/ansible-playbook +++ b/bin/ansible-playbook @@ -97,7 +97,8 @@ def main(args): help="one-step-at-a-time: confirm each task before running") parser.add_option('--start-at-task', dest='start_at', help="start the playbook at the task matching this name") - parser.add_option('--force-handlers', dest='force_handlers', action='store_true', + parser.add_option('--force-handlers', dest='force_handlers', + default=C.DEFAULT_FORCE_HANDLERS, action='store_true', help="run handlers even if a task fails") parser.add_option('--flush-cache', dest='flush_cache', action='store_true', help="clear the fact cache") diff --git a/docsite/rst/intro_configuration.rst 
b/docsite/rst/intro_configuration.rst index 4cb1f3599486ec..a13f6c6ecd990d 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -252,6 +252,20 @@ This options forces color mode even when running without a TTY:: force_color = 1 +.. _force_handlers: + +force_handlers +============== + +.. versionadded:: 1.9.1 + +This option causes notified handlers to run on a host even if a failure occurs on that host:: + + force_handlers = True + +The default is False, meaning that handlers will not run if a failure has occurred on a host. +This can also be set per play or on the command line. See :doc:`_handlers_and_failure` for more details. + .. _forks: forks diff --git a/docsite/rst/playbooks_error_handling.rst b/docsite/rst/playbooks_error_handling.rst index 98ffb2860f3f41..ac573d86ba6ae1 100644 --- a/docsite/rst/playbooks_error_handling.rst +++ b/docsite/rst/playbooks_error_handling.rst @@ -29,6 +29,26 @@ write a task that looks like this:: Note that the above system only governs the failure of the particular task, so if you have an undefined variable used, it will still raise an error that users will need to address. +.. _handlers_and_failure: + +Handlers and Failure +```````````````````` + +.. versionadded:: 1.9.1 + +When a task fails on a host, handlers which were previously notified +will *not* be run on that host. This can lead to cases where an unrelated failure +can leave a host in an unexpected state. For example, a task could update +a configuration file and notify a handler to restart some service. If a +task later on in the same play fails, the service will not be restarted despite +the configuration change. + +You can change this behavior with the ``--force-handlers`` command-line option, +or by including ``force_handlers: True`` in a play, or ``force_handlers = True`` +in ansible.cfg. When handlers are forced, they will run when notified even +if a task fails on that host. 
(Note that certain errors could still prevent +the handler from running, such as a host becoming unreachable.) + .. _controlling_what_defines_failure: Controlling What Defines Failure diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 71efefdbc383da..089de5b7c5bf15 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -173,6 +173,8 @@ def shell_expand_path(path): DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) +DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) + RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index d58657012c625f..93804d123c8726 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -375,17 +375,17 @@ def _async_poll(self, poller, async_seconds, async_poll_interval): # ***************************************************** - def _trim_unavailable_hosts(self, hostlist=[]): + def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False): ''' returns a list of hosts that haven't failed and aren't dark ''' - return [ h for h in hostlist if (h not in self.stats.failures) and (h not in self.stats.dark)] + return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)] # ***************************************************** - def _run_task_internal(self, task): + def _run_task_internal(self, task, 
include_failed=False): ''' run a particular module step in a playbook ''' - hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts)) + hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed) self.inventory.restrict_to(hosts) runner = ansible.runner.Runner( @@ -493,7 +493,8 @@ def _run_task(self, play, task, is_handler): task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) # load up an appropriate ansible runner to run the task in parallel - results = self._run_task_internal(task) + include_failed = is_handler and play.force_handlers + results = self._run_task_internal(task, include_failed=include_failed) # if no hosts are matched, carry on hosts_remaining = True @@ -811,7 +812,7 @@ def _run_play(self, play): # if no hosts remain, drop out if not host_list: - if self.force_handlers: + if play.force_handlers: task_errors = True break else: @@ -821,7 +822,7 @@ def _run_play(self, play): # lift restrictions after each play finishes self.inventory.lift_also_restriction() - if task_errors and not self.force_handlers: + if task_errors and not play.force_handlers: # if there were failed tasks and handler execution # is not forced, quit the play with an error return False @@ -856,7 +857,7 @@ def run_handlers(self, play): play.max_fail_pct = 0 if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count): host_list = None - if not host_list and not self.force_handlers: + if not host_list and not play.force_handlers: self.callbacks.on_no_hosts_remaining() return False diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 78f2f6d9ba8000..9fd8a86f4e4127 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -34,9 +34,10 @@ class Play(object): _pb_common = [ 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become', - 
'become_method', 'become_user', 'environment', 'gather_facts', 'handlers', 'hosts', - 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su', 'su_user', 'sudo', - 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt', 'vault_password', + 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts', + 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su', + 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt', + 'vault_password', ] __slots__ = _pb_common + [ @@ -153,6 +154,7 @@ def __init__(self, playbook, ds, basedir, vault_password=None): self.accelerate_ipv6 = ds.get('accelerate_ipv6', False) self.max_fail_pct = int(ds.get('max_fail_percentage', 100)) self.no_log = utils.boolean(ds.get('no_log', 'false')) + self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers)) # Fail out if user specifies conflicting privelege escalations if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')): diff --git a/test/integration/Makefile b/test/integration/Makefile index ac526cf752ecbc..6e2acec341d131 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -56,6 +56,20 @@ test_group_by: test_handlers: ansible-playbook test_handlers.yml -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + # Not forcing, should only run on successful host + [ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_B" ] + # Forcing from command line + [ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers --force-handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. 
| sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ] + # Forcing from command line, should only run later tasks on unfailed hosts + [ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers --force-handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_TASK_. | sort | uniq | xargs)" = "CALLED_TASK_B CALLED_TASK_D CALLED_TASK_E" ] + # Forcing from command line, should call handlers even if all hosts fail + [ "$$(ansible-playbook test_force_handlers.yml --tags normal -i inventory.handlers --force-handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v -e fail_all=yes $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ] + # Forcing from ansible.cfg + [ "$$(ANSIBLE_FORCE_HANDLERS=true ansible-playbook --tags normal test_force_handlers.yml -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ] + # Forcing true in play + [ "$$(ansible-playbook test_force_handlers.yml --tags force_true_in_play -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. | sort | uniq | xargs)" = "CALLED_HANDLER_A CALLED_HANDLER_B" ] + # Forcing false in play, which overrides command line + [ "$$(ansible-playbook test_force_handlers.yml --force-handlers --tags force_false_in_play -i inventory.handlers -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | egrep -o CALLED_HANDLER_. 
| sort | uniq | xargs)" = "CALLED_HANDLER_B" ] test_hash: ANSIBLE_HASH_BEHAVIOUR=replace ansible-playbook test_hash.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -v -e '{"test_hash":{"extra_args":"this is an extra arg"}}' diff --git a/test/integration/roles/test_force_handlers/handlers/main.yml b/test/integration/roles/test_force_handlers/handlers/main.yml new file mode 100644 index 00000000000000..2cfb1ef7109201 --- /dev/null +++ b/test/integration/roles/test_force_handlers/handlers/main.yml @@ -0,0 +1,2 @@ +- name: echoing handler + command: echo CALLED_HANDLER_{{ inventory_hostname }} \ No newline at end of file diff --git a/test/integration/roles/test_force_handlers/tasks/main.yml b/test/integration/roles/test_force_handlers/tasks/main.yml new file mode 100644 index 00000000000000..a3948756d71704 --- /dev/null +++ b/test/integration/roles/test_force_handlers/tasks/main.yml @@ -0,0 +1,26 @@ +--- + +# We notify for A and B, and hosts B and C fail. +# When forcing, we expect A and B to run handlers +# When not forcing, we expect only B to run handlers + +- name: notify the handler for host A and B + shell: echo + notify: + - echoing handler + when: inventory_hostname == 'A' or inventory_hostname == 'B' + +- name: fail task for all + fail: msg="Fail All" + when: fail_all is defined and fail_all + +- name: fail task for A + fail: msg="Fail A" + when: inventory_hostname == 'A' + +- name: fail task for C + fail: msg="Fail C" + when: inventory_hostname == 'C' + +- name: echo after A and C have failed + command: echo CALLED_TASK_{{ inventory_hostname }} \ No newline at end of file diff --git a/test/integration/test_force_handlers.yml b/test/integration/test_force_handlers.yml new file mode 100644 index 00000000000000..a700da08f0be28 --- /dev/null +++ b/test/integration/test_force_handlers.yml @@ -0,0 +1,28 @@ +--- + +- name: test force handlers (default) + tags: normal + hosts: testgroup + gather_facts: False + connection: local + roles: + - { role: test_force_handlers } + +- 
name: test force handlers (set to true) + tags: force_true_in_play + hosts: testgroup + gather_facts: False + connection: local + force_handlers: True + roles: + - { role: test_force_handlers } + + +- name: test force handlers (set to false) + tags: force_false_in_play + hosts: testgroup + gather_facts: False + connection: local + force_handlers: False + roles: + - { role: test_force_handlers } diff --git a/test/units/TestPlayVarsFiles.py b/test/units/TestPlayVarsFiles.py index 497c3112ede0d4..9d42b73e8b6ed1 100644 --- a/test/units/TestPlayVarsFiles.py +++ b/test/units/TestPlayVarsFiles.py @@ -47,6 +47,7 @@ def __init__(self): self.transport = None self.only_tags = None self.skip_tags = None + self.force_handlers = None self.VARS_CACHE = {} self.SETUP_CACHE = {} self.inventory = FakeInventory() From 56f4bf44f53881162ec7a0f35526eaaa68fa9398 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Tue, 30 Sep 2014 11:52:05 -0400 Subject: [PATCH 0284/3617] Add integration tests for win_user module. --- .../roles/test_win_user/defaults/main.yml | 5 + .../test_win_user/files/lockout_user.ps1 | 17 + .../roles/test_win_user/tasks/main.yml | 400 ++++++++++++++++++ test/integration/test_winrm.yml | 1 + 4 files changed, 423 insertions(+) create mode 100644 test/integration/roles/test_win_user/defaults/main.yml create mode 100644 test/integration/roles/test_win_user/files/lockout_user.ps1 create mode 100644 test/integration/roles/test_win_user/tasks/main.yml diff --git a/test/integration/roles/test_win_user/defaults/main.yml b/test/integration/roles/test_win_user/defaults/main.yml new file mode 100644 index 00000000000000..c6a18ed3a30797 --- /dev/null +++ b/test/integration/roles/test_win_user/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +test_win_user_name: test_win_user +test_win_user_password: "T35Tus3rP@ssW0rd" +test_win_user_password2: "pa55wOrd4te5tU53R!" 
diff --git a/test/integration/roles/test_win_user/files/lockout_user.ps1 b/test/integration/roles/test_win_user/files/lockout_user.ps1 new file mode 100644 index 00000000000000..e15f13f3bf2b61 --- /dev/null +++ b/test/integration/roles/test_win_user/files/lockout_user.ps1 @@ -0,0 +1,17 @@ +trap +{ + Write-Error -ErrorRecord $_ + exit 1; +} + +$username = $args[0] +[void][system.reflection.assembly]::LoadWithPartialName('System.DirectoryServices.AccountManagement') +$pc = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext 'Machine', $env:COMPUTERNAME +For ($i = 1; $i -le 10; $i++) { + try { + $pc.ValidateCredentials($username, 'b@DP@ssw0rd') + } + catch { + break + } +} diff --git a/test/integration/roles/test_win_user/tasks/main.yml b/test/integration/roles/test_win_user/tasks/main.yml new file mode 100644 index 00000000000000..ebe8c5da3e8edf --- /dev/null +++ b/test/integration/roles/test_win_user/tasks/main.yml @@ -0,0 +1,400 @@ +# test code for the win_user module +# (c) 2014, Chris Church + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- name: remove existing test user if present + win_user: name="{{ test_win_user_name }}" state="absent" + register: win_user_remove_result + +- name: check user removal result + assert: + that: + - "win_user_remove_result.name" + - "win_user_remove_result.state == 'absent'" + +- name: try to remove test user again + win_user: name="{{ test_win_user_name }}" state="absent" + register: win_user_remove_result_again + +- name: check user removal result again + assert: + that: + - "not win_user_remove_result_again|changed" + - "win_user_remove_result_again.name" + - "win_user_remove_result_again.msg" + - "win_user_remove_result.state == 'absent'" + +- name: test missing user with query state + win_user: name="{{ test_win_user_name }}" state="query" + register: win_user_missing_query_result + +- name: check missing query result + assert: + that: + - "not win_user_missing_query_result|changed" + - "win_user_missing_query_result.name" + - "win_user_missing_query_result.msg" + - "win_user_missing_query_result.state == 'absent'" + +- name: test create user + win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password }}" + register: win_user_create_result + +- name: check user creation result + assert: + that: + - "win_user_create_result|changed" + - "win_user_create_result.name == '{{ test_win_user_name }}'" + - "win_user_create_result.fullname == '{{ test_win_user_name }}'" + - "win_user_create_result.path" + - "win_user_create_result.state == 'present'" + +- name: update user full name and description + win_user: name="{{ test_win_user_name }}" fullname="Test Ansible User" description="Test user account created by Ansible" + register: win_user_update_result + +- name: check full name and description update result + assert: + that: + - "win_user_update_result|changed" + - "win_user_update_result.fullname == 'Test Ansible User'" + - "win_user_update_result.description == 'Test user account created by Ansible'" + +- name: update user full name and 
description again with same values + win_user: name="{{ test_win_user_name }}" fullname="Test Ansible User" description="Test user account created by Ansible" + register: win_user_update_result_again + +- name: check full name and description result again + assert: + that: + - "not win_user_update_result_again|changed" + - "win_user_update_result_again.fullname == 'Test Ansible User'" + - "win_user_update_result_again.description == 'Test user account created by Ansible'" + +- name: test again with no options or changes + win_user: name="{{ test_win_user_name }}" + register: win_user_nochange_result + +- name: check no changes result + assert: + that: + - "not win_user_nochange_result|changed" + +- name: test again with query state + win_user: name="{{ test_win_user_name }}" state="query" + register: win_user_query_result + +- name: check query result + assert: + that: + - "not win_user_query_result|changed" + - "win_user_query_result.state == 'present'" + - "win_user_query_result.name == '{{ test_win_user_name }}'" + - "win_user_query_result.fullname == 'Test Ansible User'" + - "win_user_query_result.description == 'Test user account created by Ansible'" + - "win_user_query_result.path" + - "win_user_query_result.sid" + - "win_user_query_result.groups == []" + +- name: change user password + win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password2 }}" + register: win_user_password_result + +- name: check password change result + assert: + that: + - "win_user_password_result|changed" + +- name: change user password again to same value + win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password2 }}" + register: win_user_password_result_again + +- name: check password change result again + assert: + that: + - "not win_user_password_result_again|changed" + +- name: check update_password=on_create for existing user + win_user: name="{{ test_win_user_name }}" password="ThisP@ssW0rdShouldNotBeUsed" update_password=on_create + 
register: win_user_nopasschange_result + +- name: check password change with on_create flag result + assert: + that: + - "not win_user_nopasschange_result|changed" + +- name: set password expired flag + win_user: name="{{ test_win_user_name }}" password_expired=yes + register: win_user_password_expired_result + +- name: check password expired result + assert: + that: + - "win_user_password_expired_result|changed" + - "win_user_password_expired_result.password_expired" + +- name: clear password expired flag + win_user: name="{{ test_win_user_name }}" password_expired=no + register: win_user_clear_password_expired_result + +- name: check clear password expired result + assert: + that: + - "win_user_clear_password_expired_result|changed" + - "not win_user_clear_password_expired_result.password_expired" + +- name: set password never expires flag + win_user: name="{{ test_win_user_name }}" password_never_expires=yes + register: win_user_password_never_expires_result + +- name: check password never expires result + assert: + that: + - "win_user_password_never_expires_result|changed" + - "win_user_password_never_expires_result.password_never_expires" + +- name: clear password never expires flag + win_user: name="{{ test_win_user_name }}" password_never_expires=no + register: win_user_clear_password_never_expires_result + +- name: check clear password never expires result + assert: + that: + - "win_user_clear_password_never_expires_result|changed" + - "not win_user_clear_password_never_expires_result.password_never_expires" + +- name: set user cannot change password flag + win_user: name="{{ test_win_user_name }}" user_cannot_change_password=yes + register: win_user_cannot_change_password_result + +- name: check user cannot change password result + assert: + that: + - "win_user_cannot_change_password_result|changed" + - "win_user_cannot_change_password_result.user_cannot_change_password" + +- name: clear user cannot change password flag + win_user: name="{{ 
test_win_user_name }}" user_cannot_change_password=no + register: win_user_can_change_password_result + +- name: check clear user cannot change password result + assert: + that: + - "win_user_can_change_password_result|changed" + - "not win_user_can_change_password_result.user_cannot_change_password" + +- name: set account disabled flag + win_user: name="{{ test_win_user_name }}" account_disabled=true + register: win_user_account_disabled_result + +- name: check account disabled result + assert: + that: + - "win_user_account_disabled_result|changed" + - "win_user_account_disabled_result.account_disabled" + +- name: clear account disabled flag + win_user: name="{{ test_win_user_name }}" account_disabled=false + register: win_user_clear_account_disabled_result + +- name: check clear account disabled result + assert: + that: + - "win_user_clear_account_disabled_result|changed" + - "not win_user_clear_account_disabled_result.account_disabled" + +- name: attempt to set account locked flag + win_user: name="{{ test_win_user_name }}" account_locked=yes + register: win_user_set_account_locked_result + ignore_errors: true + +- name: verify that attempting to set account locked flag fails + assert: + that: + - "win_user_set_account_locked_result|failed" + - "not win_user_set_account_locked_result|changed" + +- name: attempt to lockout test account + script: lockout_user.ps1 "{{ test_win_user_name }}" + +- name: get user to check if account locked flag is set + win_user: name="{{ test_win_user_name }}" state="query" + register: win_user_account_locked_result + +- name: clear account locked flag if set + win_user: name="{{ test_win_user_name }}" account_locked=no + register: win_user_clear_account_locked_result + when: "win_user_account_locked_result.account_locked" + +- name: check clear account lockout result if account was locked + assert: + that: + - "win_user_clear_account_locked_result|changed" + - "not win_user_clear_account_locked_result.account_locked" + when: 
"win_user_account_locked_result.account_locked" + +- name: assign test user to a group + win_user: name="{{ test_win_user_name }}" groups="Users" + register: win_user_replace_groups_result + +- name: check assign user to group result + assert: + that: + - "win_user_replace_groups_result|changed" + - "win_user_replace_groups_result.groups|length == 1" + - "win_user_replace_groups_result.groups[0]['name'] == 'Users'" + +- name: assign test user to the same group + win_user: + name: "{{ test_win_user_name }}" + groups: ["Users"] + register: win_user_replace_groups_again_result + +- name: check assign user to group again result + assert: + that: + - "not win_user_replace_groups_again_result|changed" + +- name: add user to another group + win_user: name="{{ test_win_user_name }}" groups="Power Users" groups_action="add" + register: win_user_add_groups_result + +- name: check add user to another group result + assert: + that: + - "win_user_add_groups_result|changed" + - "win_user_add_groups_result.groups|length == 2" + - "win_user_add_groups_result.groups[0]['name'] in ('Users', 'Power Users')" + - "win_user_add_groups_result.groups[1]['name'] in ('Users', 'Power Users')" + +- name: add user to another group again + win_user: + name: "{{ test_win_user_name }}" + groups: "Power Users" + groups_action: add + register: win_user_add_groups_again_result + +- name: check add user to another group again result + assert: + that: + - "not win_user_add_groups_again_result|changed" + +- name: remove user from a group + win_user: name="{{ test_win_user_name }}" groups="Users" groups_action="remove" + register: win_user_remove_groups_result + +- name: check remove user from group result + assert: + that: + - "win_user_remove_groups_result|changed" + - "win_user_remove_groups_result.groups|length == 1" + - "win_user_remove_groups_result.groups[0]['name'] == 'Power Users'" + +- name: remove user from a group again + win_user: + name: "{{ test_win_user_name }}" + groups: + - "Users" + 
groups_action: remove + register: win_user_remove_groups_again_result + +- name: check remove user from group again result + assert: + that: + - "not win_user_remove_groups_again_result|changed" + +- name: reassign test user to multiple groups + win_user: name="{{ test_win_user_name }}" groups="Users, Guests" groups_action="replace" + register: win_user_reassign_groups_result + +- name: check reassign user groups result + assert: + that: + - "win_user_reassign_groups_result|changed" + - "win_user_reassign_groups_result.groups|length == 2" + - "win_user_reassign_groups_result.groups[0]['name'] in ('Users', 'Guests')" + - "win_user_reassign_groups_result.groups[1]['name'] in ('Users', 'Guests')" + +- name: reassign test user to multiple groups again + win_user: + name: "{{ test_win_user_name }}" + groups: + - "Users" + - "Guests" + groups_action: replace + register: win_user_reassign_groups_again_result + +- name: check reassign user groups again result + assert: + that: + - "not win_user_reassign_groups_again_result|changed" + +- name: remove user from all groups + win_user: name="{{ test_win_user_name }}" groups="" + register: win_user_remove_all_groups_result + +- name: check remove user from all groups result + assert: + that: + - "win_user_remove_all_groups_result|changed" + - "win_user_remove_all_groups_result.groups|length == 0" + +- name: remove user from all groups again + win_user: + name: "{{ test_win_user_name }}" + groups: [] + register: win_user_remove_all_groups_again_result + +- name: check remove user from all groups again result + assert: + that: + - "not win_user_remove_all_groups_again_result|changed" + +- name: assign user to invalid group + win_user: name="{{ test_win_user_name }}" groups="Userz" + register: win_user_invalid_group_result + ignore_errors: true + +- name: check invalid group result + assert: + that: + - "win_user_invalid_group_result|failed" + - "win_user_invalid_group_result.msg" + +- name: remove test user when finished + 
win_user: name="{{ test_win_user_name }}" state="absent" + register: win_user_final_remove_result + +- name: check final user removal result + assert: + that: + - "win_user_final_remove_result|changed" + - "win_user_final_remove_result.name" + - "win_user_final_remove_result.msg" + - "win_user_final_remove_result.state == 'absent'" + +- name: test removed user with query state + win_user: name="{{ test_win_user_name }}" state="query" + register: win_user_removed_query_result + +- name: check removed query result + assert: + that: + - "not win_user_removed_query_result|changed" + - "win_user_removed_query_result.name" + - "win_user_removed_query_result.msg" + - "win_user_removed_query_result.state == 'absent'" diff --git a/test/integration/test_winrm.yml b/test/integration/test_winrm.yml index e2a282e061f247..69d3b652a6f727 100644 --- a/test/integration/test_winrm.yml +++ b/test/integration/test_winrm.yml @@ -30,6 +30,7 @@ - { role: test_win_msi, tags: test_win_msi } - { role: test_win_service, tags: test_win_service } - { role: test_win_feature, tags: test_win_feature } + - { role: test_win_user, tags: test_win_user } - { role: test_win_file, tags: test_win_file } - { role: test_win_copy, tags: test_win_copy } - { role: test_win_template, tags: test_win_template } From 42bd640d143740f3d2613320ec7df67377a5f5a0 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Mon, 24 Nov 2014 00:44:45 -0500 Subject: [PATCH 0285/3617] Update win_user tests to set a group on user creation. 
--- test/integration/roles/test_win_user/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_win_user/tasks/main.yml b/test/integration/roles/test_win_user/tasks/main.yml index ebe8c5da3e8edf..0e22e332ae923a 100644 --- a/test/integration/roles/test_win_user/tasks/main.yml +++ b/test/integration/roles/test_win_user/tasks/main.yml @@ -51,7 +51,7 @@ - "win_user_missing_query_result.state == 'absent'" - name: test create user - win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password }}" + win_user: name="{{ test_win_user_name }}" password="{{ test_win_user_password }}" groups="Guests" register: win_user_create_result - name: check user creation result @@ -64,7 +64,7 @@ - "win_user_create_result.state == 'present'" - name: update user full name and description - win_user: name="{{ test_win_user_name }}" fullname="Test Ansible User" description="Test user account created by Ansible" + win_user: name="{{ test_win_user_name }}" fullname="Test Ansible User" description="Test user account created by Ansible" groups="" register: win_user_update_result - name: check full name and description update result From 0abcebf1e4763a7e3a1f81b1c8ea5a195de55064 Mon Sep 17 00:00:00 2001 From: Feanil Patel Date: Sat, 14 Mar 2015 16:26:48 -0400 Subject: [PATCH 0286/3617] Don't convert numbers and booleans to strings. Before this change if a variable was of type int or bool and the variable was referenced by another variable, the type would change to string. eg. defaults/main.yml ``` PORT: 4567 OTHER_CONFIG: secret1: "so_secret" secret2: "even_more_secret" CONFIG: hostname: "some_hostname" port: "{{ PORT }}" secrets: "{{ OTHER_CONFIG }}" ``` If you output `CONFIG` to json or yaml, the port would get represented in the output as a string instead of as a number, but secrets would get represented as a dictionary. This is a mis-match in behaviour where some "types" are retained and others are not. 
This change should fix the issue. Update template test to also test var retainment. Make the template changes in v2. Update to only short-circuit for booleans and numbers. Added an entry to the changelog. --- CHANGELOG.md | 5 +++- lib/ansible/utils/template.py | 30 +++++++++++++++---- .../roles/test_template/files/foo.txt | 7 +++++ .../roles/test_template/templates/foo.j2 | 2 ++ .../roles/test_template/vars/main.yml | 13 ++++++++ v2/ansible/template/__init__.py | 21 +++++++++++++ 6 files changed, 71 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 06fe0504fc7ea4..69d7c3fd56aa4a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,10 @@ Ansible Changes By Release ## 2.0 "TBD" - ACTIVE DEVELOPMENT Major Changes: - big_ip modules now support turning off ssl certificate validation (use only for self signed) + - big_ip modules now support turning off ssl certificate validation (use only for self signed) + + - template code now retains types for bools and Numbers instead of turning them into strings + - If you need the old behaviour, quote the value and it will get passed around as a string New Modules: cloudtrail diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 9426e254eb5826..5f712b2675ea9e 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -31,6 +31,7 @@ import pwd import ast import traceback +from numbers import Number from ansible.utils.string_functions import count_newlines_from_end from ansible.utils import to_bytes, to_unicode @@ -81,6 +82,11 @@ class Flags: FILTER_PLUGINS = None _LISTRE = re.compile(r"(\w+)\[(\d+)\]") + +# A regex for checking to see if a variable we're trying to +# expand is just a single variable name. 
+SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$") + JINJA2_OVERRIDE = '#jinja2:' JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline'] @@ -109,7 +115,6 @@ def lookup(name, *args, **kwargs): def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True): ''' templates a data structure by traversing it and substituting for other data structures ''' from ansible import utils - try: if convert_bare and isinstance(varname, basestring): first_part = varname.split(".")[0].split("[")[0] @@ -123,10 +128,13 @@ def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_ except errors.AnsibleError, e: raise errors.AnsibleError("Failed to template %s: %s" % (varname, str(e))) - if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["): - eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True) - if eval_results[1] is None: - varname = eval_results[0] + # template_from_string may return non strings for the case where the var is just + # a reference to a single variable, so we should re_check before we do further evals + if isinstance(varname, basestring): + if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["): + eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True) + if eval_results[1] is None: + varname = eval_results[0] return varname @@ -323,10 +331,20 @@ def my_finalize(thing): def template_from_string(basedir, data, vars, fail_on_undefined=False): ''' run a string through the (Jinja2) templating engine ''' - try: if type(data) == str: data = unicode(data, 'utf-8') + + # Check to see if the string we are trying to render is just referencing a single + # var. In this case we don't wont to accidentally change the type of the variable + # to a string by using the jinja template renderer. 
We just want to pass it. + only_one = SINGLE_VAR.match(data) + if only_one: + var_name = only_one.group(1) + if var_name in vars: + resolved_val = vars[var_name] + if isinstance(resolved_val, (bool, Number)): + return resolved_val def my_finalize(thing): return thing if thing is not None else '' diff --git a/test/integration/roles/test_template/files/foo.txt b/test/integration/roles/test_template/files/foo.txt index 3e96db9b3ec01e..edd704da048007 100644 --- a/test/integration/roles/test_template/files/foo.txt +++ b/test/integration/roles/test_template/files/foo.txt @@ -1 +1,8 @@ templated_var_loaded + +{ + "bool": true, + "multi_part": "1Foo", + "number": 5, + "string_num": "5" +} diff --git a/test/integration/roles/test_template/templates/foo.j2 b/test/integration/roles/test_template/templates/foo.j2 index 55aab8f1ea1435..22187f913004c3 100644 --- a/test/integration/roles/test_template/templates/foo.j2 +++ b/test/integration/roles/test_template/templates/foo.j2 @@ -1 +1,3 @@ {{ templated_var }} + +{{ templated_dict | to_nice_json }} diff --git a/test/integration/roles/test_template/vars/main.yml b/test/integration/roles/test_template/vars/main.yml index 1e8f64ccf4458a..b79f95e6cf16f7 100644 --- a/test/integration/roles/test_template/vars/main.yml +++ b/test/integration/roles/test_template/vars/main.yml @@ -1 +1,14 @@ templated_var: templated_var_loaded + +number_var: 5 +string_num: "5" +bool_var: true +part_1: 1 +part_2: "Foo" + +templated_dict: + number: "{{ number_var }}" + string_num: "{{ string_num }}" + bool: "{{ bool_var }}" + multi_part: "{{ part_1 }}{{ part_2 }}" + diff --git a/v2/ansible/template/__init__.py b/v2/ansible/template/__init__.py index 46bbc06a07dd96..0345a750081cdd 100644 --- a/v2/ansible/template/__init__.py +++ b/v2/ansible/template/__init__.py @@ -32,8 +32,17 @@ from ansible.template.vars import AnsibleJ2Vars from ansible.utils.debug import debug +from numbers import Number + __all__ = ['Templar'] +# A regex for checking to see if a 
variable we're trying to +# expand is just a single variable name. +SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$") + +# Primitive Types which we don't want Jinja to convert to strings. +NON_TEMPLATED_TYPES = ( bool, Number ) + JINJA2_OVERRIDE = '#jinja2:' JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline'] @@ -125,6 +134,18 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=Fals if isinstance(variable, basestring): result = variable if self._contains_vars(variable): + + # Check to see if the string we are trying to render is just referencing a single + # var. In this case we don't wont to accidentally change the type of the variable + # to a string by using the jinja template renderer. We just want to pass it. + only_one = SINGLE_VAR.match(variable) + if only_one: + var_name = only_one.group(1) + if var_name in self._available_vars: + resolved_val = self._available_vars[var_name] + if isinstance(resolved_val, NON_TEMPLATED_TYPES): + return resolved_val + result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines) # if this looks like a dictionary or list, convert it to such using the safe_eval method From e6b7b9206d16a9b446437e06957096ed242c0fc7 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Mon, 13 Apr 2015 23:45:09 +1000 Subject: [PATCH 0287/3617] Fixed changelog typos --- CHANGELOG.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 69d7c3fd56aa4a..256b3bafe28155 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,7 +45,7 @@ Major changes: For some use cases this can lead to dramatic improvements in startup time. * Overhaul of the checksum system, now supports more systems and more cases more reliably and uniformly. * Fix skipped tasks to not display their parameters if no_log is specified. 
-* Many fixes to unicode support, standarized functions to make it easier to add to input/output boundries. +* Many fixes to unicode support, standarized functions to make it easier to add to input/output boundaries. * Added travis integration to github for basic tests, this should speed up ticket triage and merging. * environment: directive now can also be applied to play and is inhertited by tasks, which can still override it. * expanded facts and OS/distribution support for existing facts and improved performance with pypy. @@ -162,7 +162,7 @@ Other Notable Changes: ## 1.8.3 "You Really Got Me" - Feb 17, 2015 -* Fixing a security bug related to the default permissions set on a tempoary file created when using "ansible-vault view ". +* Fixing a security bug related to the default permissions set on a temporary file created when using "ansible-vault view ". * Many bug fixes, for both core code and core modules. ## 1.8.2 "You Really Got Me" - Dec 04, 2014 @@ -450,7 +450,7 @@ Other notable changes: ## 1.5.4 "Love Walks In" - April 1, 2014 - Security fix for safe_eval, which further hardens the checking of the evaluation function. -- Changing order of variable precendence for system facts, to ensure that inventory variables take precedence over any facts that may be set on a host. +- Changing order of variable precedence for system facts, to ensure that inventory variables take precedence over any facts that may be set on a host. ## 1.5.3 "Love Walks In" - March 13, 2014 @@ -485,7 +485,7 @@ Major features/changes: * ec2 module now accepts 'exact_count' and 'count_tag' as a way to enforce a running number of nodes by tags. * all ec2 modules that work with Eucalyptus also now support a 'validate_certs' option, which can be set to 'off' for installations using self-signed certs. 
* Start of new integration test infrastructure (WIP, more details TBD) -* if repoquery is unavailble, the yum module will automatically attempt to install yum-utils +* if repoquery is unavailable, the yum module will automatically attempt to install yum-utils * ansible-vault: a framework for encrypting your playbooks and variable files * added support for privilege escalation via 'su' into bin/ansible and bin/ansible-playbook and associated keywords 'su', 'su_user', 'su_pass' for tasks/plays @@ -948,7 +948,7 @@ Bugfixes and Misc Changes: * misc fixes to the Riak module * make template module slightly more efficient * base64encode / decode filters are now available to templates -* libvirt module can now work with multiple different libvirt connecton URIs +* libvirt module can now work with multiple different libvirt connection URIs * fix for postgresql password escaping * unicode fix for shlex.split in some cases * apt module upgrade logic improved @@ -1153,7 +1153,7 @@ New playbook/language features: * task includes can now be of infinite depth * when_set and when_unset can take more than one var (when_set: $a and $b and $c) * added the with_sequence lookup plugin -* can override "connection:" on an indvidual task +* can override "connection:" on an individual task * parameterized playbook includes can now define complex variables (not just all on one line) * making inventory variables available for use in vars_files paths * messages when skipping plays are now more clear From 224fd0adfe8c977d55b0924ec558a51f59de4bab Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Apr 2015 10:10:32 -0400 Subject: [PATCH 0288/3617] added fleetctl entry for new inventory script to changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 256b3bafe28155..0211defbaa0f4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,9 @@ New Modules: vertica_schema vertica_user +New Inventory scripts: + fleetctl + Other Notable 
Changes: ## 1.9 "Dancing In the Street" - Mar 25, 2015 From 89cc54cc16c36c8c46b76a5c0f70afe9c86aa4b5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Apr 2015 10:49:31 -0400 Subject: [PATCH 0289/3617] typo fix --- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 21bbc93d4d102a..a85f3fff0ef221 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -177,7 +177,7 @@ def get_platform_facts(self): data = out.split('\n') self.facts['architecture'] = data[0] except: - self.facts['architectrure' = 'Not Available' + self.facts['architectrure'] = 'Not Available' elif self.facts['system'] == 'OpenBSD': self.facts['architecture'] = platform.uname()[5] From 62c08d96e50ad7fd17da5b8b1396e7d168dc3f48 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Apr 2015 10:58:17 -0400 Subject: [PATCH 0290/3617] fixed another typo --- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index a85f3fff0ef221..595629a7109759 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -177,7 +177,7 @@ def get_platform_facts(self): data = out.split('\n') self.facts['architecture'] = data[0] except: - self.facts['architectrure'] = 'Not Available' + self.facts['architecture'] = 'Not Available' elif self.facts['system'] == 'OpenBSD': self.facts['architecture'] = platform.uname()[5] From b193d327b616da2774ce4293aa52539fbd61b6ef Mon Sep 17 00:00:00 2001 From: Dorian Pula Date: Mon, 13 Apr 2015 12:17:07 -0400 Subject: [PATCH 0291/3617] Fix re import failure in templates module when running unit tests. 
--- v2/ansible/template/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/v2/ansible/template/__init__.py b/v2/ansible/template/__init__.py index 0345a750081cdd..4e15e83424c22b 100644 --- a/v2/ansible/template/__init__.py +++ b/v2/ansible/template/__init__.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import re + from jinja2 import Environment from jinja2.exceptions import TemplateSyntaxError, UndefinedError from jinja2.utils import concat as j2_concat From 6747f825476e7e82c315fbbef29794bc8d0026e6 Mon Sep 17 00:00:00 2001 From: ian Date: Mon, 13 Apr 2015 12:35:20 -0400 Subject: [PATCH 0292/3617] Change exceptions to python3 syntax. --- v2/ansible/playbook/base.py | 4 ++-- v2/ansible/plugins/__init__.py | 2 +- v2/ansible/plugins/action/__init__.py | 4 ++-- v2/ansible/plugins/action/copy.py | 6 +++--- v2/ansible/plugins/action/pause.py | 2 +- v2/ansible/plugins/action/template.py | 2 +- v2/ansible/plugins/connections/accelerate.py | 2 +- v2/ansible/plugins/connections/paramiko_ssh.py | 8 ++++---- v2/ansible/plugins/connections/winrm.py | 2 +- v2/ansible/plugins/lookup/csvfile.py | 4 ++-- v2/ansible/plugins/lookup/dnstxt.py | 2 +- v2/ansible/plugins/lookup/first_found.py | 2 +- v2/ansible/plugins/lookup/password.py | 4 ++-- v2/ansible/plugins/lookup/url.py | 4 ++-- v2/ansible/plugins/strategies/__init__.py | 2 +- v2/ansible/plugins/strategies/free.py | 2 +- v2/ansible/template/safe_eval.py | 4 ++-- v2/ansible/utils/hashing.py | 2 +- v2/ansible/utils/vault.py | 4 ++-- v2/ansible/vars/__init__.py | 2 +- v2/samples/multi.py | 4 ++-- v2/samples/multi_queues.py | 8 ++++---- 22 files changed, 38 insertions(+), 38 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index e834d3b729684f..c6a9d9a051396e 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -270,9 +270,9 @@ def post_validate(self, all_vars=dict(), fail_on_undefined=True): # and 
assign the massaged value back to the attribute field setattr(self, name, value) - except (TypeError, ValueError), e: + except (TypeError, ValueError) as e: raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds()) - except UndefinedError, e: + except UndefinedError as e: if fail_on_undefined: raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds()) diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index a55059f1b7b7bc..d16eecd3c39921 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -180,7 +180,7 @@ def find_plugin(self, name, suffixes=None): if os.path.isdir(path): try: full_paths = (os.path.join(path, f) for f in os.listdir(path)) - except OSError,e: + except OSError as e: d = Display() d.warning("Error accessing plugin paths: %s" % str(e)) for full_path in (f for f in full_paths if os.path.isfile(f)): diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index 2f56c4df582eb6..0e98bbc5b75e59 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -122,7 +122,7 @@ def _early_needs_tmp_path(self): # FIXME: modified from original, needs testing? Since this is now inside # the action plugin, it should make it just this simple return getattr(self, 'TRANSFERS_FILES', False) - + def _late_needs_tmp_path(self, tmp, module_style): ''' Determines if a temp path is required after some early actions have already taken place. 
@@ -223,7 +223,7 @@ def _transfer_data(self, remote_path, data): #else: # data = data.encode('utf-8') afo.write(data) - except Exception, e: + except Exception as e: #raise AnsibleError("failure encoding into utf-8: %s" % str(e)) raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e)) diff --git a/v2/ansible/plugins/action/copy.py b/v2/ansible/plugins/action/copy.py index ece8b5b11b0973..6db130ad7f3a32 100644 --- a/v2/ansible/plugins/action/copy.py +++ b/v2/ansible/plugins/action/copy.py @@ -70,7 +70,7 @@ def run(self, tmp=None, task_vars=dict()): else: content_tempfile = self._create_content_tempfile(content) source = content_tempfile - except Exception, err: + except Exception as err: return dict(failed=True, msg="could not write content temp file: %s" % err) ############################################################################################### @@ -270,7 +270,7 @@ def run(self, tmp=None, task_vars=dict()): if module_return.get('changed') == True: changed = True - # the file module returns the file path as 'path', but + # the file module returns the file path as 'path', but # the copy module uses 'dest', so add it if it's not there if 'path' in module_return and 'dest' not in module_return: module_return['dest'] = module_return['path'] @@ -297,7 +297,7 @@ def _create_content_tempfile(self, content): content = to_bytes(content) try: f.write(content) - except Exception, err: + except Exception as err: os.remove(content_tempfile) raise Exception(err) finally: diff --git a/v2/ansible/plugins/action/pause.py b/v2/ansible/plugins/action/pause.py index 9c6075e1011fa2..c56e6654b1bb44 100644 --- a/v2/ansible/plugins/action/pause.py +++ b/v2/ansible/plugins/action/pause.py @@ -68,7 +68,7 @@ def run(self, tmp=None, task_vars=dict()): seconds = int(self._task.args['seconds']) duration_unit = 'seconds' - except ValueError, e: + except ValueError as e: return dict(failed=True, msg="non-integer value given for prompt duration:\n%s" 
% str(e)) # Is 'prompt' a key in 'args'? diff --git a/v2/ansible/plugins/action/template.py b/v2/ansible/plugins/action/template.py index 76b2e78a737d62..f82cbb376670eb 100644 --- a/v2/ansible/plugins/action/template.py +++ b/v2/ansible/plugins/action/template.py @@ -102,7 +102,7 @@ def run(self, tmp=None, task_vars=dict()): with open(source, 'r') as f: template_data = f.read() resultant = templar.template(template_data, preserve_trailing_newlines=True) - except Exception, e: + except Exception as e: return dict(failed=True, msg=type(e).__name__ + ": " + str(e)) local_checksum = checksum_s(resultant) diff --git a/v2/ansible/plugins/connections/accelerate.py b/v2/ansible/plugins/connections/accelerate.py index a31124e119f655..13012aa9299a86 100644 --- a/v2/ansible/plugins/connections/accelerate.py +++ b/v2/ansible/plugins/connections/accelerate.py @@ -140,7 +140,7 @@ def connect(self, allow_ssh=True): # shutdown, so we'll reconnect. wrong_user = True - except AnsibleError, e: + except AnsibleError as e: if allow_ssh: if "WRONG_USER" in e: vvv("Switching users, waiting for the daemon on %s to shutdown completely..." 
% self.host) diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/v2/ansible/plugins/connections/paramiko_ssh.py index 4bb06e01c36147..81470f657c8c24 100644 --- a/v2/ansible/plugins/connections/paramiko_ssh.py +++ b/v2/ansible/plugins/connections/paramiko_ssh.py @@ -170,7 +170,7 @@ def _connect_uncached(self): key_filename=key_filename, password=self.password, timeout=self.runner.timeout, port=self.port) - except Exception, e: + except Exception as e: msg = str(e) if "PID check failed" in msg: @@ -197,7 +197,7 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable self.ssh.get_transport().set_keepalive(5) chan = self.ssh.get_transport().open_session() - except Exception, e: + except Exception as e: msg = "Failed to open session" if len(str(e)) > 0: @@ -284,7 +284,7 @@ def put_file(self, in_path, out_path): try: self.sftp = self.ssh.open_sftp() - except Exception, e: + except Exception as e: raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e) try: @@ -308,7 +308,7 @@ def fetch_file(self, in_path, out_path): try: self.sftp = self._connect_sftp() - except Exception, e: + except Exception as e: raise errors.AnsibleError("failed to open a SFTP connection (%s)", e) try: diff --git a/v2/ansible/plugins/connections/winrm.py b/v2/ansible/plugins/connections/winrm.py index d6e51710b5f27a..57d26ce61880a2 100644 --- a/v2/ansible/plugins/connections/winrm.py +++ b/v2/ansible/plugins/connections/winrm.py @@ -147,7 +147,7 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable cmd_parts = powershell._encode_script(script, as_list=True) try: result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True) - except Exception, e: + except Exception as e: traceback.print_exc() raise errors.AnsibleError("failed to exec cmd %s" % cmd) return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8')) diff --git a/v2/ansible/plugins/lookup/csvfile.py 
b/v2/ansible/plugins/lookup/csvfile.py index 87757399ce5b79..e5fb9a451213c0 100644 --- a/v2/ansible/plugins/lookup/csvfile.py +++ b/v2/ansible/plugins/lookup/csvfile.py @@ -33,7 +33,7 @@ def read_csv(self, filename, key, delimiter, dflt=None, col=1): for row in creader: if row[0] == key: return row[int(col)] - except Exception, e: + except Exception as e: raise AnsibleError("csvfile: %s" % str(e)) return dflt @@ -61,7 +61,7 @@ def run(self, terms, variables=None, **kwargs): name, value = param.split('=') assert(name in paramvals) paramvals[name] = value - except (ValueError, AssertionError), e: + except (ValueError, AssertionError) as e: raise AnsibleError(e) if paramvals['delimiter'] == 'TAB': diff --git a/v2/ansible/plugins/lookup/dnstxt.py b/v2/ansible/plugins/lookup/dnstxt.py index 7100f8d96dfc39..75222927c79b7d 100644 --- a/v2/ansible/plugins/lookup/dnstxt.py +++ b/v2/ansible/plugins/lookup/dnstxt.py @@ -59,7 +59,7 @@ def run(self, terms, variables=None, **kwargs): string = 'NXDOMAIN' except dns.resolver.Timeout: string = '' - except dns.exception.DNSException, e: + except dns.exception.DNSException as e: raise AnsibleError("dns.resolver unhandled exception", e) ret.append(''.join(string)) diff --git a/v2/ansible/plugins/lookup/first_found.py b/v2/ansible/plugins/lookup/first_found.py index 0ed268801508e2..b1d655b81147b5 100644 --- a/v2/ansible/plugins/lookup/first_found.py +++ b/v2/ansible/plugins/lookup/first_found.py @@ -177,7 +177,7 @@ def run(self, terms, variables, **kwargs): for fn in total_search: try: fn = templar.template(fn) - except (AnsibleUndefinedVariable, UndefinedError), e: + except (AnsibleUndefinedVariable, UndefinedError) as e: continue if os.path.isabs(fn) and os.path.exists(fn): diff --git a/v2/ansible/plugins/lookup/password.py b/v2/ansible/plugins/lookup/password.py index 6e13410e1ab67e..7e812a38c5f6b4 100644 --- a/v2/ansible/plugins/lookup/password.py +++ b/v2/ansible/plugins/lookup/password.py @@ -85,7 +85,7 @@ def run(self, terms, 
variables, **kwargs): paramvals['chars'] = use_chars else: paramvals[name] = value - except (ValueError, AssertionError), e: + except (ValueError, AssertionError) as e: raise AnsibleError(e) length = paramvals['length'] @@ -99,7 +99,7 @@ def run(self, terms, variables, **kwargs): if not os.path.isdir(pathdir): try: os.makedirs(pathdir, mode=0700) - except OSError, e: + except OSError as e: raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e))) chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'') diff --git a/v2/ansible/plugins/lookup/url.py b/v2/ansible/plugins/lookup/url.py index c907bfbce3965a..1b9c5c0d808d48 100644 --- a/v2/ansible/plugins/lookup/url.py +++ b/v2/ansible/plugins/lookup/url.py @@ -31,10 +31,10 @@ def run(self, terms, inject=None, **kwargs): try: r = urllib2.Request(term) response = urllib2.urlopen(r) - except URLError, e: + except URLError as e: utils.warnings("Failed lookup url for %s : %s" % (term, str(e))) continue - except HTTPError, e: + except HTTPError as e: utils.warnings("Recieved HTTP error for %s : %s" % (term, str(e))) continue diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py index afbc373f4f3332..c5b3dd0f066731 100644 --- a/v2/ansible/plugins/strategies/__init__.py +++ b/v2/ansible/plugins/strategies/__init__.py @@ -109,7 +109,7 @@ def _queue_task(self, host, task, task_vars, connection_info): self._pending_results += 1 main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, module_loader), block=False) - except (EOFError, IOError, AssertionError), e: + except (EOFError, IOError, AssertionError) as e: # most likely an abort debug("got an error while queuing: %s" % e) return diff --git a/v2/ansible/plugins/strategies/free.py b/v2/ansible/plugins/strategies/free.py index 4fd8a132018ca3..d0506d37ddab59 100644 --- a/v2/ansible/plugins/strategies/free.py +++ 
b/v2/ansible/plugins/strategies/free.py @@ -139,7 +139,7 @@ def run(self, iterator, connection_info): try: results = self._wait_on_pending_results(iterator) host_results.extend(results) - except Exception, e: + except Exception as e: # FIXME: ctrl+c can cause some failures here, so catch them # with the appropriate error type print("wtf: %s" % e) diff --git a/v2/ansible/template/safe_eval.py b/v2/ansible/template/safe_eval.py index ba377054d7ad2e..c52ef398d76b76 100644 --- a/v2/ansible/template/safe_eval.py +++ b/v2/ansible/template/safe_eval.py @@ -105,13 +105,13 @@ def generic_visit(self, node, inside_call=False): return (result, None) else: return result - except SyntaxError, e: + except SyntaxError as e: # special handling for syntax errors, we just return # the expression string back as-is if include_exceptions: return (expr, None) return expr - except Exception, e: + except Exception as e: if include_exceptions: return (expr, e) return expr diff --git a/v2/ansible/utils/hashing.py b/v2/ansible/utils/hashing.py index 0b2edd434bc544..2c7dd534fcb28c 100644 --- a/v2/ansible/utils/hashing.py +++ b/v2/ansible/utils/hashing.py @@ -64,7 +64,7 @@ def secure_hash(filename, hash_func=sha1): digest.update(block) block = infile.read(blocksize) infile.close() - except IOError, e: + except IOError as e: raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) return digest.hexdigest() diff --git a/v2/ansible/utils/vault.py b/v2/ansible/utils/vault.py index 04634aa377b498..5c704afac59b2b 100644 --- a/v2/ansible/utils/vault.py +++ b/v2/ansible/utils/vault.py @@ -40,7 +40,7 @@ def read_vault_file(vault_password_file): try: # STDERR not captured to make it easier for users to prompt for input in their scripts p = subprocess.Popen(this_path, stdout=subprocess.PIPE) - except OSError, e: + except OSError as e: raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." 
% (' '.join(this_path), e)) stdout, stderr = p.communicate() vault_pass = stdout.strip('\r\n') @@ -49,7 +49,7 @@ def read_vault_file(vault_password_file): f = open(this_path, "rb") vault_pass=f.read().strip() f.close() - except (OSError, IOError), e: + except (OSError, IOError) as e: raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e)) return vault_pass diff --git a/v2/ansible/vars/__init__.py b/v2/ansible/vars/__init__.py index eb75d9c9929b8a..183116ea2d84fe 100644 --- a/v2/ansible/vars/__init__.py +++ b/v2/ansible/vars/__init__.py @@ -243,7 +243,7 @@ def _load_inventory_file(self, path, loader): try: names = loader.list_directory(path) - except os.error, err: + except os.error as err: raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror)) # evaluate files in a stable order rather than whatever diff --git a/v2/samples/multi.py b/v2/samples/multi.py index ca4c8b68f744e4..dce61430594bfb 100644 --- a/v2/samples/multi.py +++ b/v2/samples/multi.py @@ -59,10 +59,10 @@ def _read_worker_result(cur_worker): time.sleep(0.01) continue pipe.send(result) - except (IOError, EOFError, KeyboardInterrupt), e: + except (IOError, EOFError, KeyboardInterrupt) as e: debug("got a breaking error: %s" % e) break - except Exception, e: + except Exception as e: debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e) traceback.print_exc() break diff --git a/v2/samples/multi_queues.py b/v2/samples/multi_queues.py index 8eb80366076476..9e8f22b9a945bc 100644 --- a/v2/samples/multi_queues.py +++ b/v2/samples/multi_queues.py @@ -55,10 +55,10 @@ def _read_worker_result(cur_worker): time.sleep(0.01) continue final_q.put(result, block=False) - except (IOError, EOFError, KeyboardInterrupt), e: + except (IOError, EOFError, KeyboardInterrupt) as e: debug("got a breaking error: %s" % e) break - except Exception, e: + except Exception as e: debug("EXCEPTION DURING RESULTS PROCESSING: %s" % e) traceback.print_exc() break @@ -77,10 +77,10 @@ def 
worker(main_q, res_q, loader): time.sleep(0.01) except Queue.Empty: pass - except (IOError, EOFError, KeyboardInterrupt), e: + except (IOError, EOFError, KeyboardInterrupt) as e: debug("got a breaking error: %s" % e) break - except Exception, e: + except Exception as e: debug("EXCEPTION DURING WORKER PROCESSING: %s" % e) traceback.print_exc() break From b407dd8b58258379b824721c193ca005deeb3a19 Mon Sep 17 00:00:00 2001 From: Dorian Pula Date: Mon, 13 Apr 2015 13:34:48 -0400 Subject: [PATCH 0293/3617] Add setup.py for v2 to allow for pip editable installs. --- v2/setup.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 v2/setup.py diff --git a/v2/setup.py b/v2/setup.py new file mode 100644 index 00000000000000..a9a518798188ea --- /dev/null +++ b/v2/setup.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +import sys + +from ansible import __version__ +try: + from setuptools import setup, find_packages +except ImportError: + print("Ansible now needs setuptools in order to build. 
Install it using" + " your package manager (usually python-setuptools) or via pip (pip" + " install setuptools).") + sys.exit(1) + +setup(name='ansible', + version=__version__, + description='Radically simple IT automation', + author='Michael DeHaan', + author_email='michael@ansible.com', + url='http://ansible.com/', + license='GPLv3', + install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'], + # package_dir={ '': 'lib' }, + # packages=find_packages('lib'), + package_data={ + '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'], + }, + scripts=[ + 'bin/ansible', + 'bin/ansible-playbook', + # 'bin/ansible-pull', + # 'bin/ansible-doc', + # 'bin/ansible-galaxy', + # 'bin/ansible-vault', + ], + data_files=[], +) From 5f1ba589a5a27d0379e8154293ba19964ac60e8f Mon Sep 17 00:00:00 2001 From: Timothy Sutton Date: Mon, 13 Apr 2015 13:38:11 -0400 Subject: [PATCH 0294/3617] Git integration test: remove test for ambiguous .git/branches dir - '.git/branches' does not always exist, but the git integration tests always checks for this directory's existence so it always fails - more info: - http://stackoverflow.com/questions/10398225/what-is-the-git-branches-folder-used-for --- test/integration/roles/test_git/tasks/main.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml index 4bdc1d8bd870fd..831db8ea698803 100644 --- a/test/integration/roles/test_git/tasks/main.yml +++ b/test/integration/roles/test_git/tasks/main.yml @@ -65,16 +65,11 @@ stat: path={{ checkout_dir }}/.git/HEAD register: head -- name: check for remotes - stat: path={{ checkout_dir }}/.git/branches - register: branches - - name: assert presence of tags/trunk/branches assert: that: - "tags.stat.isdir" - "head.stat.isreg" - - "branches.stat.isdir" - name: verify on a reclone things are marked unchanged assert: From 3504f1cad96f781c3ebf5bb8d50b6bed1df13d15 
Mon Sep 17 00:00:00 2001 From: Dorian Pula Date: Mon, 13 Apr 2015 13:44:58 -0400 Subject: [PATCH 0295/3617] Add test requirements for working with v2. --- v2/test-requirements.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 v2/test-requirements.txt diff --git a/v2/test-requirements.txt b/v2/test-requirements.txt new file mode 100644 index 00000000000000..97a75d3cb5c767 --- /dev/null +++ b/v2/test-requirements.txt @@ -0,0 +1,11 @@ +# Ansible requirementss +paramiko +PyYAML +jinja2 +httplib2 +passlib + +# Test requirements +unittest2 +mock +nose From 87dde862bd5b93900a3f1db1d99962f89e160705 Mon Sep 17 00:00:00 2001 From: eroldan Date: Mon, 13 Apr 2015 16:21:08 -0300 Subject: [PATCH 0296/3617] Fixed wrong example of 'environment' for setting PATH --- docsite/rst/faq.rst | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index 1b499c547406bb..ba3ae1264ffd3d 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -3,15 +3,17 @@ Frequently Asked Questions Here are some commonly-asked questions and their answers. -.. _users_and_ports: +.. _set_environment: -If you are looking to set environment variables remotely for your project (in a task, not locally for Ansible) -The keyword is simply `environment` +How can I set the PATH or any other environment variable for a task or entire playbook? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Setting environment variables can be done with the `environment` keyword. It can be used at task or playbook level:: + + environment: + PATH: {{ ansible_env.PATH }}:/thingy/bin + SOME: value -``` - environment: - PATH:$PATH:/thingy/bin -``` How do I handle different machines needing different user accounts or ports to log in with? 
From 1bdf0bb0d67849d96aa1b29713af6643e35d148f Mon Sep 17 00:00:00 2001 From: ian Date: Mon, 13 Apr 2015 15:37:25 -0400 Subject: [PATCH 0297/3617] Several more changes to suport python3 syntax. --- v2/ansible/plugins/action/__init__.py | 2 +- v2/ansible/plugins/lookup/password.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index 0e98bbc5b75e59..be83539def6ddb 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import StringIO +from six.moves import StringIO import json import os import random diff --git a/v2/ansible/plugins/lookup/password.py b/v2/ansible/plugins/lookup/password.py index 7e812a38c5f6b4..74017eff619948 100644 --- a/v2/ansible/plugins/lookup/password.py +++ b/v2/ansible/plugins/lookup/password.py @@ -98,7 +98,7 @@ def run(self, terms, variables, **kwargs): pathdir = os.path.dirname(path) if not os.path.isdir(pathdir): try: - os.makedirs(pathdir, mode=0700) + os.makedirs(pathdir, mode=0o700) except OSError as e: raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e))) @@ -111,7 +111,7 @@ def run(self, terms, variables, **kwargs): else: content = password with open(path, 'w') as f: - os.chmod(path, 0600) + os.chmod(path, 0o600) f.write(content + '\n') else: content = open(path).read().rstrip() @@ -129,12 +129,12 @@ def run(self, terms, variables, **kwargs): salt = self.random_salt() content = '%s salt=%s' % (password, salt) with open(path, 'w') as f: - os.chmod(path, 0600) + os.chmod(path, 0o600) f.write(content + '\n') # crypt not requested, remove salt if present elif (encrypt is None and salt): with open(path, 'w') as f: - os.chmod(path, 0600) + os.chmod(path, 0o600) f.write(password + '\n') if encrypt: From 
3a8088fe3009e2ef29a33517c6a787c27098041c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 13 Apr 2015 12:57:17 -0700 Subject: [PATCH 0298/3617] _available_vars in v1 == _available_variables in v2 --- v2/ansible/template/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/template/__init__.py b/v2/ansible/template/__init__.py index 4e15e83424c22b..6c41ad3cf40697 100644 --- a/v2/ansible/template/__init__.py +++ b/v2/ansible/template/__init__.py @@ -143,8 +143,8 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=Fals only_one = SINGLE_VAR.match(variable) if only_one: var_name = only_one.group(1) - if var_name in self._available_vars: - resolved_val = self._available_vars[var_name] + if var_name in self._available_variables: + resolved_val = self._available_variables[var_name] if isinstance(resolved_val, NON_TEMPLATED_TYPES): return resolved_val From 3bb7b0eef309dbac7ca97ae7fa54213950e86ac8 Mon Sep 17 00:00:00 2001 From: ian Date: Mon, 13 Apr 2015 16:03:02 -0400 Subject: [PATCH 0299/3617] Import StringIO from six in a couple more places. 
--- v2/ansible/executor/module_common.py | 2 +- v2/test/parsing/yaml/test_loader.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/executor/module_common.py b/v2/ansible/executor/module_common.py index 23890d64e61a69..535fbd45e335aa 100644 --- a/v2/ansible/executor/module_common.py +++ b/v2/ansible/executor/module_common.py @@ -21,7 +21,7 @@ __metaclass__ = type # from python and deps -from cStringIO import StringIO +from six.moves import StringIO import json import os import shlex diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py index 9a4746b99dfeab..d393d72a0054a0 100644 --- a/v2/test/parsing/yaml/test_loader.py +++ b/v2/test/parsing/yaml/test_loader.py @@ -20,7 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from StringIO import StringIO +from six.moves import StringIO from collections import Sequence, Set, Mapping from ansible.compat.tests import unittest From 3d2a056ad4e748eb22d51ce73f94d3cb53092776 Mon Sep 17 00:00:00 2001 From: Amandine Lee Date: Mon, 13 Apr 2015 13:28:01 -0700 Subject: [PATCH 0300/3617] Import futures including print --- v2/ansible/executor/playbook_executor.py | 2 +- v2/ansible/inventory/__init__.py | 3 +++ v2/ansible/inventory/dir.py | 2 ++ v2/ansible/inventory/expand_hosts.py | 3 +++ v2/ansible/inventory/group.py | 2 ++ v2/ansible/inventory/ini.py | 2 ++ v2/ansible/inventory/script.py | 2 ++ v2/ansible/inventory/vars_plugins/noop.py | 2 ++ v2/ansible/parsing/utils/jsonify.py | 21 ++++++++++++++++++- v2/ansible/playbook/helpers.py | 2 ++ v2/ansible/playbook/play.py | 2 +- v2/ansible/plugins/action/assemble.py | 2 ++ v2/ansible/plugins/action/assert.py | 2 ++ v2/ansible/plugins/action/async.py | 2 ++ v2/ansible/plugins/action/debug.py | 2 ++ v2/ansible/plugins/action/fail.py | 2 ++ v2/ansible/plugins/action/fetch.py | 2 ++ v2/ansible/plugins/action/group_by.py | 2 ++ v2/ansible/plugins/action/include_vars.py | 2 ++ 
v2/ansible/plugins/action/normal.py | 2 ++ v2/ansible/plugins/action/pause.py | 2 ++ v2/ansible/plugins/action/raw.py | 2 ++ v2/ansible/plugins/action/script.py | 2 ++ v2/ansible/plugins/action/set_fact.py | 2 ++ v2/ansible/plugins/action/synchronize.py | 2 ++ v2/ansible/plugins/action/template.py | 2 ++ v2/ansible/plugins/action/unarchive.py | 2 ++ v2/ansible/plugins/cache/__init__.py | 2 ++ v2/ansible/plugins/cache/base.py | 2 ++ v2/ansible/plugins/cache/memcached.py | 2 ++ v2/ansible/plugins/cache/memory.py | 2 ++ v2/ansible/plugins/cache/redis.py | 4 ++-- v2/ansible/plugins/connections/accelerate.py | 2 ++ v2/ansible/plugins/connections/chroot.py | 2 ++ v2/ansible/plugins/connections/funcd.py | 3 +++ v2/ansible/plugins/connections/jail.py | 2 ++ v2/ansible/plugins/connections/libvirt_lxc.py | 2 ++ v2/ansible/plugins/connections/local.py | 2 ++ .../plugins/connections/paramiko_ssh.py | 3 ++- v2/ansible/plugins/connections/ssh.py | 2 ++ v2/ansible/plugins/connections/winrm.py | 4 ++-- v2/ansible/plugins/inventory/directory.py | 2 +- v2/ansible/plugins/lookup/cartesian.py | 2 ++ v2/ansible/plugins/lookup/csvfile.py | 2 ++ v2/ansible/plugins/lookup/dict.py | 2 ++ v2/ansible/plugins/lookup/dnstxt.py | 2 ++ v2/ansible/plugins/lookup/env.py | 2 ++ v2/ansible/plugins/lookup/etcd.py | 2 ++ v2/ansible/plugins/lookup/file.py | 2 ++ v2/ansible/plugins/lookup/fileglob.py | 2 ++ v2/ansible/plugins/lookup/first_found.py | 2 ++ v2/ansible/plugins/lookup/flattened.py | 3 ++- v2/ansible/plugins/lookup/indexed_items.py | 2 ++ .../plugins/lookup/inventory_hostnames.py | 3 +++ v2/ansible/plugins/lookup/items.py | 2 ++ v2/ansible/plugins/lookup/lines.py | 4 +++- v2/ansible/plugins/lookup/nested.py | 2 ++ v2/ansible/plugins/lookup/password.py | 2 ++ v2/ansible/plugins/lookup/pipe.py | 2 ++ v2/ansible/plugins/lookup/random_choice.py | 2 ++ v2/ansible/plugins/lookup/redis_kv.py | 2 ++ v2/ansible/plugins/lookup/sequence.py | 2 ++ v2/ansible/plugins/lookup/subelements.py | 2 ++ 
v2/ansible/plugins/lookup/template.py | 2 ++ v2/ansible/plugins/lookup/together.py | 2 ++ v2/ansible/plugins/lookup/url.py | 2 ++ v2/ansible/plugins/shell/csh.py | 2 ++ v2/ansible/plugins/shell/fish.py | 2 ++ v2/ansible/plugins/shell/powershell.py | 2 ++ v2/ansible/plugins/shell/sh.py | 2 ++ v2/ansible/template/safe_eval.py | 2 ++ v2/ansible/utils/color.py | 2 ++ v2/ansible/utils/debug.py | 3 +++ v2/ansible/utils/display.py | 2 ++ v2/ansible/utils/encrypt.py | 3 +++ v2/ansible/utils/path.py | 2 ++ 76 files changed, 174 insertions(+), 11 deletions(-) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 8af19ed378fa3d..6f0bf31f3374a8 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -16,7 +16,7 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division) +from __future__ import (absolute_import, division, print_function) __metaclass__ = type import signal diff --git a/v2/ansible/inventory/__init__.py b/v2/ansible/inventory/__init__.py index c8e3cddebaad53..063398f17f9cdf 100644 --- a/v2/ansible/inventory/__init__.py +++ b/v2/ansible/inventory/__init__.py @@ -16,6 +16,9 @@ # along with Ansible. If not, see . ############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import fnmatch import os import sys diff --git a/v2/ansible/inventory/dir.py b/v2/ansible/inventory/dir.py index 52f7af8b53f794..73c882f288fb25 100644 --- a/v2/ansible/inventory/dir.py +++ b/v2/ansible/inventory/dir.py @@ -17,6 +17,8 @@ # along with Ansible. If not, see . 
############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os diff --git a/v2/ansible/inventory/expand_hosts.py b/v2/ansible/inventory/expand_hosts.py index f1297409355c22..b5a957c53fe89b 100644 --- a/v2/ansible/inventory/expand_hosts.py +++ b/v2/ansible/inventory/expand_hosts.py @@ -30,6 +30,9 @@ Note that when beg is specified with left zero padding, then the length of end must be the same as that of beg, else an exception is raised. ''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import string from ansible import errors diff --git a/v2/ansible/inventory/group.py b/v2/ansible/inventory/group.py index 87d6f64dfc65cd..6525e69b466bd1 100644 --- a/v2/ansible/inventory/group.py +++ b/v2/ansible/inventory/group.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.utils.debug import debug diff --git a/v2/ansible/inventory/ini.py b/v2/ansible/inventory/ini.py index 4236140ac88486..e004ee8bb7584d 100644 --- a/v2/ansible/inventory/ini.py +++ b/v2/ansible/inventory/ini.py @@ -16,6 +16,8 @@ # along with Ansible. If not, see . ############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import ast import shlex diff --git a/v2/ansible/inventory/script.py b/v2/ansible/inventory/script.py index 13b53a24f5e89a..9675d70f690910 100644 --- a/v2/ansible/inventory/script.py +++ b/v2/ansible/inventory/script.py @@ -16,6 +16,8 @@ # along with Ansible. If not, see . 
############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import subprocess diff --git a/v2/ansible/inventory/vars_plugins/noop.py b/v2/ansible/inventory/vars_plugins/noop.py index 5d4b4b6658c985..8f0c98cad56d35 100644 --- a/v2/ansible/inventory/vars_plugins/noop.py +++ b/v2/ansible/inventory/vars_plugins/noop.py @@ -15,6 +15,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type class VarsModule(object): diff --git a/v2/ansible/parsing/utils/jsonify.py b/v2/ansible/parsing/utils/jsonify.py index 37c97d0195fc4e..59dbf9f8c4ce65 100644 --- a/v2/ansible/parsing/utils/jsonify.py +++ b/v2/ansible/parsing/utils/jsonify.py @@ -1,4 +1,23 @@ -# FIXME: header +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type try: import json diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py index 7242322b88faf2..92f1c64c83e8fa 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -15,6 +15,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index 33fd5efd9fa417..c7f89888b87e7a 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -16,7 +16,7 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division) +from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.errors import AnsibleError, AnsibleParserError diff --git a/v2/ansible/plugins/action/assemble.py b/v2/ansible/plugins/action/assemble.py index 638d4b92bb5568..4e796bddb6f013 100644 --- a/v2/ansible/plugins/action/assemble.py +++ b/v2/ansible/plugins/action/assemble.py @@ -15,6 +15,8 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import os.path diff --git a/v2/ansible/plugins/action/assert.py b/v2/ansible/plugins/action/assert.py index 7204d93875eaac..5c4fdd7b89c222 100644 --- a/v2/ansible/plugins/action/assert.py +++ b/v2/ansible/plugins/action/assert.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.errors import AnsibleError from ansible.playbook.conditional import Conditional diff --git a/v2/ansible/plugins/action/async.py b/v2/ansible/plugins/action/async.py index 6fbf93d61fecd7..7c02e09757eac1 100644 --- a/v2/ansible/plugins/action/async.py +++ b/v2/ansible/plugins/action/async.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import json import random diff --git a/v2/ansible/plugins/action/debug.py b/v2/ansible/plugins/action/debug.py index dcee3e6347d1ec..dc80dfc1795aa1 100644 --- a/v2/ansible/plugins/action/debug.py +++ b/v2/ansible/plugins/action/debug.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean diff --git a/v2/ansible/plugins/action/fail.py b/v2/ansible/plugins/action/fail.py index a95ccb32f74ffc..b7845c95c5cb2a 100644 --- a/v2/ansible/plugins/action/fail.py +++ b/v2/ansible/plugins/action/fail.py @@ -15,6 +15,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.plugins.action import ActionBase diff --git a/v2/ansible/plugins/action/fetch.py b/v2/ansible/plugins/action/fetch.py index 7b549f5ecbce48..58e7cebb8d2d6b 100644 --- a/v2/ansible/plugins/action/fetch.py +++ b/v2/ansible/plugins/action/fetch.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import pwd diff --git a/v2/ansible/plugins/action/group_by.py b/v2/ansible/plugins/action/group_by.py index 50e0cc09c43fc2..95db33aa43f901 100644 --- a/v2/ansible/plugins/action/group_by.py +++ b/v2/ansible/plugins/action/group_by.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.errors import * from ansible.plugins.action import ActionBase diff --git a/v2/ansible/plugins/action/include_vars.py b/v2/ansible/plugins/action/include_vars.py index 345e0edc0e9aff..8a7a74d8705029 100644 --- a/v2/ansible/plugins/action/include_vars.py +++ b/v2/ansible/plugins/action/include_vars.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os diff --git a/v2/ansible/plugins/action/normal.py b/v2/ansible/plugins/action/normal.py index 66721b4eb25452..431d9b0eebebc4 100644 --- a/v2/ansible/plugins/action/normal.py +++ b/v2/ansible/plugins/action/normal.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.plugins.action import ActionBase diff --git a/v2/ansible/plugins/action/pause.py b/v2/ansible/plugins/action/pause.py index 9c6075e1011fa2..47399fc4939de6 100644 --- a/v2/ansible/plugins/action/pause.py +++ b/v2/ansible/plugins/action/pause.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import datetime import sys diff --git a/v2/ansible/plugins/action/raw.py b/v2/ansible/plugins/action/raw.py index d1d1b280561ac9..f9cd56572b1ba5 100644 --- a/v2/ansible/plugins/action/raw.py +++ b/v2/ansible/plugins/action/raw.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.plugins.action import ActionBase diff --git a/v2/ansible/plugins/action/script.py b/v2/ansible/plugins/action/script.py index 21a9f41c59bfd1..3ca7dc6a342795 100644 --- a/v2/ansible/plugins/action/script.py +++ b/v2/ansible/plugins/action/script.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os diff --git a/v2/ansible/plugins/action/set_fact.py b/v2/ansible/plugins/action/set_fact.py index bf89e7ec51707b..a7ddf10b474a44 100644 --- a/v2/ansible/plugins/action/set_fact.py +++ b/v2/ansible/plugins/action/set_fact.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.errors import AnsibleError from ansible.plugins.action import ActionBase diff --git a/v2/ansible/plugins/action/synchronize.py b/v2/ansible/plugins/action/synchronize.py index 81e335b0098414..1bc64ff4d5bcfb 100644 --- a/v2/ansible/plugins/action/synchronize.py +++ b/v2/ansible/plugins/action/synchronize.py @@ -15,6 +15,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os.path diff --git a/v2/ansible/plugins/action/template.py b/v2/ansible/plugins/action/template.py index 76b2e78a737d62..07b406f2beb9e2 100644 --- a/v2/ansible/plugins/action/template.py +++ b/v2/ansible/plugins/action/template.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import base64 import os diff --git a/v2/ansible/plugins/action/unarchive.py b/v2/ansible/plugins/action/unarchive.py index 1b6cb354f0fdf7..b7601ed9107e39 100644 --- a/v2/ansible/plugins/action/unarchive.py +++ b/v2/ansible/plugins/action/unarchive.py @@ -15,6 +15,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import pipes diff --git a/v2/ansible/plugins/cache/__init__.py b/v2/ansible/plugins/cache/__init__.py index deed7f3ecde83c..4aa8fda8bbbfff 100644 --- a/v2/ansible/plugins/cache/__init__.py +++ b/v2/ansible/plugins/cache/__init__.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from collections import MutableMapping diff --git a/v2/ansible/plugins/cache/base.py b/v2/ansible/plugins/cache/base.py index b6254cdfd48298..6ff3d5ed1e2b2c 100644 --- a/v2/ansible/plugins/cache/base.py +++ b/v2/ansible/plugins/cache/base.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import exceptions diff --git a/v2/ansible/plugins/cache/memcached.py b/v2/ansible/plugins/cache/memcached.py index deaf07fe2e2695..135e34c2b43f23 100644 --- a/v2/ansible/plugins/cache/memcached.py +++ b/v2/ansible/plugins/cache/memcached.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import collections import os diff --git a/v2/ansible/plugins/cache/memory.py b/v2/ansible/plugins/cache/memory.py index 007719a6477673..15628361513121 100644 --- a/v2/ansible/plugins/cache/memory.py +++ b/v2/ansible/plugins/cache/memory.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.plugins.cache.base import BaseCacheModule diff --git a/v2/ansible/plugins/cache/redis.py b/v2/ansible/plugins/cache/redis.py index 7f126de64bb73f..291ce81c474371 100644 --- a/v2/ansible/plugins/cache/redis.py +++ b/v2/ansible/plugins/cache/redis.py @@ -14,9 +14,9 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type -from __future__ import absolute_import -import collections # FIXME: can we store these as something else before we ship it? import sys import time diff --git a/v2/ansible/plugins/connections/accelerate.py b/v2/ansible/plugins/connections/accelerate.py index a31124e119f655..925136ecce2c6f 100644 --- a/v2/ansible/plugins/connections/accelerate.py +++ b/v2/ansible/plugins/connections/accelerate.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import json import os diff --git a/v2/ansible/plugins/connections/chroot.py b/v2/ansible/plugins/connections/chroot.py index 38c8af7a69096c..4e61f4ea559e01 100644 --- a/v2/ansible/plugins/connections/chroot.py +++ b/v2/ansible/plugins/connections/chroot.py @@ -15,6 +15,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import distutils.spawn import traceback diff --git a/v2/ansible/plugins/connections/funcd.py b/v2/ansible/plugins/connections/funcd.py index 7244abcbe9a65b..83a0c9b01d302f 100644 --- a/v2/ansible/plugins/connections/funcd.py +++ b/v2/ansible/plugins/connections/funcd.py @@ -18,6 +18,9 @@ # along with Ansible. If not, see . # --- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + # The func transport permit to use ansible over func. For people who have already setup # func and that wish to play with ansible, this permit to move gradually to ansible # without having to redo completely the setup of the network. diff --git a/v2/ansible/plugins/connections/jail.py b/v2/ansible/plugins/connections/jail.py index b721ad62b50ab1..a81f587bfd06de 100644 --- a/v2/ansible/plugins/connections/jail.py +++ b/v2/ansible/plugins/connections/jail.py @@ -16,6 +16,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import distutils.spawn import traceback diff --git a/v2/ansible/plugins/connections/libvirt_lxc.py b/v2/ansible/plugins/connections/libvirt_lxc.py index c6cf11f2667fb2..ee824554a0212d 100644 --- a/v2/ansible/plugins/connections/libvirt_lxc.py +++ b/v2/ansible/plugins/connections/libvirt_lxc.py @@ -16,6 +16,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import distutils.spawn import os diff --git a/v2/ansible/plugins/connections/local.py b/v2/ansible/plugins/connections/local.py index 31d0b296e4aee1..73583974bf025c 100644 --- a/v2/ansible/plugins/connections/local.py +++ b/v2/ansible/plugins/connections/local.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import traceback import os diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/v2/ansible/plugins/connections/paramiko_ssh.py index 4bb06e01c36147..4562eaa86e3390 100644 --- a/v2/ansible/plugins/connections/paramiko_ssh.py +++ b/v2/ansible/plugins/connections/paramiko_ssh.py @@ -14,7 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type # --- # The paramiko transport is provided because many distributions, in particular EL6 and before diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py index e59311ead96df6..2c8f8de8135abb 100644 --- a/v2/ansible/plugins/connections/ssh.py +++ b/v2/ansible/plugins/connections/ssh.py @@ -15,6 +15,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . # +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import re diff --git a/v2/ansible/plugins/connections/winrm.py b/v2/ansible/plugins/connections/winrm.py index d6e51710b5f27a..bb704d405c7d1c 100644 --- a/v2/ansible/plugins/connections/winrm.py +++ b/v2/ansible/plugins/connections/winrm.py @@ -14,8 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . - -from __future__ import absolute_import +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import base64 import hashlib diff --git a/v2/ansible/plugins/inventory/directory.py b/v2/ansible/plugins/inventory/directory.py index d340ed753870c9..a75ad44ea6cfb7 100644 --- a/v2/ansible/plugins/inventory/directory.py +++ b/v2/ansible/plugins/inventory/directory.py @@ -18,7 +18,7 @@ ############################################# # Make coding more python3-ish -from __future__ import (division, print_function) +from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os diff --git a/v2/ansible/plugins/lookup/cartesian.py b/v2/ansible/plugins/lookup/cartesian.py index cc74240826a74b..c50d53e7f80aa2 100644 --- a/v2/ansible/plugins/lookup/cartesian.py +++ b/v2/ansible/plugins/lookup/cartesian.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from itertools import product diff --git a/v2/ansible/plugins/lookup/csvfile.py b/v2/ansible/plugins/lookup/csvfile.py index 87757399ce5b79..b67b6bcd1c45b3 100644 --- a/v2/ansible/plugins/lookup/csvfile.py +++ b/v2/ansible/plugins/lookup/csvfile.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import codecs diff --git a/v2/ansible/plugins/lookup/dict.py b/v2/ansible/plugins/lookup/dict.py index 61389df7c2e26d..cc7975ae499993 100644 --- a/v2/ansible/plugins/lookup/dict.py +++ b/v2/ansible/plugins/lookup/dict.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.plugins.lookup import LookupBase diff --git a/v2/ansible/plugins/lookup/dnstxt.py b/v2/ansible/plugins/lookup/dnstxt.py index 7100f8d96dfc39..07451079fece31 100644 --- a/v2/ansible/plugins/lookup/dnstxt.py +++ b/v2/ansible/plugins/lookup/dnstxt.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os diff --git a/v2/ansible/plugins/lookup/env.py b/v2/ansible/plugins/lookup/env.py index 896f95e13a96d2..55847dd7779fb4 100644 --- a/v2/ansible/plugins/lookup/env.py +++ b/v2/ansible/plugins/lookup/env.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os diff --git a/v2/ansible/plugins/lookup/etcd.py b/v2/ansible/plugins/lookup/etcd.py index 5b54788985b07a..002068389f8357 100644 --- a/v2/ansible/plugins/lookup/etcd.py +++ b/v2/ansible/plugins/lookup/etcd.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import urllib2 diff --git a/v2/ansible/plugins/lookup/file.py b/v2/ansible/plugins/lookup/file.py index add4da7f47b5d7..efb039497dd89b 100644 --- a/v2/ansible/plugins/lookup/file.py +++ b/v2/ansible/plugins/lookup/file.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import codecs diff --git a/v2/ansible/plugins/lookup/fileglob.py b/v2/ansible/plugins/lookup/fileglob.py index bde016af9e40f2..898590671503b1 100644 --- a/v2/ansible/plugins/lookup/fileglob.py +++ b/v2/ansible/plugins/lookup/fileglob.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import glob diff --git a/v2/ansible/plugins/lookup/first_found.py b/v2/ansible/plugins/lookup/first_found.py index 0ed268801508e2..e2ae2eb214c502 100644 --- a/v2/ansible/plugins/lookup/first_found.py +++ b/v2/ansible/plugins/lookup/first_found.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type # take a list of files and (optionally) a list of paths # return the first existing file found in the paths diff --git a/v2/ansible/plugins/lookup/flattened.py b/v2/ansible/plugins/lookup/flattened.py index 24f1a9ac950e4b..f0a8adaf5e65ac 100644 --- a/v2/ansible/plugins/lookup/flattened.py +++ b/v2/ansible/plugins/lookup/flattened.py @@ -14,7 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . - +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.errors import * from ansible.plugins.lookup import LookupBase diff --git a/v2/ansible/plugins/lookup/indexed_items.py b/v2/ansible/plugins/lookup/indexed_items.py index 1731dc0e847b10..4f1dd199471e50 100644 --- a/v2/ansible/plugins/lookup/indexed_items.py +++ b/v2/ansible/plugins/lookup/indexed_items.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.plugins.lookup import LookupBase diff --git a/v2/ansible/plugins/lookup/inventory_hostnames.py b/v2/ansible/plugins/lookup/inventory_hostnames.py index faffe47eb85c8f..d09dec0c7b5ae3 100644 --- a/v2/ansible/plugins/lookup/inventory_hostnames.py +++ b/v2/ansible/plugins/lookup/inventory_hostnames.py @@ -16,6 +16,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + from ansible.errors import * from ansible.plugins.lookup import LookupBase diff --git a/v2/ansible/plugins/lookup/items.py b/v2/ansible/plugins/lookup/items.py index 46925d2a8ba71b..65ff66d854a3a7 100644 --- a/v2/ansible/plugins/lookup/items.py +++ b/v2/ansible/plugins/lookup/items.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.plugins.lookup import LookupBase diff --git a/v2/ansible/plugins/lookup/lines.py b/v2/ansible/plugins/lookup/lines.py index 507793b18e9711..0d842bf148ff0d 100644 --- a/v2/ansible/plugins/lookup/lines.py +++ b/v2/ansible/plugins/lookup/lines.py @@ -15,8 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import subprocess +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type +import subprocess from ansible.errors import * from ansible.plugins.lookup import LookupBase diff --git a/v2/ansible/plugins/lookup/nested.py b/v2/ansible/plugins/lookup/nested.py index 0f2d146b4780b7..52f4bed1d52410 100644 --- a/v2/ansible/plugins/lookup/nested.py +++ b/v2/ansible/plugins/lookup/nested.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase diff --git a/v2/ansible/plugins/lookup/password.py b/v2/ansible/plugins/lookup/password.py index 6e13410e1ab67e..d262ed79c44458 100644 --- a/v2/ansible/plugins/lookup/password.py +++ b/v2/ansible/plugins/lookup/password.py @@ -16,6 +16,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import errno diff --git a/v2/ansible/plugins/lookup/pipe.py b/v2/ansible/plugins/lookup/pipe.py index 0a7e5cb31ae97e..d9f74708b28510 100644 --- a/v2/ansible/plugins/lookup/pipe.py +++ b/v2/ansible/plugins/lookup/pipe.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import subprocess diff --git a/v2/ansible/plugins/lookup/random_choice.py b/v2/ansible/plugins/lookup/random_choice.py index e899a2dbe3c56c..de4f31cd0eb134 100644 --- a/v2/ansible/plugins/lookup/random_choice.py +++ b/v2/ansible/plugins/lookup/random_choice.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import random diff --git a/v2/ansible/plugins/lookup/redis_kv.py b/v2/ansible/plugins/lookup/redis_kv.py index 08895d4c4ec254..e499e83f9383d3 100644 --- a/v2/ansible/plugins/lookup/redis_kv.py +++ b/v2/ansible/plugins/lookup/redis_kv.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import re diff --git a/v2/ansible/plugins/lookup/sequence.py b/v2/ansible/plugins/lookup/sequence.py index 99783cf566b778..1ddeba932f8523 100644 --- a/v2/ansible/plugins/lookup/sequence.py +++ b/v2/ansible/plugins/lookup/sequence.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from re import compile as re_compile, IGNORECASE diff --git a/v2/ansible/plugins/lookup/subelements.py b/v2/ansible/plugins/lookup/subelements.py index 93e9e570c41ed5..09a2ca306a11ee 100644 --- a/v2/ansible/plugins/lookup/subelements.py +++ b/v2/ansible/plugins/lookup/subelements.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.errors import * from ansible.plugins.lookup import LookupBase diff --git a/v2/ansible/plugins/lookup/template.py b/v2/ansible/plugins/lookup/template.py index 74406f64458c1a..e53e1990a0dfcb 100644 --- a/v2/ansible/plugins/lookup/template.py +++ b/v2/ansible/plugins/lookup/template.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os diff --git a/v2/ansible/plugins/lookup/together.py b/v2/ansible/plugins/lookup/together.py index 8b5ff5c89193af..2f53121cc8bb33 100644 --- a/v2/ansible/plugins/lookup/together.py +++ b/v2/ansible/plugins/lookup/together.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from itertools import izip_longest diff --git a/v2/ansible/plugins/lookup/url.py b/v2/ansible/plugins/lookup/url.py index c907bfbce3965a..59a26ae5413f01 100644 --- a/v2/ansible/plugins/lookup/url.py +++ b/v2/ansible/plugins/lookup/url.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.plugins.lookup import LookupBase import urllib2 diff --git a/v2/ansible/plugins/shell/csh.py b/v2/ansible/plugins/shell/csh.py index 4e9f8c8af742f0..96ec84c5bf833b 100644 --- a/v2/ansible/plugins/shell/csh.py +++ b/v2/ansible/plugins/shell/csh.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.runner.shell_plugins.sh import ShellModule as ShModule diff --git a/v2/ansible/plugins/shell/fish.py b/v2/ansible/plugins/shell/fish.py index 137c013c12fa3e..53fa9abada6297 100644 --- a/v2/ansible/plugins/shell/fish.py +++ b/v2/ansible/plugins/shell/fish.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type from ansible.runner.shell_plugins.sh import ShellModule as ShModule diff --git a/v2/ansible/plugins/shell/powershell.py b/v2/ansible/plugins/shell/powershell.py index 7254df6f7ea63c..9f3825c3b0f9a1 100644 --- a/v2/ansible/plugins/shell/powershell.py +++ b/v2/ansible/plugins/shell/powershell.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import base64 import os diff --git a/v2/ansible/plugins/shell/sh.py b/v2/ansible/plugins/shell/sh.py index 5fb0dc3add3f76..497d45eace2ea4 100644 --- a/v2/ansible/plugins/shell/sh.py +++ b/v2/ansible/plugins/shell/sh.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import re diff --git a/v2/ansible/template/safe_eval.py b/v2/ansible/template/safe_eval.py index ba377054d7ad2e..8dafa433878483 100644 --- a/v2/ansible/template/safe_eval.py +++ b/v2/ansible/template/safe_eval.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import ast import sys diff --git a/v2/ansible/utils/color.py b/v2/ansible/utils/color.py index a87717073ebf67..37d0466d2d199a 100644 --- a/v2/ansible/utils/color.py +++ b/v2/ansible/utils/color.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import sys diff --git a/v2/ansible/utils/debug.py b/v2/ansible/utils/debug.py index 3b37ac50a78003..5b04ac05726d4e 100644 --- a/v2/ansible/utils/debug.py +++ b/v2/ansible/utils/debug.py @@ -1,3 +1,6 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import os import time import sys diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index 62dbeabca51f69..e30ae225cfa144 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -16,6 +16,8 @@ # along with Ansible. If not, see . 
# FIXME: copied mostly from old code, needs py3 improvements +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import textwrap import sys diff --git a/v2/ansible/utils/encrypt.py b/v2/ansible/utils/encrypt.py index 878b461c86d9d5..5138dbef70570f 100644 --- a/v2/ansible/utils/encrypt.py +++ b/v2/ansible/utils/encrypt.py @@ -14,6 +14,9 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + PASSLIB_AVAILABLE = False try: diff --git a/v2/ansible/utils/path.py b/v2/ansible/utils/path.py index ea7fc201a891e4..e49a2f7d5533d1 100644 --- a/v2/ansible/utils/path.py +++ b/v2/ansible/utils/path.py @@ -14,6 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import stat From 683c2913c528aa13598cebb7526934cfbd318c2c Mon Sep 17 00:00:00 2001 From: Amandine Lee Date: Mon, 13 Apr 2015 13:28:16 -0700 Subject: [PATCH 0301/3617] Use print function --- v2/ansible/plugins/action/pause.py | 4 ++-- v2/ansible/plugins/cache/memcached.py | 2 +- v2/ansible/plugins/cache/redis.py | 2 +- v2/ansible/utils/display.py | 8 ++++---- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/v2/ansible/plugins/action/pause.py b/v2/ansible/plugins/action/pause.py index 47399fc4939de6..fb35e9601ff4b3 100644 --- a/v2/ansible/plugins/action/pause.py +++ b/v2/ansible/plugins/action/pause.py @@ -101,7 +101,7 @@ def run(self, tmp=None, task_vars=dict()): try: if not pause_type == 'prompt': - print "(^C-c = continue early, ^C-a = abort)" + print("(^C-c = continue early, ^C-a = abort)") #print("[%s]\nPausing for %s seconds" % (hosts, seconds)) print("[%s]\nPausing for %s seconds" % (self._task.get_name().strip(), seconds)) time.sleep(seconds) @@ -112,7 +112,7 @@ 
def run(self, tmp=None, task_vars=dict()): result['user_input'] = raw_input(prompt.encode(sys.stdout.encoding)) except KeyboardInterrupt: while True: - print '\nAction? (a)bort/(c)ontinue: ' + print('\nAction? (a)bort/(c)ontinue: ') c = getch() if c == 'c': # continue playbook evaluation diff --git a/v2/ansible/plugins/cache/memcached.py b/v2/ansible/plugins/cache/memcached.py index 135e34c2b43f23..e7321a5a6b5c87 100644 --- a/v2/ansible/plugins/cache/memcached.py +++ b/v2/ansible/plugins/cache/memcached.py @@ -30,7 +30,7 @@ try: import memcache except ImportError: - print 'python-memcached is required for the memcached fact cache' + print('python-memcached is required for the memcached fact cache') sys.exit(1) diff --git a/v2/ansible/plugins/cache/redis.py b/v2/ansible/plugins/cache/redis.py index 291ce81c474371..287c14bd2a2bf4 100644 --- a/v2/ansible/plugins/cache/redis.py +++ b/v2/ansible/plugins/cache/redis.py @@ -28,7 +28,7 @@ try: from redis import StrictRedis except ImportError: - print "The 'redis' python module is required, 'pip install redis'" + print("The 'redis' python module is required, 'pip install redis'") sys.exit(1) class CacheModule(BaseCacheModule): diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index e30ae225cfa144..f132d4383f9b48 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -43,14 +43,14 @@ def display(self, msg, color=None, stderr=False, screen_only=False, log_only=Fal if not log_only: if not stderr: try: - print msg2 + print(msg2) except UnicodeEncodeError: - print msg2.encode('utf-8') + print(msg2.encode('utf-8')) else: try: - print >>sys.stderr, msg2 + print(msg2, file=sys.stderr) except UnicodeEncodeError: - print >>sys.stderr, msg2.encode('utf-8') + print(msg2.encode('utf-8'), file=sys.stderr) if C.DEFAULT_LOG_PATH != '': while msg.startswith("\n"): msg = msg.replace("\n","") From d85f97ccfd4e3c61f479c7055088bb8d4d74a51d Mon Sep 17 00:00:00 2001 From: Ian Dotson Date: Mon, 13 Apr 
2015 16:31:16 -0400 Subject: [PATCH 0302/3617] Change how we're calling StringIO since we're now importing the class rather than a module. --- v2/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index be83539def6ddb..30d1641090e964 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -319,7 +319,7 @@ def _filter_leading_non_json_lines(self, data): filter only leading lines since multiline JSON is valid. ''' - filtered_lines = StringIO.StringIO() + filtered_lines = StringIO() stop_filtering = False for line in data.splitlines(): if stop_filtering or line.startswith('{') or line.startswith('['): From 4b889bbe3d93dfad36bcd8a648d21b1d3414ec20 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 13 Apr 2015 13:43:25 -0700 Subject: [PATCH 0303/3617] Add six to the v2 test-requirements --- v2/test-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/v2/test-requirements.txt b/v2/test-requirements.txt index 97a75d3cb5c767..ca5bcae0d98083 100644 --- a/v2/test-requirements.txt +++ b/v2/test-requirements.txt @@ -4,6 +4,7 @@ PyYAML jinja2 httplib2 passlib +six # Test requirements unittest2 From 6e12117b04937b76bbc3ee96f7d3eb66247b645f Mon Sep 17 00:00:00 2001 From: Ian Dotson Date: Mon, 13 Apr 2015 17:01:00 -0400 Subject: [PATCH 0304/3617] Import queue from six.moves for python3 compatibility. 
--- v2/ansible/executor/process/result.py | 6 +++--- v2/ansible/executor/process/worker.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/v2/ansible/executor/process/result.py b/v2/ansible/executor/process/result.py index 761db21fe69a64..f0416db852d3b6 100644 --- a/v2/ansible/executor/process/result.py +++ b/v2/ansible/executor/process/result.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import Queue +from six.moves import queue import multiprocessing import os import signal @@ -77,7 +77,7 @@ def _read_worker_result(self): result = rslt_q.get(block=False) debug("got a result from worker %d: %s" % (self._cur_worker, result)) break - except Queue.Empty: + except queue.Empty: pass if self._cur_worker == starting_point: @@ -164,7 +164,7 @@ def run(self): if result._task.register: self._send_result(('set_host_var', result._host, result._task.register, result._result)) - except Queue.Empty: + except queue.Empty: pass except (KeyboardInterrupt, IOError, EOFError): break diff --git a/v2/ansible/executor/process/worker.py b/v2/ansible/executor/process/worker.py index bf5ee8c93f0480..8e624fe401e6ca 100644 --- a/v2/ansible/executor/process/worker.py +++ b/v2/ansible/executor/process/worker.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import Queue +from six.moves import queue import multiprocessing import os import signal @@ -130,7 +130,7 @@ def run(self): else: time.sleep(0.1) - except Queue.Empty: + except queue.Empty: pass except (IOError, EOFError, KeyboardInterrupt): break From 08feaea077e2aebe3cef0d9d2cf4e2e28f6068f2 Mon Sep 17 00:00:00 2001 From: Amandine Lee Date: Mon, 13 Apr 2015 14:03:10 -0700 Subject: [PATCH 0305/3617] Fix plugin imports with six --- v2/ansible/plugins/connections/winrm.py | 3 ++- v2/ansible/plugins/strategies/__init__.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git 
a/v2/ansible/plugins/connections/winrm.py b/v2/ansible/plugins/connections/winrm.py index f94141b81b6ef5..f3d6a03ba07eac 100644 --- a/v2/ansible/plugins/connections/winrm.py +++ b/v2/ansible/plugins/connections/winrm.py @@ -24,7 +24,8 @@ import re import shlex import traceback -import urlparse + +from six.moves.urllib import parse as urlparse from ansible import errors from ansible import utils from ansible.callbacks import vvv, vvvv, verbose diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py index c5b3dd0f066731..3f160d84dbc1f4 100644 --- a/v2/ansible/plugins/strategies/__init__.py +++ b/v2/ansible/plugins/strategies/__init__.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import Queue +import six.moves import queue as Queue import time from ansible.errors import * From 164cfdfda4780973272b9cc63dcf376a36317b0e Mon Sep 17 00:00:00 2001 From: Amandine Lee Date: Mon, 13 Apr 2015 14:28:00 -0700 Subject: [PATCH 0306/3617] Fix typo --- v2/ansible/plugins/strategies/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py index 3f160d84dbc1f4..9b26ff23a7f0e6 100644 --- a/v2/ansible/plugins/strategies/__init__.py +++ b/v2/ansible/plugins/strategies/__init__.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import six.moves import queue as Queue +from six.moves import queue as Queue import time from ansible.errors import * From d71834d1d2ae92edc4f9975ddcc8d1e72127d737 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Apr 2015 10:56:10 -0500 Subject: [PATCH 0307/3617] Moving setting of options values to after play again Moving this above the play setting means that any default values set in play (like connection) override any corresponding CLI option, which is wrong. 
Generally CLI options should override things set in playbooks --- v2/ansible/executor/connection_info.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index 19c8b130c72f66..ace2252e3ad9aa 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -64,14 +64,14 @@ def __init__(self, play=None, options=None, passwords=None): self.no_log = False self.check_mode = False + if play: + self.set_play(play) + #TODO: just pull options setup to above? # set options before play to allow play to override them if options: self.set_options(options) - if play: - self.set_play(play) - def __repr__(self): value = "CONNECTION INFO:\n" From d5a7cd0efceb51f509a4d2619e4d54f4726233ad Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Apr 2015 12:44:04 -0400 Subject: [PATCH 0308/3617] bad hack to maybe fix some corner cases with pbrun custom prompts --- lib/ansible/utils/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 07e8174893fc39..7ed07a54c840d3 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1250,6 +1250,7 @@ def make_become_cmd(cmd, user, shell, method, flags=None, exe=None): becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd))) elif method == 'pbrun': + prompt = 'assword:' exe = exe or 'pbrun' flags = flags or '' becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd))) From 0345b675f87bcc19ef31d6423d7a8915c5ddd6bc Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Apr 2015 12:03:50 -0500 Subject: [PATCH 0309/3617] Rather than moving connection option setting, fix defaults This reverts the previous commit (d71834d) and instead fixes the problem by making sure that options used by ConnectionInformation 
do not have defaults set in the playbook objects so they're properly inherited from the CLI options object if not otherwise specified in the play --- v2/ansible/executor/connection_info.py | 6 +++--- v2/ansible/playbook/play.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index ace2252e3ad9aa..19c8b130c72f66 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -64,14 +64,14 @@ def __init__(self, play=None, options=None, passwords=None): self.no_log = False self.check_mode = False - if play: - self.set_play(play) - #TODO: just pull options setup to above? # set options before play to allow play to override them if options: self.set_options(options) + if play: + self.set_play(play) + def __repr__(self): value = "CONNECTION INFO:\n" diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index c7f89888b87e7a..fef40568abfbe7 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -56,11 +56,11 @@ class Play(Base, Taggable, Become): _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port # Connection - _connection = FieldAttribute(isa='string', default='smart') + _connection = FieldAttribute(isa='string') _gather_facts = FieldAttribute(isa='string', default='smart') _hosts = FieldAttribute(isa='list', default=[], required=True) _name = FieldAttribute(isa='string', default='') - _port = FieldAttribute(isa='int', default=22) + _port = FieldAttribute(isa='int') _remote_user = FieldAttribute(isa='string') # Variable Attributes From 09efba2a7cf937b6b738824d71c3b297dce13a2d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Apr 2015 14:41:31 -0400 Subject: [PATCH 0310/3617] fixed indent when looking at delegate_to vars --- lib/ansible/runner/__init__.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py index 70619ecc0540e4..c153cd78ba631d 100644 --- a/lib/ansible/runner/__init__.py +++ b/lib/ansible/runner/__init__.py @@ -394,11 +394,11 @@ def _compute_delegate_user(self, host, inject): if inject['hostvars'][host].get('ansible_ssh_user'): # user for delegate host in inventory thisuser = inject['hostvars'][host].get('ansible_ssh_user') - else: - # look up the variables for the host directly from inventory - host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) - if 'ansible_ssh_user' in host_vars: - thisuser = host_vars['ansible_ssh_user'] + else: + # look up the variables for the host directly from inventory + host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass) + if 'ansible_ssh_user' in host_vars: + thisuser = host_vars['ansible_ssh_user'] except errors.AnsibleError, e: # the hostname was not found in the inventory, so # we just ignore this and try the next method From 8592ffb5738e39d86ff51182c9e6072e22437bb8 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 15:23:59 -0400 Subject: [PATCH 0311/3617] Add back AnsibleFileNotFound ( used in connection plugins ) --- v2/ansible/errors/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index bdd6e524489c72..04beb2b3caf005 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -167,3 +167,7 @@ class AnsibleFilterError(AnsibleRuntimeError): class AnsibleUndefinedVariable(AnsibleRuntimeError): ''' a templating failure ''' pass + +class AnsibleFileNotFound(AnsibleRuntimeError): + ''' a file missing failure ''' + pass From 46beaf8a47c928f0e7de26a96de25e9d65e0a385 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Apr 2015 14:38:47 -0500 Subject: [PATCH 0312/3617] Submodule update --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/lib/ansible/modules/core b/lib/ansible/modules/core index 5f58240d176a74..74e69d1fd16957 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 5f58240d176a74b8eb0da0b45cf60e498d11ab34 +Subproject commit 74e69d1fd16957ff84408eac0d28a0c8ef78225c From 6957d66a2630ea6eb624372234c93e44b1977d98 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 15:43:02 -0400 Subject: [PATCH 0313/3617] Do not import all ansible errors and fix the exception raised --- v2/ansible/inventory/dir.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/v2/ansible/inventory/dir.py b/v2/ansible/inventory/dir.py index 73c882f288fb25..735f32d62c35a6 100644 --- a/v2/ansible/inventory/dir.py +++ b/v2/ansible/inventory/dir.py @@ -23,7 +23,7 @@ import os from ansible import constants as C -from ansible.errors import * +from ansible.errors import AnsibleError from ansible.inventory.host import Host from ansible.inventory.group import Group @@ -160,7 +160,7 @@ def _merge_groups(self, group, newgroup): # name if group.name != newgroup.name: - raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name)) + raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name)) # depth group.depth = max([group.depth, newgroup.depth]) @@ -210,7 +210,7 @@ def _merge_hosts(self,host, newhost): # name if host.name != newhost.name: - raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name)) + raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name)) # group membership relation for newgroup in newhost.groups: From 28f51233c822b794c75af109124e736f6f344775 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 15:50:31 -0400 Subject: [PATCH 0314/3617] Add another error, who seems to be all over the place --- v2/ansible/errors/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/v2/ansible/errors/__init__.py 
b/v2/ansible/errors/__init__.py index 04beb2b3caf005..453e63de6e3c3b 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -171,3 +171,7 @@ class AnsibleUndefinedVariable(AnsibleRuntimeError): class AnsibleFileNotFound(AnsibleRuntimeError): ''' a file missing failure ''' pass + +class AnsibleParserError(AnsibleRuntimeError): + ''' a parser error ''' + pass From f641b91594cb40cf34629793935453d5e484c3d1 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 16:03:54 -0400 Subject: [PATCH 0315/3617] Pylint show a error "no deprecations variable" --- v2/ansible/utils/display.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index f132d4383f9b48..ed43da8623bdd1 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -100,7 +100,7 @@ def deprecated(self, msg, version, removed=False): wrapped = textwrap.wrap(new_msg, 79) new_msg = "\n".join(wrapped) + "\n" - if new_msg not in deprecations: + if new_msg not in self._deprecations: self.display(new_msg, color='purple', stderr=True) self._deprecations[new_msg] = 1 From de57459dd40e6945ae0adf201bf16d7082f1ae0f Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 16:10:19 -0400 Subject: [PATCH 0316/3617] Pylint warning, the method _warning is called warning --- v2/ansible/utils/display.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index f132d4383f9b48..3c96ffec67a5f1 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -114,7 +114,7 @@ def warning(self, msg): def system_warning(self, msg): if C.SYSTEM_WARNINGS: - self._warning(msg) + self.warning(msg) def banner(self, msg, color=None): ''' From 65f6f76323e7a5fcb1461de6173f94fce6b41f89 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 16:13:33 -0400 Subject: [PATCH 0317/3617] Missing import for 
ansible.errors (pylint) --- v2/ansible/utils/hashing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/v2/ansible/utils/hashing.py b/v2/ansible/utils/hashing.py index 2c7dd534fcb28c..5e378db79f49c5 100644 --- a/v2/ansible/utils/hashing.py +++ b/v2/ansible/utils/hashing.py @@ -20,6 +20,7 @@ __metaclass__ = type import os +from ansible.errors import AnsibleError # Note, sha1 is the only hash algorithm compatible with python2.4 and with # FIPS-140 mode (as of 11-2014) @@ -65,7 +66,7 @@ def secure_hash(filename, hash_func=sha1): block = infile.read(blocksize) infile.close() except IOError as e: - raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) + raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) return digest.hexdigest() # The checksum algorithm must match with the algorithm in ShellModule.checksum() method From 131ce117a979f6d592bd9e6c7a70016208040698 Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Tue, 14 Apr 2015 15:07:31 -0400 Subject: [PATCH 0318/3617] Updated the tox.ini file to run multiple ansible versions. Purpose: so that devs can use tox to run v1 or v2 of ansible with various versions of python. For example `tox -e py27-v2 will run python2.7 on v2. Currently, only py26 and py27 are run on v1 when running just `tox` so that we aren't breaking builds. 
--- tox.ini | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 7c86e7e08f1ff4..5691980b607a2a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,7 +1,21 @@ [tox] -envlist = py26,py27 +envlist = {py26,py27}-v{1} [testenv] deps = -r{toxinidir}/test-requirements.txt whitelist_externals = make + +[testenv:py26-v1] +commands = make tests + +[testenv:py27-v1] commands = make tests + +[testenv:py26-v2] +commands = make newtests + +[testenv:py27-v2] +commands = make newtests + +[testenv:py34-v2] +commands = make newtests From 1566a90fcd56bdc61cad83939e526f012d7ecda3 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 16:36:39 -0400 Subject: [PATCH 0319/3617] Fix the exception name ( AnsibleParserError, not AnsibleParsingError ) --- v2/ansible/playbook/play.py | 2 +- v2/ansible/plugins/strategies/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index fef40568abfbe7..e96e7826776724 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -144,7 +144,7 @@ def _load_vars(self, attr, ds): else: raise ValueError except ValueError: - raise AnsibleParsingError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds) + raise AnsibleParserError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds) def _load_tasks(self, attr, ds): ''' diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py index 9b26ff23a7f0e6..d01360463b6f99 100644 --- a/v2/ansible/plugins/strategies/__init__.py +++ b/v2/ansible/plugins/strategies/__init__.py @@ -303,7 +303,7 @@ def _load_included_file(self, included_file): data = self._loader.load_from_file(included_file._filename) if not isinstance(data, list): - raise AnsibleParsingError("included task files must contain a list of tasks", obj=included_file._task._ds) + raise 
AnsibleParserError("included task files must contain a list of tasks", obj=included_file._task._ds) is_handler = isinstance(included_file._task, Handler) block_list = load_list_of_blocks( From 996bd058235e115ba3f5e05e9cf3cf0766390c58 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 14 Apr 2015 12:07:30 -0700 Subject: [PATCH 0320/3617] Revert "Rather than moving connection option setting, fix defaults" This reverts commit 0345b675f87bcc19ef31d6423d7a8915c5ddd6bc. --- v2/ansible/executor/connection_info.py | 6 +++--- v2/ansible/playbook/play.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index 19c8b130c72f66..ace2252e3ad9aa 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -64,14 +64,14 @@ def __init__(self, play=None, options=None, passwords=None): self.no_log = False self.check_mode = False + if play: + self.set_play(play) + #TODO: just pull options setup to above? 
# set options before play to allow play to override them if options: self.set_options(options) - if play: - self.set_play(play) - def __repr__(self): value = "CONNECTION INFO:\n" diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index fef40568abfbe7..c7f89888b87e7a 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -56,11 +56,11 @@ class Play(Base, Taggable, Become): _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port # Connection - _connection = FieldAttribute(isa='string') + _connection = FieldAttribute(isa='string', default='smart') _gather_facts = FieldAttribute(isa='string', default='smart') _hosts = FieldAttribute(isa='list', default=[], required=True) _name = FieldAttribute(isa='string', default='') - _port = FieldAttribute(isa='int') + _port = FieldAttribute(isa='int', default=22) _remote_user = FieldAttribute(isa='string') # Variable Attributes From 37b4b68e8377bd0daf76667890c05ab461790b77 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 14 Apr 2015 13:40:21 -0700 Subject: [PATCH 0321/3617] Use six.moves to find configparser instead of our compat code --- v2/ansible/compat/configparser.py | 30 ------------------------------ v2/ansible/constants.py | 2 +- 2 files changed, 1 insertion(+), 31 deletions(-) delete mode 100644 v2/ansible/compat/configparser.py diff --git a/v2/ansible/compat/configparser.py b/v2/ansible/compat/configparser.py deleted file mode 100644 index 7cce642376357c..00000000000000 --- a/v2/ansible/compat/configparser.py +++ /dev/null @@ -1,30 +0,0 @@ -# (c) 2014, Toshio Kuratomi -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Compat module for Python3.x's configparser -''' - -# Python 2.7 -try: - from configparser import * -except ImportError: - from ConfigParser import * diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 72b571ebb8034e..5932db0b2ce75a 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -23,7 +23,7 @@ import pwd import sys -from . compat import configparser +from six.moves import configparser from string import ascii_letters, digits # copied from utils, avoid circular reference fun :) From 0c74b356d2f65d4e68d81f51a409bb6f31721efa Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 16:19:59 -0400 Subject: [PATCH 0322/3617] Add a import for 'builtins' module, used in CleansingNodeVisitor. This was previously done by ./lib/ansible/utils/__init__.py, but this code is no longer here in v2 anymore. And since the module got renamed in python3 to builtins ( https://docs.python.org/3/library/builtins.html ), we have to use six. 
--- v2/ansible/template/safe_eval.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/v2/ansible/template/safe_eval.py b/v2/ansible/template/safe_eval.py index 81db8b2333cc3d..268994950443d4 100644 --- a/v2/ansible/template/safe_eval.py +++ b/v2/ansible/template/safe_eval.py @@ -20,6 +20,8 @@ import ast import sys +from six.moves import builtins + from ansible import constants as C from ansible.plugins import filter_loader @@ -84,7 +86,7 @@ def generic_visit(self, node, inside_call=False): elif isinstance(node, ast.Call): inside_call = True elif isinstance(node, ast.Name) and inside_call: - if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST: + if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST: raise Exception("invalid function: %s" % node.id) # iterate over all child nodes for child_node in ast.iter_child_nodes(node): From c0c115317ac3483424a53c3fd41af7926e00aa34 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 16:56:35 -0400 Subject: [PATCH 0323/3617] Add missing imports. 
They are used later in mkdtmp, needed by action plugins --- v2/ansible/plugins/shell/sh.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/v2/ansible/plugins/shell/sh.py b/v2/ansible/plugins/shell/sh.py index 497d45eace2ea4..628df9bbfbf738 100644 --- a/v2/ansible/plugins/shell/sh.py +++ b/v2/ansible/plugins/shell/sh.py @@ -21,6 +21,8 @@ import re import pipes import ansible.constants as C +import time +import random _USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$') From 674d1e72f6624f876f4ae9ee479dce780a2851e7 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 17:05:02 -0400 Subject: [PATCH 0324/3617] Rename the import, since the directory was renamed for v2 --- v2/ansible/plugins/shell/csh.py | 2 +- v2/ansible/plugins/shell/fish.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/plugins/shell/csh.py b/v2/ansible/plugins/shell/csh.py index 96ec84c5bf833b..29751f73ee7537 100644 --- a/v2/ansible/plugins/shell/csh.py +++ b/v2/ansible/plugins/shell/csh.py @@ -17,7 +17,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.runner.shell_plugins.sh import ShellModule as ShModule +from ansible.plugins.shell.sh import ShellModule as ShModule class ShellModule(ShModule): diff --git a/v2/ansible/plugins/shell/fish.py b/v2/ansible/plugins/shell/fish.py index 53fa9abada6297..ff78941e19c028 100644 --- a/v2/ansible/plugins/shell/fish.py +++ b/v2/ansible/plugins/shell/fish.py @@ -17,7 +17,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.runner.shell_plugins.sh import ShellModule as ShModule +from ansible.plugins.shell.sh import ShellModule as ShModule class ShellModule(ShModule): From 570f9db6bf313155822447772104e8ad0cb1b0ef Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 17:42:57 -0400 Subject: [PATCH 0325/3617] Add/correct missing imports for AnsibleError in v2 lookup plugins 
--- v2/ansible/plugins/lookup/cartesian.py | 4 ++-- v2/ansible/plugins/lookup/dict.py | 3 ++- v2/ansible/plugins/lookup/indexed_items.py | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/v2/ansible/plugins/lookup/cartesian.py b/v2/ansible/plugins/lookup/cartesian.py index c50d53e7f80aa2..7d8e08cb94da8d 100644 --- a/v2/ansible/plugins/lookup/cartesian.py +++ b/v2/ansible/plugins/lookup/cartesian.py @@ -19,7 +19,7 @@ from itertools import product -from ansible.errors import * +from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.utils.listify import listify_lookup_plugin_terms @@ -42,7 +42,7 @@ def run(self, terms, variables=None, **kwargs): my_list = terms[:] if len(my_list) == 0: - raise errors.AnsibleError("with_cartesian requires at least one element in each list") + raise AnsibleError("with_cartesian requires at least one element in each list") return [self._flatten(x) for x in product(*my_list, fillvalue=None)] diff --git a/v2/ansible/plugins/lookup/dict.py b/v2/ansible/plugins/lookup/dict.py index cc7975ae499993..1b54f3db93eeed 100644 --- a/v2/ansible/plugins/lookup/dict.py +++ b/v2/ansible/plugins/lookup/dict.py @@ -17,6 +17,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): @@ -24,6 +25,6 @@ class LookupModule(LookupBase): def run(self, terms, varibles=None, **kwargs): if not isinstance(terms, dict): - raise errors.AnsibleError("with_dict expects a dict") + raise AnsibleError("with_dict expects a dict") return self._flatten_hash_to_list(terms) diff --git a/v2/ansible/plugins/lookup/indexed_items.py b/v2/ansible/plugins/lookup/indexed_items.py index 4f1dd199471e50..9e242ac6bfca2b 100644 --- a/v2/ansible/plugins/lookup/indexed_items.py +++ b/v2/ansible/plugins/lookup/indexed_items.py @@ -17,6 +17,7 @@ from __future__ import 
(absolute_import, division, print_function) __metaclass__ = type +from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase class LookupModule(LookupBase): @@ -27,7 +28,7 @@ def __init__(self, basedir=None, **kwargs): def run(self, terms, variables, **kwargs): if not isinstance(terms, list): - raise errors.AnsibleError("with_indexed_items expects a list") + raise AnsibleError("with_indexed_items expects a list") items = self._flatten(terms) return zip(range(len(items)), items) From 0da7834584f18dd70a6b5e979b33629f9051b003 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 14 Apr 2015 15:46:25 -0700 Subject: [PATCH 0326/3617] Move command for v1 back into testenv so that we have a default (We have a jenkins build that creates a [testenv:jenkins] so it needs there to be a default value --- tox.ini | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tox.ini b/tox.ini index 5691980b607a2a..5440a5825c9a6e 100644 --- a/tox.ini +++ b/tox.ini @@ -2,20 +2,22 @@ envlist = {py26,py27}-v{1} [testenv] +commands = make tests deps = -r{toxinidir}/test-requirements.txt whitelist_externals = make [testenv:py26-v1] -commands = make tests [testenv:py27-v1] -commands = make tests [testenv:py26-v2] +deps = -r{toxinidir}/v2/test-requirements.txt commands = make newtests [testenv:py27-v2] +deps = -r{toxinidir}/v2/test-requirements.txt commands = make newtests [testenv:py34-v2] +deps = -r{toxinidir}/v2/test-requirements.txt commands = make newtests From 07c3107cfe788923263d9474a4b208bc71e39737 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 19:03:18 -0400 Subject: [PATCH 0327/3617] Fix the name of the exported class ( pylint ) --- v2/ansible/executor/process/worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/executor/process/worker.py b/v2/ansible/executor/process/worker.py index 8e624fe401e6ca..f24e6abd5e0f47 100644 --- a/v2/ansible/executor/process/worker.py +++ 
b/v2/ansible/executor/process/worker.py @@ -41,7 +41,7 @@ from ansible.utils.debug import debug -__all__ = ['ExecutorProcess'] +__all__ = ['WorkerProcess'] class WorkerProcess(multiprocessing.Process): From 1acd56a9aa3b30a291c7f7de4f67cb88281ce6b5 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 19:04:08 -0400 Subject: [PATCH 0328/3617] Fix various pylint issues ( missing import and wrong variable names ) --- v2/ansible/executor/connection_info.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index ace2252e3ad9aa..d0929d321768a9 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -25,7 +25,7 @@ from ansible import constants as C from ansible.template import Templar from ansible.utils.boolean import boolean - +from ansible.errors import AnsibleError __all__ = ['ConnectionInformation'] @@ -230,7 +230,7 @@ def make_become_cmd(self, cmd, executable, become_settings=None): elif self.become_method == 'pbrun': exe = become_settings.get('pbrun_exe', 'pbrun') flags = become_settings.get('pbrun_flags', '') - becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, success_cmd) + becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, success_cmd) elif self.become_method == 'pfexec': exe = become_settings.get('pfexec_exe', 'pbrun') @@ -239,7 +239,7 @@ def make_become_cmd(self, cmd, executable, become_settings=None): becomecmd = '%s %s "%s"' % (exe, flags, success_cmd) else: - raise errors.AnsibleError("Privilege escalation method not found: %s" % method) + raise AnsibleError("Privilege escalation method not found: %s" % self.become_method) return (('%s -c ' % executable) + pipes.quote(becomecmd), prompt, success_key) From 6dcc883ac9bd2b04084326688dce14c915605fe6 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 19:07:57 -0400 Subject: [PATCH 0329/3617] Remove old dead 
code ( variable no longer exist, not used ) --- v2/ansible/executor/task_queue_manager.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index 026726b3d8e02d..9a56d3f920b3c2 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -188,18 +188,6 @@ def get_variable_manager(self): def get_loader(self): return self._loader - def get_server_pipe(self): - return self._server_pipe - - def get_client_pipe(self): - return self._client_pipe - - def get_pending_results(self): - return self._pending_results - - def get_allow_processing(self): - return self._allow_processing - def get_notified_handlers(self): return self._notified_handlers From 2d9097e025f04b7ffb307834ebbb8c901929066d Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 14 Apr 2015 19:13:27 -0400 Subject: [PATCH 0330/3617] Fix the filename in error message (pylint) --- v2/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index 30d1641090e964..1ec1da34c7c8a6 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -190,7 +190,7 @@ def _make_tmp_path(self): # Catch failure conditions, files should never be # written to locations in /. 
if rc == '/': - raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basetmp, cmd)) + raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd)) return rc From dd1c14a0c7059e26eb736ac5ed619605069763ce Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Apr 2015 21:10:17 -0500 Subject: [PATCH 0331/3617] Adding a method for setting up magic variables from connection info in v2 --- v2/ansible/executor/connection_info.py | 8 ++++++++ v2/ansible/executor/task_executor.py | 6 +++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index ace2252e3ad9aa..5e14392e11bed6 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -261,3 +261,11 @@ def post_validate(self, variables, loader): for field in self._get_fields(): value = templar.template(getattr(self, field)) setattr(self, field, value) + + def update_vars(self, variables): + ''' + Adds 'magic' variables relating to connections to the variable dictionary provided. + ''' + + variables['ansible_ssh_port'] = self.port + variables['ansible_ssh_user'] = self.remote_user diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index 256d26f8dcf843..a75cbed176ec2f 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -193,9 +193,13 @@ def _execute(self, variables=None): variables = self._job_vars # fields set from the play/task may be based on variables, so we have to - # do the same kind of post validation step on it here before we use it + # do the same kind of post validation step on it here before we use it. 
self._connection_info.post_validate(variables=variables, loader=self._loader) + # now that the connection information is finalized, we can add 'magic' + # variables to the variable dictionary + self._connection_info.update_vars(variables) + # get the connection and the handler for this execution self._connection = self._get_connection(variables) self._handler = self._get_action_handler(connection=self._connection) From 9097274fe0da0021909cc97a18e9b676ff2aef35 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Apr 2015 21:10:56 -0500 Subject: [PATCH 0332/3617] Fixing minimal callback for v2 to use the new api --- v2/ansible/plugins/callback/minimal.py | 60 +++++++++++--------------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/v2/ansible/plugins/callback/minimal.py b/v2/ansible/plugins/callback/minimal.py index 8ba883307b89f6..95dfaee87850c2 100644 --- a/v2/ansible/plugins/callback/minimal.py +++ b/v2/ansible/plugins/callback/minimal.py @@ -33,81 +33,69 @@ class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 - def _print_banner(self, msg): - ''' - Prints a header-looking line with stars taking up to 80 columns - of width (3 columns, minimum) - ''' - msg = msg.strip() - star_len = (80 - len(msg)) - if star_len < 0: - star_len = 3 - stars = "*" * star_len - self._display.display("\n%s %s\n" % (msg, stars)) - - def on_any(self, *args, **kwargs): - pass - - def runner_on_failed(self, task, result, ignore_errors=False): + def v2_on_any(self, *args, **kwargs): + pass + + def v2_runner_on_failed(self, result, ignore_errors=False): self._display.display("%s | FAILED! 
=> %s" % (result._host.get_name(), result._result), color='red') - def runner_on_ok(self, task, result): + def v2_runner_on_ok(self, result): self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), json.dumps(result._result, indent=4)), color='green') - def runner_on_skipped(self, task, result): + def v2_runner_on_skipped(self, result): pass - def runner_on_unreachable(self, task, result): + def v2_runner_on_unreachable(self, result): self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow') - def runner_on_no_hosts(self, task): + def v2_runner_on_no_hosts(self, task): pass - def runner_on_async_poll(self, host, res, jid, clock): + def v2_runner_on_async_poll(self, host, res, jid, clock): pass - def runner_on_async_ok(self, host, res, jid): + def v2_runner_on_async_ok(self, host, res, jid): pass - def runner_on_async_failed(self, host, res, jid): + def v2_runner_on_async_failed(self, host, res, jid): pass - def playbook_on_start(self): + def v2_playbook_on_start(self): pass - def playbook_on_notify(self, host, handler): + def v2_playbook_on_notify(self, host, handler): pass - def playbook_on_no_hosts_matched(self): + def v2_playbook_on_no_hosts_matched(self): pass - def playbook_on_no_hosts_remaining(self): + def v2_playbook_on_no_hosts_remaining(self): pass - def playbook_on_task_start(self, name, is_conditional): + def v2_playbook_on_task_start(self, task, is_conditional): pass - def playbook_on_cleanup_task_start(self, name): + def v2_playbook_on_cleanup_task_start(self, task): pass - def playbook_on_handler_task_start(self, name): + def v2_playbook_on_handler_task_start(self, task): pass - def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): pass - def playbook_on_setup(self): + def 
v2_playbook_on_setup(self): pass - def playbook_on_import_for_host(self, host, imported_file): + def v2_playbook_on_import_for_host(self, result, imported_file): pass - def playbook_on_not_import_for_host(self, host, missing_file): + def v2_playbook_on_not_import_for_host(self, result, missing_file): pass - def playbook_on_play_start(self, name): + def v2_playbook_on_play_start(self, play): pass - def playbook_on_stats(self, stats): + def v2_playbook_on_stats(self, stats): pass From 719d01067ef8d4ff52d54f4dfd25b098fa303856 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 14 Apr 2015 19:42:52 -0700 Subject: [PATCH 0333/3617] Use six to assign metaclass for py2 and py3 compat --- v2/ansible/plugins/inventory/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/v2/ansible/plugins/inventory/__init__.py b/v2/ansible/plugins/inventory/__init__.py index 41e8578ee701ac..03fd89429b4dd4 100644 --- a/v2/ansible/plugins/inventory/__init__.py +++ b/v2/ansible/plugins/inventory/__init__.py @@ -23,6 +23,9 @@ from abc import ABCMeta, abstractmethod +from six import add_metaclass + +@add_metaclass(ABCMeta) class InventoryParser: '''Abstract Base Class for retrieving inventory information @@ -31,7 +34,6 @@ class InventoryParser: InventoryParser.hosts for a mapping of Host objects and InventoryParser.Groups for a mapping of Group objects. 
''' - __metaclass__ = ABCMeta def __init__(self, inven_source): ''' From 460dc5e4db95bd3bb8bf3c116b923759df98183c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 14 Apr 2015 19:56:17 -0700 Subject: [PATCH 0334/3617] Fix errors import --- v2/ansible/plugins/connections/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py index 74ff693a331944..4461bb6f3de399 100644 --- a/v2/ansible/plugins/connections/__init__.py +++ b/v2/ansible/plugins/connections/__init__.py @@ -20,6 +20,7 @@ __metaclass__ = type from ansible import constants as C +from ansible.errors import AnsibleError # FIXME: this object should be created upfront and passed through # the entire chain of calls to here, as there are other things @@ -48,4 +49,4 @@ def _become_method_supported(self, become_method): if become_method in self.__class__.become_methods: return True - raise errors.AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method) + raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method) From 5aa56245d53ba9a2aef08bd0da8fe9e9f3193718 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 15 Apr 2015 00:58:11 -0400 Subject: [PATCH 0335/3617] Fix variable name There is no 'role' variable, and given the test and code after, that's likely a test on 'ds' --- v2/ansible/playbook/role/requirement.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/playbook/role/requirement.py b/v2/ansible/playbook/role/requirement.py index 61db0cb1fd4979..03ffc3d7107ecf 100644 --- a/v2/ansible/playbook/role/requirement.py +++ b/v2/ansible/playbook/role/requirement.py @@ -92,7 +92,7 @@ def _preprocess_role_spec(self, ds): ds["scm"] = scm ds["src"] = src - if 'name' in role: + if 'name' in ds: ds["role"] = ds["name"] del ds["name"] else: From 
72cf11f8e1b7f802cf6f3f0e2216782cf0d3a163 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 15 Apr 2015 00:59:39 -0400 Subject: [PATCH 0336/3617] Fix serialize function by using the right members name --- v2/ansible/playbook/role/metadata.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/playbook/role/metadata.py b/v2/ansible/playbook/role/metadata.py index 05ed2f35850a8a..461a9a4a6271f9 100644 --- a/v2/ansible/playbook/role/metadata.py +++ b/v2/ansible/playbook/role/metadata.py @@ -82,8 +82,8 @@ def _load_galaxy_info(self, attr, ds): def serialize(self): return dict( - allow_duplicates = self.allow_duplicates, - dependencies = self.dependencies, + allow_duplicates = self._allow_duplicates, + dependencies = self._dependencies, ) def deserialize(self, data): From b43ede1eb4c3bc54ce5b4388f6e884386e589c69 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 15 Apr 2015 01:03:31 -0400 Subject: [PATCH 0337/3617] Do add a unused named argument using a variable that was removed Found by pylint. 
--- v2/ansible/playbook/role/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index 72dd2a27d3f311..bc4d4262eb17c5 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -172,13 +172,13 @@ def _load_role_data(self, role_include, parent_role=None): # vars and default vars are regular dictionaries self._role_vars = self._load_role_yaml('vars') if not isinstance(self._role_vars, (dict, NoneType)): - raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name, obj=ds) + raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name) elif self._role_vars is None: self._role_vars = dict() self._default_vars = self._load_role_yaml('defaults') if not isinstance(self._default_vars, (dict, NoneType)): - raise AnsibleParserError("The default/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name, obj=ds) + raise AnsibleParserError("The default/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name) elif self._default_vars is None: self._default_vars = dict() From b20d54520c1b635409057f7c73ffddab7bc9b07b Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 15 Apr 2015 01:06:02 -0400 Subject: [PATCH 0338/3617] Fix errors reporting for playbook/* --- v2/ansible/playbook/become.py | 6 +++--- v2/ansible/playbook/playbook_include.py | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py index 291cff2b716570..272976929a7493 100644 --- a/v2/ansible/playbook/become.py +++ b/v2/ansible/playbook/become.py @@ -45,11 +45,11 @@ def _detect_privilege_escalation_conflict(self, ds): if has_become: msg = 'The become params ("become", "become_user") and' if has_sudo: - raise 
errors.AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg) + raise AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg) elif has_su: - raise errors.AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg) + raise AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg) elif has_sudo and has_su: - raise errors.AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together') + raise AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together') def _preprocess_data_become(self, ds): """Preprocess the playbook data for become attributes diff --git a/v2/ansible/playbook/playbook_include.py b/v2/ansible/playbook/playbook_include.py index 2e4964fce9617b..5c91dd14adb701 100644 --- a/v2/ansible/playbook/playbook_include.py +++ b/v2/ansible/playbook/playbook_include.py @@ -27,6 +27,7 @@ from ansible.playbook.base import Base from ansible.playbook.conditional import Conditional from ansible.playbook.taggable import Taggable +from ansible.errors import AnsibleParserError class PlaybookInclude(Base): From 96a7d85b61bf6b513ffe406bef09f98e676544ef Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Apr 2015 01:07:55 -0500 Subject: [PATCH 0339/3617] Adding more magic variables for connection info to v2 --- v2/ansible/executor/connection_info.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index 54bd4c3b4322f0..e036342c191116 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -267,5 +267,9 @@ def update_vars(self, variables): Adds 'magic' variables relating to connections to the variable dictionary provided. 
''' - variables['ansible_ssh_port'] = self.port - variables['ansible_ssh_user'] = self.remote_user + variables['ansible_connection'] = self.connection + variables['ansible_ssh_host'] = self.remote_addr + variables['ansible_ssh_pass'] = self.password + variables['ansible_ssh_port'] = self.port + variables['ansible_ssh_user'] = self.remote_user + variables['ansible_ssh_private_key_file'] = self.private_key_file From 02e738500239fe5b724a814066b3af3bc412bed7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Apr 2015 01:10:24 -0500 Subject: [PATCH 0340/3617] Filter tasks based on tags during iterator setup in v2 --- v2/ansible/executor/play_iterator.py | 9 +++++++-- v2/ansible/executor/task_queue_manager.py | 2 +- v2/ansible/playbook/block.py | 22 ++++++++++++++++++++++ v2/ansible/plugins/strategies/linear.py | 4 ---- v2/samples/test_tags.yml | 22 ++++++++++++++++++++++ 5 files changed, 52 insertions(+), 7 deletions(-) create mode 100644 v2/samples/test_tags.yml diff --git a/v2/ansible/executor/play_iterator.py b/v2/ansible/executor/play_iterator.py index 38bebb21132c9c..dc4d4c7d5d2522 100644 --- a/v2/ansible/executor/play_iterator.py +++ b/v2/ansible/executor/play_iterator.py @@ -87,10 +87,15 @@ class PlayIterator: FAILED_RESCUE = 4 FAILED_ALWAYS = 8 - def __init__(self, inventory, play): + def __init__(self, inventory, play, connection_info, all_vars): self._play = play - self._blocks = self._play.compile() + self._blocks = [] + for block in self._play.compile(): + new_block = block.filter_tagged_tasks(connection_info, all_vars) + if new_block.has_tasks(): + self._blocks.append(new_block) + self._host_states = {} for host in inventory.get_hosts(self._play.hosts): self._host_states[host.name] = HostState(blocks=self._blocks) diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index 026726b3d8e02d..c5772942feb142 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ 
b/v2/ansible/executor/task_queue_manager.py @@ -161,7 +161,7 @@ def run(self, play): raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds) # build the iterator - iterator = PlayIterator(inventory=self._inventory, play=new_play) + iterator = PlayIterator(inventory=self._inventory, play=new_play, connection_info=connection_info, all_vars=all_vars) # and run the play using the strategy return strategy.run(iterator, connection_info) diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index e0e607da3b88d4..9fd3d773600a18 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -281,3 +281,25 @@ def _get_parent_attribute(self, attr): return value + def filter_tagged_tasks(self, connection_info, all_vars): + ''' + Creates a new block, with task lists filtered based on the tags contained + within the connection_info object. + ''' + + def evaluate_and_append_task(target): + tmp_list = [] + for task in target: + if task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, all_vars=all_vars): + tmp_list.append(task) + return tmp_list + + new_block = self.copy() + new_block.block = evaluate_and_append_task(self.block) + new_block.rescue = evaluate_and_append_task(self.rescue) + new_block.always = evaluate_and_append_task(self.always) + + return new_block + + def has_tasks(self): + return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0 diff --git a/v2/ansible/plugins/strategies/linear.py b/v2/ansible/plugins/strategies/linear.py index fcda46a7af0686..9988bb3e2a3db9 100644 --- a/v2/ansible/plugins/strategies/linear.py +++ b/v2/ansible/plugins/strategies/linear.py @@ -178,10 +178,6 @@ def run(self, iterator, connection_info): debug("'%s' skipped because role has already run" % task) continue - if not task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, task_vars) and task.action != 'setup': - debug("'%s' failed tag evaluation" % task) - continue 
- if task.action == 'meta': # meta tasks store their args in the _raw_params field of args, # since they do not use k=v pairs, so get that diff --git a/v2/samples/test_tags.yml b/v2/samples/test_tags.yml new file mode 100644 index 00000000000000..c94b88e0a0c573 --- /dev/null +++ b/v2/samples/test_tags.yml @@ -0,0 +1,22 @@ +- hosts: localhost + gather_facts: no + tasks: + - block: + - debug: msg="this is the tagged block" + tags: + - block + - block: + - debug: msg="tagged debug from second block" + tags: + - tag1 + - fail: + tags: + - tag1 + rescue: + - debug: msg="tagged rescue from second block" + tags: + - rescue_tag + always: + - debug: msg="tagged always from second block" + tags: + - always_tag From aab681bc2baeae580bacd47edd214830b6b87181 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Apr 2015 05:09:09 -0700 Subject: [PATCH 0341/3617] Update core and extras module refs and add tests for an unarchive problem that the update fixes: https://github.com/ansible/ansible-modules-core/issues/1064 --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- .../roles/test_unarchive/tasks/main.yml | 93 ++++++++++++++++++- 3 files changed, 91 insertions(+), 6 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 74e69d1fd16957..761fc8d277e64e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 74e69d1fd16957ff84408eac0d28a0c8ef78225c +Subproject commit 761fc8d277e64e0d63eb2cff8c72c3fa3ec70dd2 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 4048de9c1e2333..df7fcc90d9a179 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 4048de9c1e2333aa7880b61f34af8cbdce5cbcec +Subproject commit df7fcc90d9a17956ec156066e8fc31e5ed8106e6 diff --git a/test/integration/roles/test_unarchive/tasks/main.yml b/test/integration/roles/test_unarchive/tasks/main.yml index fa5891396c393e..edcee064a9b1cb 100644 --- 
a/test/integration/roles/test_unarchive/tasks/main.yml +++ b/test/integration/roles/test_unarchive/tasks/main.yml @@ -32,11 +32,20 @@ shell: tar cvf test-unarchive.tar foo-unarchive.txt chdir={{output_dir}} - name: prep a tar.gz file - shell: tar cvf test-unarchive.tar.gz foo-unarchive.txt chdir={{output_dir}} + shell: tar czvf test-unarchive.tar.gz foo-unarchive.txt chdir={{output_dir}} - name: prep a zip file shell: zip test-unarchive.zip foo-unarchive.txt chdir={{output_dir}} +- name: prep a subdirectory + file: path={{output_dir}}/unarchive-dir state=directory + +- name: prep our file + copy: src=foo.txt dest={{output_dir}}/unarchive-dir/foo-unarchive.txt + +- name: prep a tar.gz file with directory + shell: tar czvf test-unarchive-dir.tar.gz unarchive-dir chdir={{output_dir}} + - name: create our tar unarchive destination file: path={{output_dir}}/test-unarchive-tar state=directory @@ -161,7 +170,7 @@ - name: create our unarchive destination file: path={{output_dir}}/test-unarchive-tar-gz state=directory -- name: unarchive and set mode +- name: unarchive and set mode to 0600 unarchive: src: "{{ output_dir }}/test-unarchive.tar.gz" dest: "{{ output_dir | expanduser }}/test-unarchive-tar-gz" @@ -180,12 +189,39 @@ - "unarchive06.changed == true" - "unarchive06_stat.stat.mode == '0600'" -- name: unarchive and set mode +- name: remove our tar.gz unarchive destination + file: path={{ output_dir }}/test-unarchive-tar-gz state=absent + +- name: create our unarchive destination + file: path={{output_dir}}/test-unarchive-tar-gz state=directory + + +- name: unarchive over existing extraction and set mode to 0644 unarchive: src: "{{ output_dir }}/test-unarchive.tar.gz" dest: "{{ output_dir | expanduser }}/test-unarchive-tar-gz" copy: no - mode: "u+rwX,g-rwx,o-rwx" + mode: "u+rwX,g-wx,o-wx,g+r,o+r" + register: unarchive06_2 + +- name: Test that the file modes were changed + stat: + path: "{{ output_dir | expanduser }}/test-unarchive-tar-gz/foo-unarchive.txt" + register: 
unarchive06_2_stat + +- debug: var=unarchive06_2_stat.stat.mode +- name: Test that the files were changed + assert: + that: + - "unarchive06_2.changed == true" + - "unarchive06_2_stat.stat.mode == '0644'" + +- name: Repeat the last request to verify no changes + unarchive: + src: "{{ output_dir }}/test-unarchive.tar.gz" + dest: "{{ output_dir | expanduser }}/test-unarchive-tar-gz" + copy: no + mode: "u+rwX,g-wx,o-wx,g+r,o+r" register: unarchive07 - name: Test that the files were not changed @@ -196,6 +232,11 @@ - name: remove our tar.gz unarchive destination file: path={{ output_dir }}/test-unarchive-tar-gz state=absent + +- name: create our unarchive destination + file: path={{output_dir}}/test-unarchive-tar-gz state=directory + + - name: create a directory with quotable chars file: path="{{ output_dir }}/test-quotes~root" state=directory @@ -225,3 +266,47 @@ - name: remove quotable chars test file: path="{{ output_dir }}/test-quotes~root" state=absent + +# Test that unarchiving is performed if files are missing +# https://github.com/ansible/ansible-modules-core/issues/1064 +- name: create our unarchive destination + file: path={{output_dir}}/test-unarchive-tar-gz state=directory + +- name: unarchive a tar that has directories + unarchive: + src: "{{ output_dir }}/test-unarchive-dir.tar.gz" + dest: "{{ output_dir }}/test-unarchive-tar-gz" + mode: "0700" + copy: no + register: unarchive10 + +- name: Test that unarchive succeeded + assert: + that: + - "unarchive10.changed == true" + +- name: Change the mode of the toplevel dir + file: + path: "{{ output_dir }}/test-unarchive-tar-gz/unarchive-dir" + mode: 0701 + +- name: Remove a file from the extraction point + file: + path: "{{ output_dir }}/test-unarchive-tar-gz/unarchive-dir/foo-unarchive.txt" + state: absent + +- name: unarchive a tar that has directories + unarchive: + src: "{{ output_dir }}/test-unarchive-dir.tar.gz" + dest: "{{ output_dir }}/test-unarchive-tar-gz" + mode: "0700" + copy: no + register: 
unarchive10_1 + +- name: Test that unarchive succeeded + assert: + that: + - "unarchive10_1.changed == true" + +- name: remove our tar.gz unarchive destination + file: path={{ output_dir }}/test-unarchive-tar-gz state=absent From 791dbd7661598ab8150f2aa20c404849f54dc4d9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Apr 2015 05:15:30 -0700 Subject: [PATCH 0342/3617] Update extras module ref on v2 --- v2/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras index 21fce8ac730346..df7fcc90d9a179 160000 --- a/v2/ansible/modules/extras +++ b/v2/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 21fce8ac730346b4e77427e3582553f2dc93c675 +Subproject commit df7fcc90d9a17956ec156066e8fc31e5ed8106e6 From a6592ba0f9f6a1e4249239bb4a0ee9588b484b19 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 15 Apr 2015 11:49:28 -0400 Subject: [PATCH 0343/3617] updated banners as per marketing's request --- docsite/_themes/srtd/layout.html | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index ce44c4284da0da..b9d9d065c7bd5d 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -198,10 +198,10 @@
- + - - + +
 

 
From a0def30c34bf664232e8e0b04e1169a88bc818f4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Apr 2015 09:28:50 -0700 Subject: [PATCH 0344/3617] Add integration test for unarchive filelist feature --- lib/ansible/modules/core | 2 +- .../roles/test_unarchive/tasks/main.yml | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 761fc8d277e64e..a19fa6ba48bf09 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 761fc8d277e64e0d63eb2cff8c72c3fa3ec70dd2 +Subproject commit a19fa6ba48bf092b574eb6ee40f38f06500d767d diff --git a/test/integration/roles/test_unarchive/tasks/main.yml b/test/integration/roles/test_unarchive/tasks/main.yml index edcee064a9b1cb..c26d3aeb101d8d 100644 --- a/test/integration/roles/test_unarchive/tasks/main.yml +++ b/test/integration/roles/test_unarchive/tasks/main.yml @@ -75,6 +75,8 @@ assert: that: - "unarchive02.changed == true" + # Verify that no file list is generated + - "'files' not in unarchive02" - name: verify that the file was unarchived file: path={{output_dir}}/test-unarchive-tar-gz/foo-unarchive.txt state=file @@ -126,13 +128,17 @@ file: path={{output_dir}}/test-unarchive-zip state=directory - name: unarchive a zip file - unarchive: src={{output_dir}}/test-unarchive.zip dest={{output_dir | expanduser}}/test-unarchive-zip copy=no + unarchive: src={{output_dir}}/test-unarchive.zip dest={{output_dir | expanduser}}/test-unarchive-zip copy=no list_files=True register: unarchive03 - name: verify that the file was marked as changed assert: that: - "unarchive03.changed == true" + # Verify that file list is generated + - "'files' in unarchive03" + - "{{unarchive03['files']| length}} == 1" + - "'foo-unarchive.txt' in unarchive03['files']" - name: verify that the file was unarchived file: path={{output_dir}}/test-unarchive-zip/foo-unarchive.txt state=file @@ -176,6 +182,7 @@ dest: "{{ output_dir | 
expanduser }}/test-unarchive-tar-gz" copy: no mode: "u+rwX,g-rwx,o-rwx" + list_files: True register: unarchive06 - name: Test that the file modes were changed @@ -188,6 +195,10 @@ that: - "unarchive06.changed == true" - "unarchive06_stat.stat.mode == '0600'" + # Verify that file list is generated + - "'files' in unarchive06" + - "{{unarchive06['files']| length}} == 1" + - "'foo-unarchive.txt' in unarchive06['files']" - name: remove our tar.gz unarchive destination file: path={{ output_dir }}/test-unarchive-tar-gz state=absent @@ -222,12 +233,17 @@ dest: "{{ output_dir | expanduser }}/test-unarchive-tar-gz" copy: no mode: "u+rwX,g-wx,o-wx,g+r,o+r" + list_files: True register: unarchive07 - name: Test that the files were not changed assert: that: - "unarchive07.changed == false" + # Verify that file list is generated + - "'files' in unarchive07" + - "{{unarchive07['files']| length}} == 1" + - "'foo-unarchive.txt' in unarchive07['files']" - name: remove our tar.gz unarchive destination file: path={{ output_dir }}/test-unarchive-tar-gz state=absent From 45247eb4b38a76a7837f68f97c7ccebae488ea0e Mon Sep 17 00:00:00 2001 From: Martin Chlumsky Date: Wed, 15 Apr 2015 13:09:59 -0400 Subject: [PATCH 0345/3617] Use abc for BaseCacheModule --- v2/ansible/plugins/cache/base.py | 30 ++++++++---- v2/test-requirements.txt | 2 + v2/test/plugins/test_cache.py | 82 ++++++++++++++++++++++++++++++++ 3 files changed, 105 insertions(+), 9 deletions(-) create mode 100644 v2/test/plugins/test_cache.py diff --git a/v2/ansible/plugins/cache/base.py b/v2/ansible/plugins/cache/base.py index 6ff3d5ed1e2b2c..051f02d0b00335 100644 --- a/v2/ansible/plugins/cache/base.py +++ b/v2/ansible/plugins/cache/base.py @@ -14,30 +14,42 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import exceptions +from abc import ABCMeta, abstractmethod + +from six import add_metaclass + -class BaseCacheModule(object): +@add_metaclass(ABCMeta) +class BaseCacheModule: + @abstractmethod def get(self, key): - raise exceptions.NotImplementedError + pass + @abstractmethod def set(self, key, value): - raise exceptions.NotImplementedError + pass + @abstractmethod def keys(self): - raise exceptions.NotImplementedError + pass + @abstractmethod def contains(self, key): - raise exceptions.NotImplementedError + pass + @abstractmethod def delete(self, key): - raise exceptions.NotImplementedError + pass + @abstractmethod def flush(self): - raise exceptions.NotImplementedError + pass + @abstractmethod def copy(self): - raise exceptions.NotImplementedError + pass diff --git a/v2/test-requirements.txt b/v2/test-requirements.txt index ca5bcae0d98083..100bdd01a00a89 100644 --- a/v2/test-requirements.txt +++ b/v2/test-requirements.txt @@ -5,6 +5,8 @@ jinja2 httplib2 passlib six +python-memcached +redis # Test requirements unittest2 diff --git a/v2/test/plugins/test_cache.py b/v2/test/plugins/test_cache.py new file mode 100644 index 00000000000000..b1273874cd3b61 --- /dev/null +++ b/v2/test/plugins/test_cache.py @@ -0,0 +1,82 @@ +# (c) 2012-2015, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.plugins.cache.base import BaseCacheModule +from ansible.plugins.cache.memcached import CacheModule as MemcachedCache +from ansible.plugins.cache.memory import CacheModule as MemoryCache +from ansible.plugins.cache.redis import CacheModule as RedisCache + + +class TestAbstractClass(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_subclass_error(self): + class CacheModule1(BaseCacheModule): + pass + with self.assertRaises(TypeError): + CacheModule1() + + class CacheModule2(BaseCacheModule): + def get(self, key): + super(CacheModule2, self).get(key) + + with self.assertRaises(TypeError): + CacheModule2() + + def test_subclass_success(self): + class CacheModule3(BaseCacheModule): + def get(self, key): + super(CacheModule3, self).get(key) + + def set(self, key, value): + super(CacheModule3, self).set(key, value) + + def keys(self): + super(CacheModule3, self).keys() + + def contains(self, key): + super(CacheModule3, self).contains(key) + + def delete(self, key): + super(CacheModule3, self).delete(key) + + def flush(self): + super(CacheModule3, self).flush() + + def copy(self): + super(CacheModule3, self).copy() + + self.assertIsInstance(CacheModule3(), CacheModule3) + + def test_memcached_cachemodule(self): + self.assertIsInstance(MemcachedCache(), MemcachedCache) + + def test_memory_cachemodule(self): + self.assertIsInstance(MemoryCache(), MemoryCache) + + def test_redis_cachemodule(self): + self.assertIsInstance(RedisCache(), RedisCache) From f624ec4cb8771736ffbe3fe81b2949edda159863 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 15 Apr 2015 16:11:08 -0400 Subject: [PATCH 0346/3617] Prefer dnf to yum. On Fedora 22 and later, yum is deprecated and dnf is installed by default. 
However, the detection does not seem to take this into account, and always uses yum, even when the yum cli is just a wrapper that says "use dnf", as is the case on F22 and later (see package dnf-yum). As dnf is not installed by default, except on F22, this shouldn't break anything. --- lib/ansible/module_utils/facts.py | 1 + v2/ansible/module_utils/facts.py | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 595629a7109759..6b817d4ebcc973 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -106,6 +106,7 @@ class Facts(object): # package manager, put the preferred one last. If there is an # ansible module, use that as the value for the 'name' key. PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' }, + { 'path' : '/usr/bin/dnf', 'name' : 'dnf' }, { 'path' : '/usr/bin/apt-get', 'name' : 'apt' }, { 'path' : '/usr/bin/zypper', 'name' : 'zypper' }, { 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' }, diff --git a/v2/ansible/module_utils/facts.py b/v2/ansible/module_utils/facts.py index d18615857cc665..ae1a3094b6064d 100644 --- a/v2/ansible/module_utils/facts.py +++ b/v2/ansible/module_utils/facts.py @@ -105,6 +105,7 @@ class Facts(object): # package manager, put the preferred one last. If there is an # ansible module, use that as the value for the 'name' key. PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' }, + { 'path' : '/usr/bin/dnf', 'name' : 'dnf' }, { 'path' : '/usr/bin/apt-get', 'name' : 'apt' }, { 'path' : '/usr/bin/zypper', 'name' : 'zypper' }, { 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' }, From 4903bca0c16dda7908d27b5c0d86213a38e7ac23 Mon Sep 17 00:00:00 2001 From: Ralph Bean Date: Wed, 15 Apr 2015 17:51:36 -0400 Subject: [PATCH 0347/3617] Remove check of hardcoded AWS region list. You can extend boto to point at other regions that are defined in a private cloud by defining ``BOTO_ENDPOINTS`` or ``endpoints_path`` in the ``~/.boto`` file.
Ansible was doing a premature check against a hard-coded list of regions that interrupted this possibility. This commit removes that and clarifies what the user can do if they specify a non-AWS region. --- lib/ansible/module_utils/ec2.py | 18 ++---------------- v2/ansible/module_utils/ec2.py | 19 ++----------------- 2 files changed, 4 insertions(+), 33 deletions(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index c7bad2970b6522..d02c3476f2e975 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -32,20 +32,6 @@ except: HAS_LOOSE_VERSION = False -AWS_REGIONS = [ - 'ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'cn-north-1', - 'eu-central-1', - 'eu-west-1', - 'eu-central-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2', - 'us-gov-west-1', -] def aws_common_argument_spec(): @@ -63,7 +49,7 @@ def ec2_argument_spec(): spec = aws_common_argument_spec() spec.update( dict( - region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), + region=dict(aliases=['aws_region', 'ec2_region']), ) ) return spec @@ -170,7 +156,7 @@ def connect_to_aws(aws_module, region, **params): conn = aws_module.connect_to_region(region, **params) if not conn: if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]: - raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto" % (region, aws_module.__name__)) + raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__)) else: raise StandardError("Unknown problem connecting to region %s for aws module %s." 
% (region, aws_module.__name__)) if params.get('profile_name'): diff --git a/v2/ansible/module_utils/ec2.py b/v2/ansible/module_utils/ec2.py index 0f08fead18021a..8d2a369e90040f 100644 --- a/v2/ansible/module_utils/ec2.py +++ b/v2/ansible/module_utils/ec2.py @@ -32,21 +32,6 @@ except: HAS_LOOSE_VERSION = False -AWS_REGIONS = [ - 'ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'cn-north-1', - 'eu-central-1', - 'eu-west-1', - 'eu-central-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2', - 'us-gov-west-1', -] - def aws_common_argument_spec(): return dict( @@ -63,7 +48,7 @@ def ec2_argument_spec(): spec = aws_common_argument_spec() spec.update( dict( - region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS), + region=dict(aliases=['aws_region', 'ec2_region']), ) ) return spec @@ -168,7 +153,7 @@ def connect_to_aws(aws_module, region, **params): conn = aws_module.connect_to_region(region, **params) if not conn: if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]: - raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto" % (region, aws_module.__name__)) + raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__)) else: raise StandardError("Unknown problem connecting to region %s for aws module %s." 
% (region, aws_module.__name__)) if params.get('profile_name'): From 0be531db71569c10263d0ee48456b286252baabb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Apr 2015 15:19:40 -0700 Subject: [PATCH 0348/3617] Make some of the optional requirements optional for testing -- we'll skip the tests instead --- v2/test-requirements.txt | 6 ++++-- v2/test/plugins/test_cache.py | 22 ++++++++++++++++++++-- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/v2/test-requirements.txt b/v2/test-requirements.txt index 100bdd01a00a89..e4822ada648e67 100644 --- a/v2/test-requirements.txt +++ b/v2/test-requirements.txt @@ -5,8 +5,10 @@ jinja2 httplib2 passlib six -python-memcached -redis + +# These are needed for various optional features +#python-memcached +#redis # Test requirements unittest2 diff --git a/v2/test/plugins/test_cache.py b/v2/test/plugins/test_cache.py index b1273874cd3b61..bf94053aa338c1 100644 --- a/v2/test/plugins/test_cache.py +++ b/v2/test/plugins/test_cache.py @@ -21,9 +21,25 @@ from ansible.compat.tests import unittest from ansible.plugins.cache.base import BaseCacheModule -from ansible.plugins.cache.memcached import CacheModule as MemcachedCache from ansible.plugins.cache.memory import CacheModule as MemoryCache -from ansible.plugins.cache.redis import CacheModule as RedisCache + +HAVE_MEMCACHED = True +try: + import memcached +except ImportError: + HAVE_MEMCACHED = False +else: + # Use an else so that the only reason we skip this is for lack of + # memcached, not errors importing the plugin + from ansible.plugins.cache.memcached import CacheModule as MemcachedCache + +HAVE_REDIS = True +try: + import redis +except ImportError: + HAVE_REDIS = False +else: + from ansible.plugins.cache.redis import CacheModule as RedisCache class TestAbstractClass(unittest.TestCase): @@ -72,11 +88,13 @@ def copy(self): self.assertIsInstance(CacheModule3(), CacheModule3) + @unittest.skipUnless(HAVE_MEMCACHED) def test_memcached_cachemodule(self): 
self.assertIsInstance(MemcachedCache(), MemcachedCache) def test_memory_cachemodule(self): self.assertIsInstance(MemoryCache(), MemoryCache) + @unittest.skipUnless(HAVE_REDIS) def test_redis_cachemodule(self): self.assertIsInstance(RedisCache(), RedisCache) From 1f7d23fc18ef2b7b6909d325db6b49aee683a58e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Apr 2015 15:57:13 -0700 Subject: [PATCH 0349/3617] Fix call to skipUnless by adding a reason --- v2/test/plugins/test_cache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/test/plugins/test_cache.py b/v2/test/plugins/test_cache.py index bf94053aa338c1..f3cfe6a38c17cb 100644 --- a/v2/test/plugins/test_cache.py +++ b/v2/test/plugins/test_cache.py @@ -88,13 +88,13 @@ def copy(self): self.assertIsInstance(CacheModule3(), CacheModule3) - @unittest.skipUnless(HAVE_MEMCACHED) + @unittest.skipUnless(HAVE_MEMCACHED, 'python-memcached module not installed') def test_memcached_cachemodule(self): self.assertIsInstance(MemcachedCache(), MemcachedCache) def test_memory_cachemodule(self): self.assertIsInstance(MemoryCache(), MemoryCache) - @unittest.skipUnless(HAVE_REDIS) + @unittest.skipUnless(HAVE_REDIS, 'Redis pyhton module not installed') def test_redis_cachemodule(self): self.assertIsInstance(RedisCache(), RedisCache) From 01df51d2ae7daec4a996118c48779e749c8f45ad Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Apr 2015 16:32:44 -0700 Subject: [PATCH 0350/3617] Improve the API for connection plugins and update local and ssh to use it --- v2/ansible/executor/task_executor.py | 2 - v2/ansible/plugins/action/__init__.py | 2 +- v2/ansible/plugins/connections/__init__.py | 47 ++++++++- v2/ansible/plugins/connections/local.py | 37 +++---- v2/ansible/plugins/connections/ssh.py | 110 ++++++++++++--------- 5 files changed, 126 insertions(+), 72 deletions(-) diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index a75cbed176ec2f..0c57a42857d862 
100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -374,8 +374,6 @@ def _get_connection(self, variables): if not connection: raise AnsibleError("the connection plugin '%s' was not found" % conn_type) - connection.connect() - return connection def _get_action_handler(self, connection): diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index 1ec1da34c7c8a6..c5b88e76946351 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -168,7 +168,7 @@ def _make_tmp_path(self): if result['rc'] != 0: if result['rc'] == 5: output = 'Authentication failure.' - elif result['rc'] == 255 and self._connection.get_transport() in ['ssh']: + elif result['rc'] == 255 and self._connection.transport in ('ssh',): # FIXME: more utils.VERBOSITY #if utils.VERBOSITY > 3: # output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr']) diff --git a/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py index 4461bb6f3de399..8f84e6a01ac4ce 100644 --- a/v2/ansible/plugins/connections/__init__.py +++ b/v2/ansible/plugins/connections/__init__.py @@ -1,4 +1,5 @@ # (c) 2012-2014, Michael DeHaan +# (c) 2015 Toshio Kuratomi # # This file is part of Ansible # @@ -19,6 +20,10 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from abc import ABCMeta, abstractmethod, abstractproperty + +from six import add_metaclass + from ansible import constants as C from ansible.errors import AnsibleError @@ -29,7 +34,7 @@ __all__ = ['ConnectionBase'] - +@add_metaclass(ABCMeta) class ConnectionBase: ''' A base class for connections to contain common code. 
@@ -39,9 +44,15 @@ class ConnectionBase: become_methods = C.BECOME_METHODS def __init__(self, connection_info, *args, **kwargs): - self._connection_info = connection_info - self._display = Display(verbosity=connection_info.verbosity) + # All these hasattrs allow subclasses to override these parameters + if not hasattr(self, '_connection_info'): + self._connection_info = connection_info + if not hasattr(self, '_display'): + self._display = Display(verbosity=connection_info.verbosity) + if not hasattr(self, '_connected'): + self._connected = False + self._connect() def _become_method_supported(self, become_method): ''' Checks if the current class supports this privilege escalation method ''' @@ -50,3 +61,33 @@ def _become_method_supported(self, become_method): return True raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method) + + @abstractproperty + def transport(self): + """String used to identify this Connection class from other classes""" + pass + + @abstractmethod + def _connect(self): + """Connect to the host we've been initialized with""" + pass + + @abstractmethod + def exec_command(self, cmd, tmp_path, executable=None, in_data=None): + """Run a command on the remote host""" + pass + + @abstractmethod + def put_file(self, in_path, out_path): + """Transfer a file from local to remote""" + pass + + @abstractmethod + def fetch_file(self, in_path, out_path): + """Fetch a file from remote to local""" + pass + + @abstractmethod + def close(self): + """Terminate the connection""" + pass diff --git a/v2/ansible/plugins/connections/local.py b/v2/ansible/plugins/connections/local.py index 73583974bf025c..1dc6076b0db547 100644 --- a/v2/ansible/plugins/connections/local.py +++ b/v2/ansible/plugins/connections/local.py @@ -1,4 +1,5 @@ # (c) 2012, Michael DeHaan +# (c) 2015 Toshio Kuratomi # # This file is part of Ansible # @@ -19,13 +20,12 @@ import traceback import os -import pipes import shutil import 
subprocess -import select -import fcntl +#import select +#import fcntl -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleFileNotFound from ansible.plugins.connections import ConnectionBase from ansible.utils.debug import debug @@ -33,15 +33,17 @@ class Connection(ConnectionBase): ''' Local based connections ''' - def get_transport(self): + @property + def transport(self): ''' used to identify this connection object ''' return 'local' - def connect(self, port=None): + def _connect(self, port=None): ''' connect to the local host; nothing to do here ''' - self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._connection_info.remote_addr) - + if not self._connected: + self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._connection_info.remote_user, host=self._connection_info.remote_addr)) + self._connected = True return self def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): @@ -57,7 +59,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): executable = executable.split()[0] if executable else None - self._display.vvv("%s EXEC %s" % (self._connection_info.remote_addr, cmd)) + self._display.vvv("{0} EXEC {1}".format(self._connection_info.remote_addr, cmd)) # FIXME: cwd= needs to be set to the basedir of the playbook debug("opening command with Popen()") p = subprocess.Popen( @@ -106,26 +108,25 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): def put_file(self, in_path, out_path): ''' transfer a file from local to local ''' - #vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) - self._display.vvv("%s PUT %s TO %s" % (self._connection_info.remote_addr, in_path, out_path)) + #vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host) + self._display.vvv("{0} PUT {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path)) if not os.path.exists(in_path): - 
#raise AnsibleFileNotFound("file or module does not exist: %s" % in_path) - raise AnsibleError("file or module does not exist: %s" % in_path) + raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path)) try: shutil.copyfile(in_path, out_path) except shutil.Error: traceback.print_exc() - raise AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path)) + raise AnsibleError("failed to copy: {0} and {1} are the same".format(in_path, out_path)) except IOError: traceback.print_exc() - raise AnsibleError("failed to transfer file to %s" % out_path) + raise AnsibleError("failed to transfer file to {0}".format(out_path)) def fetch_file(self, in_path, out_path): - #vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) - self._display.vvv("%s FETCH %s TO %s" % (self._connection_info.remote_addr, in_path, out_path)) ''' fetch a file from local to local -- for copatibility ''' + #vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host) + self._display.vvv("{0} FETCH {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path)) self.put_file(in_path, out_path) def close(self): ''' terminate the connection; nothing to do here ''' - pass + self._connected = False diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py index 2c8f8de8135abb..c07582f6b747cd 100644 --- a/v2/ansible/plugins/connections/ssh.py +++ b/v2/ansible/plugins/connections/ssh.py @@ -33,15 +33,13 @@ from hashlib import sha1 from ansible import constants as C -from ansible.errors import AnsibleError, AnsibleConnectionFailure +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connections import ConnectionBase class Connection(ConnectionBase): ''' ssh based connections ''' def __init__(self, connection_info, *args, **kwargs): - super(Connection, self).__init__(connection_info) - # SSH connection specific init stuff self.HASHED_KEY_MAGIC = "|1|" 
self._has_pipelining = True @@ -52,14 +50,20 @@ def __init__(self, connection_info, *args, **kwargs): self._cp_dir = '/tmp' #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN) - def get_transport(self): + super(Connection, self).__init__(connection_info) + + @property + def transport(self): ''' used to identify this connection object from other classes ''' return 'ssh' - def connect(self): + def _connect(self): ''' connect to the remote host ''' - self._display.vvv("ESTABLISH SSH CONNECTION FOR USER: %s" % self._connection_info.remote_user, host=self._connection_info.remote_addr) + self._display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._connection_info.remote_user), host=self._connection_info.remote_addr) + + if self._connected: + return self self._common_args = [] extra_args = C.ANSIBLE_SSH_ARGS @@ -67,11 +71,11 @@ def connect(self): # make sure there is no empty string added as this can produce weird errors self._common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()] else: - self._common_args += [ + self._common_args += ( "-o", "ControlMaster=auto", "-o", "ControlPersist=60s", - "-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir)), - ] + "-o", "ControlPath=\"{0}\"".format(C.ANSIBLE_SSH_CONTROL_PATH.format(dict(directory=self._cp_dir))), + ) cp_in_use = False cp_path_set = False @@ -82,30 +86,34 @@ def connect(self): cp_path_set = True if cp_in_use and not cp_path_set: - self._common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir))] + self._common_args += ("-o", "ControlPath=\"{0}\"".format( + C.ANSIBLE_SSH_CONTROL_PATH.format(dict(directory=self._cp_dir))) + ) if not C.HOST_KEY_CHECKING: - self._common_args += ["-o", "StrictHostKeyChecking=no"] + self._common_args += ("-o", "StrictHostKeyChecking=no") if self._connection_info.port is not None: - self._common_args += ["-o", "Port=%d" % (self._connection_info.port)] + self._common_args += 
("-o", "Port={0}".format(self._connection_info.port)) # FIXME: need to get this from connection info #if self.private_key_file is not None: - # self._common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)] + # self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self.private_key_file))) #elif self.runner.private_key_file is not None: - # self._common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)] + # self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self.runner.private_key_file))) if self._connection_info.password: - self._common_args += ["-o", "GSSAPIAuthentication=no", - "-o", "PubkeyAuthentication=no"] + self._common_args += ("-o", "GSSAPIAuthentication=no", + "-o", "PubkeyAuthentication=no") else: - self._common_args += ["-o", "KbdInteractiveAuthentication=no", + self._common_args += ("-o", "KbdInteractiveAuthentication=no", "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey", - "-o", "PasswordAuthentication=no"] + "-o", "PasswordAuthentication=no") if self._connection_info.remote_user is not None and self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]: - self._common_args += ["-o", "User="+self._connection_info.remote_user] + self._common_args += ("-o", "User={0}".format(self._connection_info.remote_user)) # FIXME: figure out where this goes - #self._common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout] - self._common_args += ["-o", "ConnectTimeout=15"] + #self._common_args += ("-o", "ConnectTimeout={0}".format(self.runner.timeout)) + self._common_args += ("-o", "ConnectTimeout=15") + + self._connected = True return self @@ -136,13 +144,13 @@ def _password_cmd(self): except OSError: raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program") (self.rfd, self.wfd) = os.pipe() - return ["sshpass", "-d%d" % self.rfd] + return ("sshpass", 
"-d{0}".format(self.rfd)) return [] def _send_password(self): if self._connection_info.password: os.close(self.rfd) - os.write(self.wfd, "%s\n" % self._connection_info.password) + os.write(self.wfd, "{0}\n".format(self._connection_info.password)) os.close(self.wfd) def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): @@ -215,12 +223,12 @@ def not_in_host_file(self, host): else: user_host_file = "~/.ssh/known_hosts" user_host_file = os.path.expanduser(user_host_file) - + host_file_list = [] host_file_list.append(user_host_file) host_file_list.append("/etc/ssh/ssh_known_hosts") host_file_list.append("/etc/ssh/ssh_known_hosts2") - + hfiles_not_found = 0 for hf in host_file_list: if not os.path.exists(hf): @@ -234,7 +242,7 @@ def not_in_host_file(self, host): else: data = host_fh.read() host_fh.close() - + for line in data.split("\n"): if line is None or " " not in line: continue @@ -258,33 +266,33 @@ def not_in_host_file(self, host): return False if (hfiles_not_found == len(host_file_list)): - self._display.vvv("EXEC previous known host file not found for %s" % host) + self._display.vvv("EXEC previous known host file not found for {0}".format(host)) return True def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' ssh_cmd = self._password_cmd() - ssh_cmd += ["ssh", "-C"] + ssh_cmd += ("ssh", "-C") if not in_data: # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python # inside a tty automatically invokes the python interactive-mode but the modules are not # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines) - ssh_cmd += ["-tt"] + ssh_cmd.append("-tt") if self._connection_info.verbosity > 3: - ssh_cmd += ["-vvv"] + ssh_cmd.append("-vvv") else: - ssh_cmd += ["-q"] + ssh_cmd.append("-q") ssh_cmd += self._common_args # FIXME: ipv6 stuff needs to be figured out. 
It's in the connection info, however # not sure if it's all working yet so this remains commented out #if self._ipv6: # ssh_cmd += ['-6'] - ssh_cmd += [self._connection_info.remote_addr] + ssh_cmd.append(self._connection_info.remote_addr) ssh_cmd.append(cmd) - self._display.vvv("EXEC %s" % ' '.join(ssh_cmd), host=self._connection_info.remote_addr) + self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=self._connection_info.remote_addr) not_in_host_file = self.not_in_host_file(self._connection_info.remote_addr) @@ -361,7 +369,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): # FIXME: the prompt won't be here anymore prompt="" (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, prompt=prompt) - + #if C.HOST_KEY_CHECKING and not_in_host_file: # # lock around the initial SSH connectivity so the user prompt about whether to add # # the host to known hosts is not intermingled with multiprocess output. @@ -384,9 +392,9 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): def put_file(self, in_path, out_path): ''' transfer a file from local to remote ''' - self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) + self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr) if not os.path.exists(in_path): - raise AnsibleFileNotFound("file or module does not exist: %s" % in_path) + raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path)) cmd = self._password_cmd() # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH @@ -398,12 +406,15 @@ def put_file(self, in_path, out_path): # host = '[%s]' % host if C.DEFAULT_SCP_IF_SSH: - cmd += ["scp"] + self._common_args - cmd += [in_path,host + ":" + pipes.quote(out_path)] + cmd.append('scp') + cmd += self._common_args + cmd.append(in_path,host + ":" + pipes.quote(out_path)) indata = None else: - cmd += ["sftp"] + self._common_args + [host] - 
indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path)) + cmd.append('sftp') + cmd += self._common_args + cmd.append(host) + indata = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path)) (p, stdin) = self._run(cmd, indata) @@ -412,11 +423,11 @@ def put_file(self, in_path, out_path): (returncode, stdout, stderr) = self._communicate(p, stdin, indata) if returncode != 0: - raise AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) + raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(out_path, stdout, stderr)) def fetch_file(self, in_path, out_path): ''' fetch a file from remote to local ''' - self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) + self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr) cmd = self._password_cmd() # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH @@ -428,21 +439,24 @@ def fetch_file(self, in_path, out_path): # host = '[%s]' % self._connection_info.remote_addr if C.DEFAULT_SCP_IF_SSH: - cmd += ["scp"] + self._common_args - cmd += [host + ":" + in_path, out_path] + cmd.append('scp') + cmd += self._common_args + cmd += ('{0}:{1}'.format(host, in_path), out_path) indata = None else: - cmd += ["sftp"] + self._common_args + [host] - indata = "get %s %s\n" % (in_path, out_path) + cmd.append('sftp') + cmd += self._common_args + cmd.append(host) + indata = "get {0} {1}\n".format(in_path, out_path) p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self._send_password() stdout, stderr = p.communicate(indata) if p.returncode != 0: - raise AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr)) + raise AnsibleError("failed to transfer file from {0}:\n{1}\n{2}".format(in_path, stdout, stderr)) def close(self): ''' not applicable since we're executing openssh binaries ''' - pass + 
self._connected = False From 7e1644bffd77f84677f4d77c3291e0c9d89ccefb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Apr 2015 16:33:57 -0700 Subject: [PATCH 0351/3617] Add smoketest unittests for the connection plugins --- v2/test/plugins/test_connection.py | 99 ++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 v2/test/plugins/test_connection.py diff --git a/v2/test/plugins/test_connection.py b/v2/test/plugins/test_connection.py new file mode 100644 index 00000000000000..bf78a08c89d534 --- /dev/null +++ b/v2/test/plugins/test_connection.py @@ -0,0 +1,99 @@ +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.executor.connection_info import ConnectionInformation + +from ansible.plugins.connections import ConnectionBase +#from ansible.plugins.connections.accelerate import Connection as AccelerateConnection +#from ansible.plugins.connections.chroot import Connection as ChrootConnection +#from ansible.plugins.connections.funcd import Connection as FuncdConnection +#from ansible.plugins.connections.jail import Connection as JailConnection +#from ansible.plugins.connections.libvirt_lxc import Connection as LibvirtLXCConnection +from ansible.plugins.connections.local import Connection as LocalConnection +#from ansible.plugins.connections.paramiko_ssh import Connection as ParamikoConnection +from ansible.plugins.connections.ssh import Connection as SSHConnection +#from ansible.plugins.connections.winrm import Connection as WinRmConnection + +class TestConnectionBaseClass(unittest.TestCase): + + def setUp(self): + self.conn_info = ConnectionInformation() + + def tearDown(self): + pass + + def test_subclass_error(self): + class ConnectionModule1(ConnectionBase): + pass + with self.assertRaises(TypeError): + ConnectionModule1() + + class ConnectionModule2(ConnectionBase): + def get(self, key): + super(ConnectionModule2, self).get(key) + + with self.assertRaises(TypeError): + ConnectionModule2() + + def test_subclass_success(self): + class ConnectionModule3(ConnectionBase): + @property + def transport(self): + pass + def _connect(self): + pass + def exec_command(self): + pass + def put_file(self): + pass + def fetch_file(self): + pass + def close(self): + pass + self.assertIsInstance(ConnectionModule3(self.conn_info), ConnectionModule3) + +# def test_accelerate_connection_module(self): +# self.assertIsInstance(AccelerateConnection(), AccelerateConnection) +# +# def test_chroot_connection_module(self): 
+# self.assertIsInstance(ChrootConnection(), ChrootConnection) +# +# def test_funcd_connection_module(self): +# self.assertIsInstance(FuncdConnection(), FuncdConnection) +# +# def test_jail_connection_module(self): +# self.assertIsInstance(JailConnection(), JailConnection) +# +# def test_libvirt_lxc_connection_module(self): +# self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection) + + def test_local_connection_module(self): + self.assertIsInstance(LocalConnection(self.conn_info), LocalConnection) + +# def test_paramiko_connection_module(self): +# self.assertIsInstance(ParamikoConnection(self.conn_info), ParamikoConnection) + + def test_ssh_connection_module(self): + self.assertIsInstance(SSHConnection(self.conn_info), SSHConnection) + +# def test_winrm_connection_module(self): +# self.assertIsInstance(WinRmConnection(), WinRmConnection) From 92c8275f94d0584d6e57727fabe44689d4c2e8b6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Apr 2015 08:36:59 -0400 Subject: [PATCH 0352/3617] made certain flags part of base to make them universally settable --- v2/ansible/playbook/base.py | 10 ++++++++++ v2/ansible/playbook/play.py | 6 ------ v2/ansible/playbook/task.py | 6 ------ 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index c6a9d9a051396e..73eceba996ba6a 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -41,6 +41,16 @@ class Base: + # connection/transport + _connection = FieldAttribute(isa='string') + _port = FieldAttribute(isa='int') + _remote_user = FieldAttribute(isa='string') + + # vars and flags + _vars = FieldAttribute(isa='dict', default=dict()) + _environment = FieldAttribute(isa='dict', default=dict()) + _no_log = FieldAttribute(isa='bool', default=False) + def __init__(self): # initialize the data loader and variable manager, which will be provided diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index 
01bc275e940e1d..457f23810904f0 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -56,15 +56,11 @@ class Play(Base, Taggable, Become): _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port # Connection - _connection = FieldAttribute(isa='string', default='smart') _gather_facts = FieldAttribute(isa='string', default='smart') _hosts = FieldAttribute(isa='list', default=[], required=True) _name = FieldAttribute(isa='string', default='') - _port = FieldAttribute(isa='int', default=22) - _remote_user = FieldAttribute(isa='string') # Variable Attributes - _vars = FieldAttribute(isa='dict', default=dict()) _vars_files = FieldAttribute(isa='list', default=[]) _vars_prompt = FieldAttribute(isa='dict', default=dict()) _vault_password = FieldAttribute(isa='string') @@ -80,9 +76,7 @@ class Play(Base, Taggable, Become): # Flag/Setting Attributes _any_errors_fatal = FieldAttribute(isa='bool', default=False) - _environment = FieldAttribute(isa='dict', default=dict()) _max_fail_percentage = FieldAttribute(isa='string', default='0') - _no_log = FieldAttribute(isa='bool', default=False) _serial = FieldAttribute(isa='int', default=0) _strategy = FieldAttribute(isa='string', default='linear') diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 7c9478837d7de7..2c92dd4674ae42 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -63,10 +63,8 @@ class Task(Base, Conditional, Taggable, Become): _any_errors_fatal = FieldAttribute(isa='bool') _async = FieldAttribute(isa='int', default=0) _changed_when = FieldAttribute(isa='string') - _connection = FieldAttribute(isa='string') _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') - _environment = FieldAttribute(isa='dict') _failed_when = FieldAttribute(isa='string') _first_available_file = FieldAttribute(isa='list') _ignore_errors = FieldAttribute(isa='bool') @@ -80,16 +78,12 @@ class Task(Base, 
Conditional, Taggable, Become): _name = FieldAttribute(isa='string', default='') - _no_log = FieldAttribute(isa='bool') _notify = FieldAttribute(isa='list') _poll = FieldAttribute(isa='int') _register = FieldAttribute(isa='string') - _remote_user = FieldAttribute(isa='string') _retries = FieldAttribute(isa='int', default=1) _run_once = FieldAttribute(isa='bool') - _transport = FieldAttribute(isa='string') _until = FieldAttribute(isa='list') # ? - _vars = FieldAttribute(isa='dict', default=dict()) def __init__(self, block=None, role=None, task_include=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' From 2ad787038f4d0a53480189ef6ec59c9991f55764 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Apr 2015 10:26:18 -0400 Subject: [PATCH 0353/3617] removed vars from block as its now in base --- v2/ansible/playbook/block.py | 1 - 1 file changed, 1 deletion(-) diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index 9fd3d773600a18..defb8d9f22107a 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -43,7 +43,6 @@ def __init__(self, parent_block=None, role=None, task_include=None, use_handlers self._task_include = task_include self._use_handlers = use_handlers self._dep_chain = [] - self._vars = dict() super(Block, self).__init__() From f478f1ec109e7934c578f6ad0b6c6b93c0a7487f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Apr 2015 11:13:21 -0400 Subject: [PATCH 0354/3617] fixed vars in block now that they are a field atribute also --- v2/ansible/playbook/block.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index defb8d9f22107a..f8fc683694074f 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -61,7 +61,7 @@ def get_vars(self): if self._task_include: all_vars.update(self._task_include.get_vars()) - all_vars.update(self._vars) + all_vars.update(self.vars) 
return all_vars @staticmethod From ec01e071d8e5f6c5b6fc73e1e5bbdc806642fb59 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Apr 2015 11:54:50 -0400 Subject: [PATCH 0355/3617] adjusted for the posibolity of lsblk not existing for fact gathering --- lib/ansible/module_utils/facts.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 6b817d4ebcc973..136dcb0195dd0a 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -885,13 +885,14 @@ def get_mount_facts(self): size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail) except OSError, e: continue + + uuid = 'NA' lsblkPath = module.get_bin_path("lsblk") - rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True) + if lsblkPath: + rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True) - if rc == 0: - uuid = out.strip() - else: - uuid = 'NA' + if rc == 0: + uuid = out.strip() self.facts['mounts'].append( {'mount': fields[1], From 77afdd16b0d9909e9a4b412dc56cbf2ddc2089c4 Mon Sep 17 00:00:00 2001 From: Mark Phillips Date: Thu, 16 Apr 2015 17:30:54 +0100 Subject: [PATCH 0356/3617] Fixed a few typos in the become documentation --- docsite/rst/become.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index dd2d9b140cd842..70b781887a0a77 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -23,7 +23,7 @@ become_user equivalent to adding sudo_user: or su_user: to a play or task become_method - at play or task level overrides the default method set in ansibile.cfg + at play or task level overrides the default method set in ansible.cfg New ansible_ variables @@ -31,16 +31,16 @@ New ansible_ variables Each allows you to set an option per group and/or host ansible_become - equivalent 
to ansible_sudo or ansbile_su, allows to force privilege escalation + equivalent to ansible_sudo or ansible_su, allows to force privilege escalation ansible_become_method allows to set privilege escalation method ansible_become_user - equivalent to ansible_sudo_user or ansbile_su_user, allows to set the user you become through privilege escalation + equivalent to ansible_sudo_user or ansible_su_user, allows to set the user you become through privilege escalation ansible_become_pass - equivalent to ansible_sudo_pass or ansbile_su_pass, allows you to set the privilege escalation password + equivalent to ansible_sudo_pass or ansible_su_pass, allows you to set the privilege escalation password New command line options @@ -50,7 +50,7 @@ New command line options ask for privilege escalation password -b, --become - run operations with become (no passorwd implied) + run operations with become (no password implied) --become-method=BECOME_METHOD privilege escalation method to use (default=sudo), From b4f02625cdbc1a83ca78daabe090df424aa8ee58 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Thu, 16 Apr 2015 13:04:23 -0400 Subject: [PATCH 0357/3617] Add CoreOS facts detection, fix https://github.com/ansible/ansible-modules-core/issues/1000 --- lib/ansible/module_utils/facts.py | 17 ++++++++++++++++- v2/ansible/module_utils/facts.py | 17 ++++++++++++++++- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 6b817d4ebcc973..a9f1b17e5bd5a9 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -99,7 +99,8 @@ class Facts(object): ('/etc/os-release', 'SuSE'), ('/etc/gentoo-release', 'Gentoo'), ('/etc/os-release', 'Debian'), - ('/etc/lsb-release', 'Mandriva') ) + ('/etc/lsb-release', 'Mandriva'), + ('/etc/os-release', 'NA') ) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. 
If there is a platform with more than one @@ -427,6 +428,20 @@ def get_distribution_facts(self): self.facts['distribution_release'] = release.groups()[0] self.facts['distribution'] = name break + elif name == 'NA': + data = get_file_content(path) + for line in data.splitlines(): + distribution = re.search("^NAME=(.*)", line) + if distribution: + self.facts['distribution'] = distribution.group(1).strip('"') + version = re.search("^VERSION=(.*)", line) + if version: + self.facts['distribution_version'] = version.group(1).strip('"') + if self.facts['distribution'].lower() == 'coreos': + data = get_file_content('/etc/coreos/update.conf') + release = re.search("^GROUP=(.*)", data) + if release: + self.facts['distribution_release'] = release.group(1).strip('"') else: self.facts['distribution'] = name machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id") diff --git a/v2/ansible/module_utils/facts.py b/v2/ansible/module_utils/facts.py index ae1a3094b6064d..5844c4f67871d0 100644 --- a/v2/ansible/module_utils/facts.py +++ b/v2/ansible/module_utils/facts.py @@ -98,7 +98,8 @@ class Facts(object): ('/etc/os-release', 'SuSE'), ('/etc/gentoo-release', 'Gentoo'), ('/etc/os-release', 'Debian'), - ('/etc/lsb-release', 'Mandriva') ) + ('/etc/lsb-release', 'Mandriva'), + ('/etc/os-release', 'NA') ) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. 
If there is a platform with more than one @@ -386,6 +387,20 @@ def get_distribution_facts(self): self.facts['distribution_release'] = release.groups()[0] self.facts['distribution'] = name break + elif name == 'NA': + data = get_file_content(path) + for line in data.splitlines(): + distribution = re.search("^NAME=(.*)", line) + if distribution: + self.facts['distribution'] = distribution.group(1).strip('"') + version = re.search("^VERSION=(.*)", line) + if version: + self.facts['distribution_version'] = version.group(1).strip('"') + if self.facts['distribution'].lower() == 'coreos': + data = get_file_content('/etc/coreos/update.conf') + release = re.search("^GROUP=(.*)", data) + if release: + self.facts['distribution_release'] = release.group(1).strip('"') else: self.facts['distribution'] = name From a0c34da779f583915a945f4ec039dd5f7b6e422c Mon Sep 17 00:00:00 2001 From: Simon Gomizelj Date: Wed, 8 Apr 2015 13:57:56 -0400 Subject: [PATCH 0358/3617] Support querying systemd container information systemd writes a /run/systemd/container file in any container it starts to make it really easy to detect the container type. This adds support for detecting systemd-nspawn containers (and any other container format that will write data there for compatibility). 
--- lib/ansible/module_utils/facts.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 628d1dd267833c..300ed3ad2ea966 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2394,6 +2394,12 @@ def get_virtual_facts(self): self.facts['virtualization_role'] = 'guest' return + systemd_container = get_file_content('/run/systemd/container') + if systemd_container: + self.facts['virtualization_type'] = systemd_container + self.facts['virtualization_role'] = 'guest' + return + if os.path.exists('/proc/1/cgroup'): for line in get_file_lines('/proc/1/cgroup'): if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line): From fa1eff83562153b07aea905633225dbe3a0daafd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Apr 2015 15:42:55 -0400 Subject: [PATCH 0359/3617] added new ec2_ami_find and deprecated ec2_ami_search --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0211defbaa0f4d..82a41702d5507d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,11 @@ Major Changes: - template code now retains types for bools and Numbers instead of turning them into strings - If you need the old behaviour, quote the value and it will get passed around as a string +Deprecated Modules: + ec2_ami_search, in favor of the new ec2_ami_find + New Modules: + ec2_ami_find cloudtrail cloudstack_fw cloudstack_iso From 7e9292c75511c6478f77623a1363807fd9dc6bb8 Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Tue, 14 Apr 2015 17:07:43 -0400 Subject: [PATCH 0360/3617] Updated parsing/yaml/objects.py with 2/3 compatibility. 
--- v2/ansible/parsing/yaml/objects.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/v2/ansible/parsing/yaml/objects.py b/v2/ansible/parsing/yaml/objects.py index fe37eaab94a8df..33ea1ad37e42f4 100644 --- a/v2/ansible/parsing/yaml/objects.py +++ b/v2/ansible/parsing/yaml/objects.py @@ -19,14 +19,17 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -class AnsibleBaseYAMLObject: +from six import text_type + + +class AnsibleBaseYAMLObject(object): ''' the base class used to sub-class python built-in objects so that we can add attributes to them during yaml parsing ''' - _data_source = None - _line_number = 0 + _data_source = None + _line_number = 0 _column_number = 0 def _get_ansible_position(self): @@ -36,21 +39,27 @@ def _set_ansible_position(self, obj): try: (src, line, col) = obj except (TypeError, ValueError): - raise AssertionError('ansible_pos can only be set with a tuple/list of three values: source, line number, column number') - self._data_source = src - self._line_number = line + raise AssertionError( + 'ansible_pos can only be set with a tuple/list ' + 'of three values: source, line number, column number' + ) + self._data_source = src + self._line_number = line self._column_number = col ansible_pos = property(_get_ansible_position, _set_ansible_position) + class AnsibleMapping(AnsibleBaseYAMLObject, dict): ''' sub class for dictionaries ''' pass -class AnsibleUnicode(AnsibleBaseYAMLObject, unicode): + +class AnsibleUnicode(AnsibleBaseYAMLObject, text_type): ''' sub class for unicode objects ''' pass + class AnsibleSequence(AnsibleBaseYAMLObject, list): ''' sub class for lists ''' pass From 77cd7a6e8907a53141c50063ac6c2f5715464540 Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Tue, 14 Apr 2015 17:42:13 -0400 Subject: [PATCH 0361/3617] Fixed 2/3 compatibility issues in parsing/yaml/test_loader with six. 
--- v2/test/parsing/yaml/test_loader.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/v2/test/parsing/yaml/test_loader.py b/v2/test/parsing/yaml/test_loader.py index d393d72a0054a0..37eeabff83b52a 100644 --- a/v2/test/parsing/yaml/test_loader.py +++ b/v2/test/parsing/yaml/test_loader.py @@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from six import text_type, binary_type from six.moves import StringIO from collections import Sequence, Set, Mapping @@ -28,6 +29,7 @@ from ansible.parsing.yaml.loader import AnsibleLoader + class TestAnsibleLoaderBasic(unittest.TestCase): def setUp(self): @@ -52,7 +54,7 @@ def test_parse_string(self): loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, u'Ansible') - self.assertIsInstance(data, unicode) + self.assertIsInstance(data, text_type) self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) @@ -63,7 +65,7 @@ def test_parse_utf8_string(self): loader = AnsibleLoader(stream, 'myfile.yml') data = loader.get_single_data() self.assertEqual(data, u'Cafè Eñyei') - self.assertIsInstance(data, unicode) + self.assertIsInstance(data, text_type) self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) @@ -76,8 +78,8 @@ def test_parse_dict(self): data = loader.get_single_data() self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'}) self.assertEqual(len(data), 2) - self.assertIsInstance(data.keys()[0], unicode) - self.assertIsInstance(data.values()[0], unicode) + self.assertIsInstance(list(data.keys())[0], text_type) + self.assertIsInstance(list(data.values())[0], text_type) # Beginning of the first key self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) @@ -94,7 +96,7 @@ def test_parse_list(self): data = loader.get_single_data() self.assertEqual(data, [u'a', u'b']) self.assertEqual(len(data), 2) - self.assertIsInstance(data[0], unicode) + self.assertIsInstance(data[0], text_type) 
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17)) @@ -204,10 +206,10 @@ def test_data_complete(self): def walk(self, data): # Make sure there's no str in the data - self.assertNotIsInstance(data, str) + self.assertNotIsInstance(data, binary_type) # Descend into various container types - if isinstance(data, unicode): + if isinstance(data, text_type): # strings are a sequence so we have to be explicit here return elif isinstance(data, (Sequence, Set)): From 1e139fe08f5e1e534928491c2cf87664627ecff2 Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Tue, 14 Apr 2015 23:30:41 -0400 Subject: [PATCH 0362/3617] Updated the test_data_loader to use six.builtins vs __builtins__ --- v2/test/parsing/test_data_loader.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/v2/test/parsing/test_data_loader.py b/v2/test/parsing/test_data_loader.py index 75ceb662f7327c..5117150b4fe9df 100644 --- a/v2/test/parsing/test_data_loader.py +++ b/v2/test/parsing/test_data_loader.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from six.moves import builtins from yaml.scanner import ScannerError from ansible.compat.tests import unittest @@ -79,6 +80,6 @@ def test_parse_from_vault_1_1_file(self): 3135306561356164310a343937653834643433343734653137383339323330626437313562306630 3035 """ - with patch('__builtin__.open', mock_open(read_data=vaulted_data)): + with patch('builtins.open', mock_open(read_data=vaulted_data)): output = self._loader.load_from_file('dummy_vault.txt') self.assertEqual(output, dict(foo='bar')) From f8fe1357b088432e60c48789ba29ab565538b585 Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Wed, 15 Apr 2015 00:03:55 -0400 Subject: [PATCH 0363/3617] Updated the utils/unicode.py file with 2/3 compatibility. NOTES: 1. replaced unicode, str, etc with their six counterparts 2. isinstance(obj, basestring) -> isinstance(obj, (string_types, text_type)) 3. 
I'm not entirely confident about the behaviour of __str__ and __unicode__ between versions so that might require a bit more testing. --- v2/ansible/utils/unicode.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/v2/ansible/utils/unicode.py b/v2/ansible/utils/unicode.py index 7bd035c0075609..e6f43d799c20c4 100644 --- a/v2/ansible/utils/unicode.py +++ b/v2/ansible/utils/unicode.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from six import string_types, text_type, binary_type + # to_bytes and to_unicode were written by Toshio Kuratomi for the # python-kitchen library https://pypi.python.org/pypi/kitchen # They are licensed in kitchen under the terms of the GPLv2+ @@ -88,13 +90,13 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): ''' # Could use isbasestring/isunicode here but we want this code to be as # fast as possible - if isinstance(obj, basestring): - if isinstance(obj, unicode): + if isinstance(obj, (string_types, text_type)): + if isinstance(obj, text_type): return obj if encoding in _UTF8_ALIASES: - return unicode(obj, 'utf-8', errors) + return text_type(obj, 'utf-8', errors) if encoding in _LATIN1_ALIASES: - return unicode(obj, 'latin-1', errors) + return text_type(obj, 'latin-1', errors) return obj.decode(encoding, errors) if not nonstring: @@ -116,13 +118,13 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): simple = obj.__str__() except (UnicodeError, AttributeError): simple = u'' - if isinstance(simple, str): - return unicode(simple, encoding, errors) + if isinstance(simple, binary_type): + return text_type(simple, encoding, errors) return simple elif nonstring in ('repr', 'strict'): obj_repr = repr(obj) if isinstance(obj_repr, str): - obj_repr = unicode(obj_repr, encoding, errors) + obj_repr = text_type(obj_repr, encoding, errors) if nonstring == 'repr': return obj_repr raise 
TypeError('to_unicode was given "%(obj)s" which is neither' @@ -197,7 +199,7 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): ''' # Could use isbasestring, isbytestring here but we want this to be as fast # as possible - if isinstance(obj, basestring): + if isinstance(obj, (string_types, text_type)): if isinstance(obj, str): return obj return obj.encode(encoding, errors) @@ -210,7 +212,7 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): return obj elif nonstring == 'simplerepr': try: - simple = str(obj) + simple = binary_type(obj) except UnicodeError: try: simple = obj.__str__() @@ -221,7 +223,7 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): simple = obj.__unicode__() except (AttributeError, UnicodeError): simple = '' - if isinstance(simple, unicode): + if isinstance(simple, text_type): simple = simple.encode(encoding, 'replace') return simple elif nonstring in ('repr', 'strict'): @@ -229,10 +231,10 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): obj_repr = obj.__repr__() except (AttributeError, UnicodeError): obj_repr = '' - if isinstance(obj_repr, unicode): + if isinstance(obj_repr, text_type): obj_repr = obj_repr.encode(encoding, errors) else: - obj_repr = str(obj_repr) + obj_repr = binary_type(obj_repr) if nonstring == 'repr': return obj_repr raise TypeError('to_bytes was given "%(obj)s" which is neither' From 28443cf0a9b4f317c8b351e97e10a5ed6dedc629 Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Wed, 15 Apr 2015 00:34:30 -0400 Subject: [PATCH 0364/3617] Updated parsing/vault/test_vault.py to use the fake byte literals in six when using hexlify. This was to fix the `TypeError: 'str' does not support the buffer interface` errors. 
--- v2/test/parsing/vault/test_vault.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/v2/test/parsing/vault/test_vault.py b/v2/test/parsing/vault/test_vault.py index d24573c72945e0..5609596404f9a7 100644 --- a/v2/test/parsing/vault/test_vault.py +++ b/v2/test/parsing/vault/test_vault.py @@ -24,6 +24,8 @@ import shutil import time import tempfile +import six + from binascii import unhexlify from binascii import hexlify from nose.plugins.skip import SkipTest @@ -63,13 +65,13 @@ def test_methods_exist(self): 'decrypt', '_add_header', '_split_header',] - for slot in slots: + for slot in slots: assert hasattr(v, slot), "VaultLib is missing the %s method" % slot def test_is_encrypted(self): v = VaultLib(None) assert not v.is_encrypted("foobar"), "encryption check on plaintext failed" - data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible") + data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible")) assert v.is_encrypted(data), "encryption check on headered text failed" def test_add_header(self): @@ -82,15 +84,15 @@ def test_add_header(self): header = lines[0] assert header.endswith(';TEST'), "header does end with cipher name" header_parts = header.split(';') - assert len(header_parts) == 3, "header has the wrong number of parts" + assert len(header_parts) == 3, "header has the wrong number of parts" assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT" assert header_parts[1] == v.version, "header version is incorrect" assert header_parts[2] == 'TEST', "header does end with cipher name" def test_split_header(self): v = VaultLib('ansible') - data = "$ANSIBLE_VAULT;9.9;TEST\nansible" - rdata = v._split_header(data) + data = "$ANSIBLE_VAULT;9.9;TEST\nansible" + rdata = v._split_header(data) lines = rdata.split('\n') assert lines[0] == "ansible" assert v.cipher_name == 'TEST', "cipher name was not set" @@ -104,7 +106,7 @@ def test_encrypt_decrypt_aes(self): enc_data = 
v.encrypt("foobar") dec_data = v.decrypt(enc_data) assert enc_data != "foobar", "encryption failed" - assert dec_data == "foobar", "decryption failed" + assert dec_data == "foobar", "decryption failed" def test_encrypt_decrypt_aes256(self): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: @@ -114,20 +116,20 @@ def test_encrypt_decrypt_aes256(self): enc_data = v.encrypt("foobar") dec_data = v.decrypt(enc_data) assert enc_data != "foobar", "encryption failed" - assert dec_data == "foobar", "decryption failed" + assert dec_data == "foobar", "decryption failed" def test_encrypt_encrypted(self): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') v.cipher_name = 'AES' - data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible") + data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible")) error_hit = False try: enc_data = v.encrypt(data) except errors.AnsibleError as e: error_hit = True - assert error_hit, "No error was thrown when trying to encrypt data with a header" + assert error_hit, "No error was thrown when trying to encrypt data with a header" def test_decrypt_decrypted(self): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: @@ -139,7 +141,7 @@ def test_decrypt_decrypted(self): dec_data = v.decrypt(data) except errors.AnsibleError as e: error_hit = True - assert error_hit, "No error was thrown when trying to decrypt data without a header" + assert error_hit, "No error was thrown when trying to decrypt data without a header" def test_cipher_not_set(self): # not setting the cipher should default to AES256 @@ -152,5 +154,5 @@ def test_cipher_not_set(self): enc_data = v.encrypt(data) except errors.AnsibleError as e: error_hit = True - assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set" - assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name + assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set" + 
assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name From 176ae06cbd235034b87f25c56371a07b6fb1108e Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Wed, 15 Apr 2015 14:08:53 -0400 Subject: [PATCH 0365/3617] Updated the vault/__init__.py and test_vault.py files to support 2/3. Existing tests pass under both versions, but there could still be some issues since, it involves a lot of 2/3 bytes-unicode conversions. --- v2/ansible/parsing/vault/__init__.py | 150 ++++++++++++++++----------- v2/test/parsing/vault/test_vault.py | 17 +-- 2 files changed, 99 insertions(+), 68 deletions(-) diff --git a/v2/ansible/parsing/vault/__init__.py b/v2/ansible/parsing/vault/__init__.py index 92c99fdad5e124..ddb92e4e7d362f 100644 --- a/v2/ansible/parsing/vault/__init__.py +++ b/v2/ansible/parsing/vault/__init__.py @@ -22,6 +22,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import sys import os import shlex import shutil @@ -35,7 +36,10 @@ from hashlib import md5 from binascii import hexlify from binascii import unhexlify +from six import binary_type, byte2int, PY2, text_type from ansible import constants as C +from ansible.utils.unicode import to_unicode, to_bytes + try: from Crypto.Hash import SHA256, HMAC @@ -60,13 +64,13 @@ # AES IMPORTS try: from Crypto.Cipher import AES as AES - HAS_AES = True + HAS_AES = True except ImportError: - HAS_AES = False + HAS_AES = False CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. 
You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto" -HEADER='$ANSIBLE_VAULT' +HEADER=u'$ANSIBLE_VAULT' CIPHER_WHITELIST=['AES', 'AES256'] class VaultLib(object): @@ -76,26 +80,28 @@ def __init__(self, password): self.cipher_name = None self.version = '1.1' - def is_encrypted(self, data): + def is_encrypted(self, data): + data = to_unicode(data) if data.startswith(HEADER): return True else: return False def encrypt(self, data): + data = to_unicode(data) if self.is_encrypted(data): raise errors.AnsibleError("data is already encrypted") if not self.cipher_name: self.cipher_name = "AES256" - #raise errors.AnsibleError("the cipher must be set before encrypting data") + # raise errors.AnsibleError("the cipher must be set before encrypting data") - if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: + if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: cipher = globals()['Vault' + self.cipher_name] this_cipher = cipher() else: - raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name) + raise errors.AnsibleError("{} cipher could not be found".format(self.cipher_name)) """ # combine sha + data @@ -106,11 +112,13 @@ def encrypt(self, data): # encrypt sha + data enc_data = this_cipher.encrypt(data, self.password) - # add header + # add header tmp_data = self._add_header(enc_data) return tmp_data def decrypt(self, data): + data = to_bytes(data) + if self.password is None: raise errors.AnsibleError("A vault password must be specified to decrypt data") @@ -121,48 +129,47 @@ def decrypt(self, data): data = self._split_header(data) # create the cipher object - if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: - cipher = globals()['Vault' + self.cipher_name] + ciphername = to_unicode(self.cipher_name) + if 'Vault' + ciphername in globals() and ciphername in CIPHER_WHITELIST: + cipher 
= globals()['Vault' + ciphername] this_cipher = cipher() else: - raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name) + raise errors.AnsibleError("{} cipher could not be found".format(ciphername)) # try to unencrypt data data = this_cipher.decrypt(data, self.password) if data is None: raise errors.AnsibleError("Decryption failed") - return data + return data - def _add_header(self, data): + def _add_header(self, data): # combine header and encrypted data in 80 char columns #tmpdata = hexlify(data) - tmpdata = [data[i:i+80] for i in range(0, len(data), 80)] - + tmpdata = [to_bytes(data[i:i+80]) for i in range(0, len(data), 80)] if not self.cipher_name: raise errors.AnsibleError("the cipher must be set before adding a header") - dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n" - + dirty_data = to_bytes(HEADER + ";" + self.version + ";" + self.cipher_name + "\n") for l in tmpdata: - dirty_data += l + '\n' + dirty_data += l + b'\n' return dirty_data - def _split_header(self, data): + def _split_header(self, data): # used by decrypt - tmpdata = data.split('\n') - tmpheader = tmpdata[0].strip().split(';') + tmpdata = data.split(b'\n') + tmpheader = tmpdata[0].strip().split(b';') - self.version = str(tmpheader[1].strip()) - self.cipher_name = str(tmpheader[2].strip()) - clean_data = '\n'.join(tmpdata[1:]) + self.version = to_unicode(tmpheader[1].strip()) + self.cipher_name = to_unicode(tmpheader[2].strip()) + clean_data = b'\n'.join(tmpdata[1:]) """ - # strip out newline, join, unhex + # strip out newline, join, unhex clean_data = [ x.strip() for x in clean_data ] clean_data = unhexlify(''.join(clean_data)) """ @@ -176,9 +183,9 @@ def __exit__(self, *err): pass class VaultEditor(object): - # uses helper methods for write_file(self, filename, data) - # to write a file so that code isn't duplicated for simple - # file I/O, ditto read_file(self, filename) and launch_editor(self, filename) + # uses helper methods for 
write_file(self, filename, data) + # to write a file so that code isn't duplicated for simple + # file I/O, ditto read_file(self, filename) and launch_editor(self, filename) # ... "Don't Repeat Yourself", etc. def __init__(self, cipher_name, password, filename): @@ -302,7 +309,7 @@ def rekey_file(self, new_password): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: raise errors.AnsibleError(CRYPTO_UPGRADE) - # decrypt + # decrypt tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) dec_data = this_vault.decrypt(tmpdata) @@ -324,7 +331,7 @@ def read_data(self, filename): return tmpdata def write_data(self, data, filename): - if os.path.isfile(filename): + if os.path.isfile(filename): os.remove(filename) f = open(filename, "wb") f.write(data) @@ -369,9 +376,10 @@ def aes_derive_key_and_iv(self, password, salt, key_length, iv_length): """ Create a key and an initialization vector """ - d = d_i = '' + d = d_i = b'' while len(d) < key_length + iv_length: - d_i = md5(d_i + password + salt).digest() + text = "{}{}{}".format(d_i, password, salt) + d_i = md5(to_bytes(text)).digest() d += d_i key = d[:key_length] @@ -385,45 +393,49 @@ def encrypt(self, data, password, key_length=32): # combine sha + data - this_sha = sha256(data).hexdigest() + this_sha = sha256(to_bytes(data)).hexdigest() tmp_data = this_sha + "\n" + data - in_file = BytesIO(tmp_data) + in_file = BytesIO(to_bytes(tmp_data)) in_file.seek(0) out_file = BytesIO() bs = AES.block_size - # Get a block of random data. EL does not have Crypto.Random.new() + # Get a block of random data. 
EL does not have Crypto.Random.new() # so os.urandom is used for cross platform purposes salt = os.urandom(bs - len('Salted__')) key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs) cipher = AES.new(key, AES.MODE_CBC, iv) - out_file.write('Salted__' + salt) + full = to_bytes(b'Salted__' + salt) + out_file.write(full) + print(repr(full)) finished = False while not finished: chunk = in_file.read(1024 * bs) if len(chunk) == 0 or len(chunk) % bs != 0: padding_length = (bs - len(chunk) % bs) or bs - chunk += padding_length * chr(padding_length) + chunk += to_bytes(padding_length * chr(padding_length)) finished = True out_file.write(cipher.encrypt(chunk)) out_file.seek(0) enc_data = out_file.read() + #print(enc_data) tmp_data = hexlify(enc_data) + assert isinstance(tmp_data, binary_type) return tmp_data - + def decrypt(self, data, password, key_length=32): """ Read encrypted data from in_file and write decrypted to out_file """ # http://stackoverflow.com/a/14989032 - data = ''.join(data.split('\n')) + data = b''.join(data.split(b'\n')) data = unhexlify(data) in_file = BytesIO(data) @@ -431,29 +443,35 @@ def decrypt(self, data, password, key_length=32): out_file = BytesIO() bs = AES.block_size - salt = in_file.read(bs)[len('Salted__'):] + tmpsalt = in_file.read(bs) + print(repr(tmpsalt)) + salt = tmpsalt[len('Salted__'):] key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs) cipher = AES.new(key, AES.MODE_CBC, iv) - next_chunk = '' + next_chunk = b'' finished = False while not finished: chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs)) if len(next_chunk) == 0: - padding_length = ord(chunk[-1]) + if PY2: + padding_length = ord(chunk[-1]) + else: + padding_length = chunk[-1] + chunk = chunk[:-padding_length] finished = True out_file.write(chunk) # reset the stream pointer to the beginning out_file.seek(0) - new_data = out_file.read() + new_data = to_unicode(out_file.read()) # split out sha and verify decryption split_data 
= new_data.split("\n") this_sha = split_data[0] this_data = '\n'.join(split_data[1:]) - test_sha = sha256(this_data).hexdigest() + test_sha = sha256(to_bytes(this_data)).hexdigest() if this_sha != test_sha: raise errors.AnsibleError("Decryption failed") @@ -465,7 +483,7 @@ def decrypt(self, data, password, key_length=32): class VaultAES256(object): """ - Vault implementation using AES-CTR with an HMAC-SHA256 authentication code. + Vault implementation using AES-CTR with an HMAC-SHA256 authentication code. Keys are derived using PBKDF2 """ @@ -481,7 +499,7 @@ def gen_key_initctr(self, password, salt): keylength = 32 # match the size used for counter.new to avoid extra work - ivlength = 16 + ivlength = 16 hash_function = SHA256 @@ -489,7 +507,7 @@ def gen_key_initctr(self, password, salt): pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest() - derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength, + derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength, count=10000, prf=pbkdf2_prf) key1 = derivedkey[:keylength] @@ -523,28 +541,28 @@ def encrypt(self, data, password): cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) # ENCRYPT PADDED DATA - cryptedData = cipher.encrypt(data) + cryptedData = cipher.encrypt(data) # COMBINE SALT, DIGEST AND DATA hmac = HMAC.new(key2, cryptedData, SHA256) - message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) ) + message = b''.join([hexlify(salt), b"\n", to_bytes(hmac.hexdigest()), b"\n", hexlify(cryptedData)]) message = hexlify(message) return message def decrypt(self, data, password): # SPLIT SALT, DIGEST, AND DATA - data = ''.join(data.split("\n")) + data = b''.join(data.split(b"\n")) data = unhexlify(data) - salt, cryptedHmac, cryptedData = data.split("\n", 2) + salt, cryptedHmac, cryptedData = data.split(b"\n", 2) salt = unhexlify(salt) cryptedData = unhexlify(cryptedData) key1, key2, iv = self.gen_key_initctr(password, salt) - # EXIT EARLY IF DIGEST DOESN'T MATCH 
+ # EXIT EARLY IF DIGEST DOESN'T MATCH hmacDecrypt = HMAC.new(key2, cryptedData, SHA256) - if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()): + if not self.is_equal(cryptedHmac, to_bytes(hmacDecrypt.hexdigest())): return None # SET THE COUNTER AND THE CIPHER @@ -555,19 +573,31 @@ def decrypt(self, data, password): decryptedData = cipher.decrypt(cryptedData) # UNPAD DATA - padding_length = ord(decryptedData[-1]) + try: + padding_length = ord(decryptedData[-1]) + except TypeError: + padding_length = decryptedData[-1] + decryptedData = decryptedData[:-padding_length] - return decryptedData + return to_unicode(decryptedData) def is_equal(self, a, b): + """ + Comparing 2 byte arrrays in constant time + to avoid timing attacks. + + It would be nice if there was a library for this but + hey. + """ # http://codahale.com/a-lesson-in-timing-attacks/ if len(a) != len(b): return False - + result = 0 for x, y in zip(a, b): - result |= ord(x) ^ ord(y) - return result == 0 - - + if PY2: + result |= ord(x) ^ ord(y) + else: + result |= x ^ y + return result == 0 diff --git a/v2/test/parsing/vault/test_vault.py b/v2/test/parsing/vault/test_vault.py index 5609596404f9a7..2aaac27fc7e9a8 100644 --- a/v2/test/parsing/vault/test_vault.py +++ b/v2/test/parsing/vault/test_vault.py @@ -31,6 +31,7 @@ from nose.plugins.skip import SkipTest from ansible.compat.tests import unittest +from ansible.utils.unicode import to_bytes, to_unicode from ansible import errors from ansible.parsing.vault import VaultLib @@ -70,8 +71,8 @@ def test_methods_exist(self): def test_is_encrypted(self): v = VaultLib(None) - assert not v.is_encrypted("foobar"), "encryption check on plaintext failed" - data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible")) + assert not v.is_encrypted(u"foobar"), "encryption check on plaintext failed" + data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible") assert v.is_encrypted(data), "encryption check on headered text failed" def test_add_header(self): @@ 
-79,9 +80,9 @@ def test_add_header(self): v.cipher_name = "TEST" sensitive_data = "ansible" data = v._add_header(sensitive_data) - lines = data.split('\n') + lines = data.split(b'\n') assert len(lines) > 1, "failed to properly add header" - header = lines[0] + header = to_unicode(lines[0]) assert header.endswith(';TEST'), "header does end with cipher name" header_parts = header.split(';') assert len(header_parts) == 3, "header has the wrong number of parts" @@ -91,10 +92,10 @@ def test_add_header(self): def test_split_header(self): v = VaultLib('ansible') - data = "$ANSIBLE_VAULT;9.9;TEST\nansible" + data = b"$ANSIBLE_VAULT;9.9;TEST\nansible" rdata = v._split_header(data) - lines = rdata.split('\n') - assert lines[0] == "ansible" + lines = rdata.split(b'\n') + assert lines[0] == b"ansible" assert v.cipher_name == 'TEST', "cipher name was not set" assert v.version == "9.9" @@ -102,7 +103,7 @@ def test_encrypt_decrypt_aes(self): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: raise SkipTest v = VaultLib('ansible') - v.cipher_name = 'AES' + v.cipher_name = u'AES' enc_data = v.encrypt("foobar") dec_data = v.decrypt(enc_data) assert enc_data != "foobar", "encryption failed" From 43ab4c12dd378ec9e930172c6530c7cd6bccfb9b Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Wed, 15 Apr 2015 22:32:03 -0400 Subject: [PATCH 0366/3617] Fixed NoneType import error which worked in python2, but not 3. In mod_args we were checking `isinstance(thing, NoneType)` when thing is None works the same since NoneType can't be subclassed in python 2 or 3 and it removes the need for the NoneType import. 
--- v2/ansible/parsing/mod_args.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index 6650355ba303ff..e3fdba093d43a1 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -20,7 +20,6 @@ __metaclass__ = type from six import iteritems, string_types -from types import NoneType from ansible.errors import AnsibleParserError from ansible.plugins import module_loader @@ -165,7 +164,7 @@ def _normalize_old_style_args(self, thing, action): # form is like: local_action: copy src=a dest=b ... pretty common check_raw = action in ('command', 'shell', 'script') args = parse_kv(thing, check_raw=check_raw) - elif isinstance(thing, NoneType): + elif thing is None: # this can happen with modules which take no params, like ping: args = None else: From f3fed01a7ef82900248e2f6745568b82304b1114 Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Thu, 16 Apr 2015 12:53:59 -0400 Subject: [PATCH 0367/3617] Attempted to update the vault editor to support 2/3 compatibility. Unfortunately, I wasn't able to fix a bug in VaultAES in which, during the test_decrypt_1_0 and test_rekey_migration tests, VaultAES wasn't successfully writing the encrypted key to out_file (BytesIO). Added skipping vault_editor tests test_decrypt_1_0 and test_rekey_migration in python3 since I wasn't able to successfully backport VaultAES without weird bugs.
--- v2/ansible/parsing/vault/__init__.py | 14 +++---- v2/test/parsing/vault/test_vault_editor.py | 44 +++++++++++++--------- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/v2/ansible/parsing/vault/__init__.py b/v2/ansible/parsing/vault/__init__.py index ddb92e4e7d362f..80c48a3b69c1a6 100644 --- a/v2/ansible/parsing/vault/__init__.py +++ b/v2/ansible/parsing/vault/__init__.py @@ -73,6 +73,7 @@ HEADER=u'$ANSIBLE_VAULT' CIPHER_WHITELIST=['AES', 'AES256'] + class VaultLib(object): def __init__(self, password): @@ -334,7 +335,7 @@ def write_data(self, data, filename): if os.path.isfile(filename): os.remove(filename) f = open(filename, "wb") - f.write(data) + f.write(to_bytes(data)) f.close() def shuffle_files(self, src, dest): @@ -410,7 +411,6 @@ def encrypt(self, data, password, key_length=32): cipher = AES.new(key, AES.MODE_CBC, iv) full = to_bytes(b'Salted__' + salt) out_file.write(full) - print(repr(full)) finished = False while not finished: chunk = in_file.read(1024 * bs) @@ -422,10 +422,8 @@ def encrypt(self, data, password, key_length=32): out_file.seek(0) enc_data = out_file.read() - #print(enc_data) tmp_data = hexlify(enc_data) - assert isinstance(tmp_data, binary_type) return tmp_data @@ -444,7 +442,6 @@ def decrypt(self, data, password, key_length=32): bs = AES.block_size tmpsalt = in_file.read(bs) - print(repr(tmpsalt)) salt = tmpsalt[len('Salted__'):] key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs) cipher = AES.new(key, AES.MODE_CBC, iv) @@ -461,11 +458,15 @@ def decrypt(self, data, password, key_length=32): chunk = chunk[:-padding_length] finished = True + out_file.write(chunk) + out_file.flush() # reset the stream pointer to the beginning out_file.seek(0) - new_data = to_unicode(out_file.read()) + out_data = out_file.read() + out_file.close() + new_data = to_unicode(out_data) # split out sha and verify decryption split_data = new_data.split("\n") @@ -476,7 +477,6 @@ def decrypt(self, data, password, key_length=32): 
if this_sha != test_sha: raise errors.AnsibleError("Decryption failed") - #return out_file.read() return this_data diff --git a/v2/test/parsing/vault/test_vault_editor.py b/v2/test/parsing/vault/test_vault_editor.py index c788df54ae53c9..fd52ca2490e2c0 100644 --- a/v2/test/parsing/vault/test_vault_editor.py +++ b/v2/test/parsing/vault/test_vault_editor.py @@ -21,6 +21,7 @@ __metaclass__ = type #!/usr/bin/env python +import sys import getpass import os import shutil @@ -32,6 +33,7 @@ from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch +from ansible.utils.unicode import to_bytes, to_unicode from ansible import errors from ansible.parsing.vault import VaultLib @@ -88,12 +90,12 @@ def test_methods_exist(self): 'read_data', 'write_data', 'shuffle_files'] - for slot in slots: + for slot in slots: assert hasattr(v, slot), "VaultLib is missing the %s method" % slot @patch.object(VaultEditor, '_editor_shell_command') def test_create_file(self, mock_editor_shell_command): - + def sc_side_effect(filename): return ['touch', filename] mock_editor_shell_command.side_effect = sc_side_effect @@ -107,12 +109,16 @@ def sc_side_effect(filename): self.assertTrue(os.path.exists(tmp_file.name)) def test_decrypt_1_0(self): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + """ + Skip testing decrypting 1.0 files if we don't have access to AES, KDF or + Counter, or we are running on python3 since VaultAES hasn't been backported. 
+ """ + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or sys.version > '3': raise SkipTest v10_file = tempfile.NamedTemporaryFile(delete=False) with v10_file as f: - f.write(v10_data) + f.write(to_bytes(v10_data)) ve = VaultEditor(None, "ansible", v10_file.name) @@ -125,13 +131,13 @@ def test_decrypt_1_0(self): # verify decrypted content f = open(v10_file.name, "rb") - fdata = f.read() - f.close() + fdata = to_unicode(f.read()) + f.cloes() os.unlink(v10_file.name) - assert error_hit == False, "error decrypting 1.0 file" - assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + assert error_hit == False, "error decrypting 1.0 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() def test_decrypt_1_1(self): @@ -140,7 +146,7 @@ def test_decrypt_1_1(self): v11_file = tempfile.NamedTemporaryFile(delete=False) with v11_file as f: - f.write(v11_data) + f.write(to_bytes(v11_data)) ve = VaultEditor(None, "ansible", v11_file.name) @@ -153,28 +159,32 @@ def test_decrypt_1_1(self): # verify decrypted content f = open(v11_file.name, "rb") - fdata = f.read() + fdata = to_unicode(f.read()) f.close() os.unlink(v11_file.name) - assert error_hit == False, "error decrypting 1.0 file" - assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() + assert error_hit == False, "error decrypting 1.0 file" + assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip() def test_rekey_migration(self): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2: + """ + Skip testing rekeying files if we don't have access to AES, KDF or + Counter, or we are running on python3 since VaultAES hasn't been backported. 
+ """ + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or sys.version > '3': raise SkipTest v10_file = tempfile.NamedTemporaryFile(delete=False) with v10_file as f: - f.write(v10_data) + f.write(to_bytes(v10_data)) ve = VaultEditor(None, "ansible", v10_file.name) # make sure the password functions for the cipher error_hit = False - try: + try: ve.rekey_file('ansible2') except errors.AnsibleError as e: error_hit = True @@ -184,7 +194,7 @@ def test_rekey_migration(self): fdata = f.read() f.close() - assert error_hit == False, "error rekeying 1.0 file to 1.1" + assert error_hit == False, "error rekeying 1.0 file to 1.1" # ensure filedata can be decrypted, is 1.1 and is AES256 vl = VaultLib("ansible2") @@ -198,7 +208,7 @@ def test_rekey_migration(self): os.unlink(v10_file.name) assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name - assert error_hit == False, "error decrypting migrated 1.0 file" + assert error_hit == False, "error decrypting migrated 1.0 file" assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data From 3e25f633fe3d2c6ea9a89c0f2d41f009752aa404 Mon Sep 17 00:00:00 2001 From: Rory Finnegan Date: Thu, 16 Apr 2015 16:01:13 -0400 Subject: [PATCH 0368/3617] Applied some stashed fixes. * Fixed file.close() typo in test_vault_editor * Updated unicode.py to redefine basestring properly in python3 and fixed a couple missed py27 specific code. * Realized the patch in test_data_loader was still failing cause we are passing the string 'builtins.open' and not actually using it in that file and soe instead of failing in py34 it would fail in py27. 
--- v2/ansible/utils/unicode.py | 21 ++++++++++++--------- v2/test/parsing/test_data_loader.py | 9 +++++++-- v2/test/parsing/vault/test_vault_editor.py | 2 +- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/v2/ansible/utils/unicode.py b/v2/ansible/utils/unicode.py index e6f43d799c20c4..2cff2e5e45c76d 100644 --- a/v2/ansible/utils/unicode.py +++ b/v2/ansible/utils/unicode.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from six import string_types, text_type, binary_type +from six import string_types, text_type, binary_type, PY3 # to_bytes and to_unicode were written by Toshio Kuratomi for the # python-kitchen library https://pypi.python.org/pypi/kitchen @@ -37,6 +37,9 @@ # EXCEPTION_CONVERTERS is defined below due to using to_unicode +if PY3: + basestring = (str, bytes) + def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): '''Convert an object into a :class:`unicode` string @@ -90,7 +93,7 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): ''' # Could use isbasestring/isunicode here but we want this code to be as # fast as possible - if isinstance(obj, (string_types, text_type)): + if isinstance(obj, basestring): if isinstance(obj, text_type): return obj if encoding in _UTF8_ALIASES: @@ -112,7 +115,7 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): simple = None if not simple: try: - simple = str(obj) + simple = text_type(obj) except UnicodeError: try: simple = obj.__str__() @@ -123,7 +126,7 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): return simple elif nonstring in ('repr', 'strict'): obj_repr = repr(obj) - if isinstance(obj_repr, str): + if isinstance(obj_repr, binary_type): obj_repr = text_type(obj_repr, encoding, errors) if nonstring == 'repr': return obj_repr @@ -199,15 +202,15 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): ''' # Could use isbasestring, 
isbytestring here but we want this to be as fast # as possible - if isinstance(obj, (string_types, text_type)): - if isinstance(obj, str): + if isinstance(obj, basestring): + if isinstance(obj, binary_type): return obj return obj.encode(encoding, errors) if not nonstring: nonstring = 'simplerepr' if nonstring == 'empty': - return '' + return b'' elif nonstring == 'passthru': return obj elif nonstring == 'simplerepr': @@ -222,7 +225,7 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): try: simple = obj.__unicode__() except (AttributeError, UnicodeError): - simple = '' + simple = b'' if isinstance(simple, text_type): simple = simple.encode(encoding, 'replace') return simple @@ -230,7 +233,7 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): try: obj_repr = obj.__repr__() except (AttributeError, UnicodeError): - obj_repr = '' + obj_repr = b'' if isinstance(obj_repr, text_type): obj_repr = obj_repr.encode(encoding, errors) else: diff --git a/v2/test/parsing/test_data_loader.py b/v2/test/parsing/test_data_loader.py index 5117150b4fe9df..b9c37cdd0c7e84 100644 --- a/v2/test/parsing/test_data_loader.py +++ b/v2/test/parsing/test_data_loader.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from six.moves import builtins +from six import PY2 from yaml.scanner import ScannerError from ansible.compat.tests import unittest @@ -80,6 +80,11 @@ def test_parse_from_vault_1_1_file(self): 3135306561356164310a343937653834643433343734653137383339323330626437313562306630 3035 """ - with patch('builtins.open', mock_open(read_data=vaulted_data)): + if PY2: + builtins_name = '__builtin__' + else: + builtins_name = 'builtins' + + with patch(builtins_name + '.open', mock_open(read_data=vaulted_data)): output = self._loader.load_from_file('dummy_vault.txt') self.assertEqual(output, dict(foo='bar')) diff --git a/v2/test/parsing/vault/test_vault_editor.py 
b/v2/test/parsing/vault/test_vault_editor.py index fd52ca2490e2c0..2ddf3de27a2cd1 100644 --- a/v2/test/parsing/vault/test_vault_editor.py +++ b/v2/test/parsing/vault/test_vault_editor.py @@ -132,7 +132,7 @@ def test_decrypt_1_0(self): # verify decrypted content f = open(v10_file.name, "rb") fdata = to_unicode(f.read()) - f.cloes() + f.close() os.unlink(v10_file.name) From 5c64956a7eaefc6008222f88114fb35d76591fda Mon Sep 17 00:00:00 2001 From: Jeff Rizzo Date: Thu, 16 Apr 2015 16:43:38 -0700 Subject: [PATCH 0369/3617] Set distribution, release, and version for NetBSD. --- lib/ansible/module_utils/facts.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index a9f1b17e5bd5a9..fe607aa3cfd21d 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -270,6 +270,10 @@ def get_distribution_facts(self): self.facts['distribution'] = 'FreeBSD' self.facts['distribution_release'] = platform.release() self.facts['distribution_version'] = platform.version() + elif self.facts['system'] == 'NetBSD': + self.facts['distribution'] = 'NetBSD' + self.facts['distribution_release'] = platform.release() + self.facts['distribution_version'] = platform.version() elif self.facts['system'] == 'OpenBSD': self.facts['distribution'] = 'OpenBSD' self.facts['distribution_release'] = platform.release() From f4172fb9daf27baaedd63d1f67ad072b2263d9e7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 17 Apr 2015 13:00:25 -0500 Subject: [PATCH 0370/3617] Fix tag handling on meta:flush_handlers tasks Fixes #10758 --- lib/ansible/playbook/play.py | 12 +++++++----- lib/ansible/playbook/task.py | 2 ++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 9fd8a86f4e4127..665f1ef091abac 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -590,15 +590,17 @@ def _load_tasks(self, tasks, vars=None, 
role_params=None, default_vars=None, bec included_become_vars[k] = become_vars[k] x[k] = become_vars[k] - if 'meta' in x: - if x['meta'] == 'flush_handlers': - results.append(Task(self, x)) - continue - task_vars = vars.copy() if original_file: task_vars['_original_file'] = original_file + if 'meta' in x: + if x['meta'] == 'flush_handlers': + if role_name and 'role_name' not in x: + x['role_name'] = role_name + results.append(Task(self, x, module_vars=task_vars, role_name=role_name)) + continue + if 'include' in x: tokens = split_args(str(x['include'])) included_additional_conditions = list(additional_conditions) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index b8b58f1c0bddd0..70c1bc8df6bb00 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -52,6 +52,8 @@ def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=No if 'meta' in ds: self.meta = ds['meta'] self.tags = [] + self.module_vars = module_vars + self.role_name = role_name return else: self.meta = None From 986910be5d96d90da2105c736547d197ce33789e Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Fri, 17 Apr 2015 14:48:57 -0400 Subject: [PATCH 0371/3617] Adding a new VMware utilities module --- lib/ansible/module_utils/vmware.py | 181 +++++++++++++++++++++++++++++ 1 file changed, 181 insertions(+) create mode 100644 lib/ansible/module_utils/vmware.py diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py new file mode 100644 index 00000000000000..d7dcc256fec99d --- /dev/null +++ b/lib/ansible/module_utils/vmware.py @@ -0,0 +1,181 @@ +#!/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Joseph Callen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +try: + import atexit + import time + # requests is required for exception handling of the ConnectionError + import requests + from pyVim import connect + from pyVmomi import vim, vmodl + HAS_PYVMOMI = True +except ImportError: + HAS_PYVMOMI = False + + +class TaskError(Exception): + pass + + +def task_success(task): + return True + + +def task_running(task): + time.sleep(15) + return False + + +def task_error(task): + + try: + raise TaskError(task.info.error) + except AttributeError: + raise TaskError("Unknown error has occurred") + + +def task_queued(task): + time.sleep(15) + return False + + +def wait_for_task(task): + + task_state = { + vim.TaskInfo.State.success: task_success, + vim.TaskInfo.State.running: task_running, + vim.TaskInfo.State.queued: task_queued, + vim.TaskInfo.State.error: task_error, + } + + while True: + try: + is_finished = task_state[task.info.state](task) + if is_finished: + return True, task.info.result + # This exception should be handled in the module that calls this method + # and fail with an appropriate message to module.fail_json() + except TaskError: + raise + + +def find_dvspg_by_name(dv_switch, portgroup_name): + portgroups = dv_switch.portgroup + + for pg in portgroups: + if pg.name == portgroup_name: + return pg + + return None + + +def find_cluster_by_name_datacenter(datacenter, cluster_name): + try: + host_folder = datacenter.hostFolder + for folder in host_folder.childEntity: + if folder.name == cluster_name: + return folder + return None + # This exception should be handled in the module that calls this method + # and fail with an appropriate message to module.fail_json() + 
except vmodl.MethodFault: + raise + + +def find_datacenter_by_name(content, datacenter_name, throw=True): + try: + datacenters = get_all_objs(content, [vim.Datacenter]) + for dc in datacenters: + if dc.name == datacenter_name: + return dc + + return None + # This exception should be handled in the module that calls this method + # and fail with an appropriate message to module.fail_json() + except vmodl.MethodFault: + raise + + +def find_dvs_by_name(content, switch_name): + try: + vmware_distributed_switches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch]) + for dvs in vmware_distributed_switches: + if dvs.name == switch_name: + return dvs + return None + # This exception should be handled in the module that calls this method + # and fail with an appropriate message to module.fail_json() + except vmodl.MethodFault: + raise + + +def find_hostsystem_by_name(content, hostname): + try: + host_system = get_all_objs(content, [vim.HostSystem]) + for host in host_system: + if host.name == hostname: + return host + return None + # This exception should be handled in the module that calls this method + # and fail with an appropriate message to module.fail_json() + except vmodl.MethodFault: + raise + + +def vmware_argument_spec(): + return dict( + hostname=dict(type='str', required=True), + username=dict(type='str', aliases=['user', 'admin'], required=True), + password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True), + ) + + +def connect_to_api(module, disconnect_atexit=True): + hostname = module.params['hostname'] + username = module.params['username'] + password = module.params['password'] + try: + service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password) + + # Disabling atexit should be used in special cases only. + # Such as IP change of the ESXi host which removes the connection anyway. 
+ # Also removal significantly speeds up the return of the module + + if disconnect_atexit: + atexit.register(connect.Disconnect, service_instance) + return service_instance.RetrieveContent() + except vim.fault.InvalidLogin as invalid_login: + module.fail_json(msg=invalid_login.msg) + except requests.ConnectionError: + module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.") + + +def get_all_objs(content, vimtype): + try: + obj = {} + container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True) + for managed_object_ref in container.view: + obj.update({managed_object_ref: managed_object_ref.name}) + return obj + # This exception should be handled in the module that calls this method + # and fail with an appropriate message to module.fail_json() + except vmodl.MethodFault: + raise \ No newline at end of file From 5913227d9f7b3fb6a8f5cabfc3c28a7cd052a578 Mon Sep 17 00:00:00 2001 From: Kevin Ndung'u Date: Sat, 18 Apr 2015 12:47:02 +0300 Subject: [PATCH 0372/3617] Make shell quoting rules explanation a bit clearer I personally got confused by the use of 'vs' in the explanation. Maybe this explains it better? --- docsite/rst/intro_adhoc.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index cfc880ce0bf670..9e104d5836fe29 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -108,7 +108,7 @@ Using the :ref:`shell` module looks like this:: When running any command with the Ansible *ad hoc* CLI (as opposed to :doc:`Playbooks `), pay particular attention to shell quoting rules, so the local shell doesn't eat a variable before it gets passed to Ansible. -For example, using double vs single quotes in the above example would +For example, using double rather than single quotes in the above example would evaluate the variable on the box you were on. 
So far we've been demoing simple command execution, but most Ansible modules usually do not work like From d34e7d7bca7cfd2db0f16618a5667888dfc7880d Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Sat, 18 Apr 2015 16:02:04 -0500 Subject: [PATCH 0373/3617] Correct the ternary example. Fixes #10763 --- docsite/rst/playbooks_filters.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst index 79f721295698c6..63b0dabf13b659 100644 --- a/docsite/rst/playbooks_filters.rst +++ b/docsite/rst/playbooks_filters.rst @@ -301,7 +301,7 @@ Other Useful Filters To use one value on true and another on false (since 1.9):: - {{ name == "John" | ternary('Mr','Ms') }} + {{ (name == "John") | ternary('Mr','Ms') }} To concatenate a list into a string:: From 2a6a01e1227ada38d3677a4a04edfc33414f29e4 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 18 Apr 2015 22:33:55 -0400 Subject: [PATCH 0374/3617] Port some changes that occurred on v1 get_facts modules to v2 - 5c64956a7 Set distribution, release, and version for NetBSD - ec01e071d adjusted for the possibility of lsblk not existing for fact gathering - d4eddabb2 Patch for bug #10485 - ansible_distribution fact populates as 'RedHat' on Oracle Linux systems - 7813ffd71 Adding uptime_seconds fact for linux and darwin platforms - 29cca0191 Adding oVirt recognition for oVirt guests. 
- d0197195e Handle /etc/os-release files with 'Raspbian' in them - 58a5f8dfa Pulls machine id in ansible facts - 1968f9969 Wrong OS_FAMILY declaration for openSUSE - 5dec45e24 Fix wrong distribution facts on SLES/openSUSE and a few others --- v2/ansible/module_utils/facts.py | 199 ++++++++++++++++++++++++++----- 1 file changed, 170 insertions(+), 29 deletions(-) diff --git a/v2/ansible/module_utils/facts.py b/v2/ansible/module_utils/facts.py index 5844c4f67871d0..5d70df4294c980 100644 --- a/v2/ansible/module_utils/facts.py +++ b/v2/ansible/module_utils/facts.py @@ -87,7 +87,8 @@ class Facts(object): _I386RE = re.compile(r'i([3456]86|86pc)') # For the most part, we assume that platform.dist() will tell the truth. # This is the fallback to handle unknowns or exceptions - OSDIST_LIST = ( ('/etc/redhat-release', 'RedHat'), + OSDIST_LIST = ( ('/etc/oracle-release', 'Oracle Linux'), + ('/etc/redhat-release', 'RedHat'), ('/etc/vmware-release', 'VMwareESX'), ('/etc/openwrt_release', 'OpenWrt'), ('/etc/system-release', 'OtherLinux'), @@ -170,9 +171,14 @@ def get_platform_facts(self): if self.facts['system'] == 'Linux': self.get_distribution_facts() elif self.facts['system'] == 'AIX': - rc, out, err = module.run_command("/usr/sbin/bootinfo -p") - data = out.split('\n') - self.facts['architecture'] = data[0] + try: + rc, out, err = module.run_command("/usr/sbin/bootinfo -p") + data = out.split('\n') + self.facts['architecture'] = data[0] + except: + self.facts['architecture'] = 'Not Available' + elif self.facts['system'] == 'OpenBSD': + self.facts['architecture'] = platform.uname()[5] def get_local_facts(self): @@ -229,8 +235,8 @@ def get_distribution_facts(self): RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat', SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat', OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat', - XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', SLES = 'Suse', - SLED = 
'Suse', OpenSuSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo', + XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', SLES = 'Suse', + SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo', Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris', SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin', @@ -261,6 +267,10 @@ def get_distribution_facts(self): self.facts['distribution'] = 'FreeBSD' self.facts['distribution_release'] = platform.release() self.facts['distribution_version'] = platform.version() + elif self.facts['system'] == 'NetBSD': + self.facts['distribution'] = 'NetBSD' + self.facts['distribution_release'] = platform.release() + self.facts['distribution_version'] = platform.version() elif self.facts['system'] == 'OpenBSD': self.facts['distribution'] = 'OpenBSD' self.facts['distribution_release'] = platform.release() @@ -284,6 +294,13 @@ def get_distribution_facts(self): # Once we determine the value is one of these distros # we trust the values are always correct break + elif name == 'Oracle Linux': + data = get_file_content(path) + if 'Oracle Linux' in data: + self.facts['distribution'] = name + else: + self.facts['distribution'] = data.split()[0] + break elif name == 'RedHat': data = get_file_content(path) if 'Red Hat' in data: @@ -354,24 +371,49 @@ def get_distribution_facts(self): data = get_file_content(path) if 'suse' in data.lower(): if path == '/etc/os-release': - release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) - distdata = get_file_content(path).split('\n')[0] - self.facts['distribution'] = distdata.split('=')[1] - if release: - self.facts['distribution_release'] = release.groups()[0] - break + for line in data.splitlines(): + distribution = re.search("^NAME=(.*)", line) + if distribution: + self.facts['distribution'] = 
distribution.group(1).strip('"') + distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line) # example pattern are 13.04 13.0 13 + if distribution_version: + self.facts['distribution_version'] = distribution_version.group(1) + if 'open' in data.lower(): + release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line) + if release: + self.facts['distribution_release'] = release.groups()[0] + elif 'enterprise' in data.lower(): + release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line) # SLES doesn't got funny release names + if release: + release = release.group(1) + else: + release = "0" # no minor number, so it is the first release + self.facts['distribution_release'] = release + break elif path == '/etc/SuSE-release': - data = data.splitlines() - distdata = get_file_content(path).split('\n')[0] - self.facts['distribution'] = distdata.split()[0] - for line in data: - release = re.search('CODENAME *= *([^\n]+)', line) - if release: - self.facts['distribution_release'] = release.groups()[0].strip() - break + if 'open' in data.lower(): + data = data.splitlines() + distdata = get_file_content(path).split('\n')[0] + self.facts['distribution'] = distdata.split()[0] + for line in data: + release = re.search('CODENAME *= *([^\n]+)', line) + if release: + self.facts['distribution_release'] = release.groups()[0].strip() + elif 'enterprise' in data.lower(): + lines = data.splitlines() + distribution = lines[0].split()[0] + if "Server" in data: + self.facts['distribution'] = "SLES" + elif "Desktop" in data: + self.facts['distribution'] = "SLED" + for line in lines: + release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't got funny release names + if release: + self.facts['distribution_release'] = release.group(1) + self.facts['distribution_version'] = self.facts['distribution_version'] + '.' 
+ release.group(1) elif name == 'Debian': data = get_file_content(path) - if 'Debian' in data: + if 'Debian' in data or 'Raspbian' in data: release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) if release: self.facts['distribution_release'] = release.groups()[0] @@ -403,7 +445,10 @@ def get_distribution_facts(self): self.facts['distribution_release'] = release.group(1).strip('"') else: self.facts['distribution'] = name - + machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id") + if machine_id: + machine_id = machine_id.split('\n')[0] + self.facts["machine_id"] = machine_id self.facts['os_family'] = self.facts['distribution'] if self.facts['distribution'] in OS_FAMILY: self.facts['os_family'] = OS_FAMILY[self.facts['distribution']] @@ -462,7 +507,7 @@ def get_lsb_facts(self): if rc == 0: self.facts['lsb'] = {} for line in out.split('\n'): - if len(line) < 1: + if len(line) < 1 or ':' not in line: continue value = line.split(':', 1)[1].strip() if 'LSB Version:' in line: @@ -635,6 +680,7 @@ def populate(self): self.get_memory_facts() self.get_dmi_facts() self.get_device_facts() + self.get_uptime_facts() try: self.get_mount_facts() except TimeoutError: @@ -855,13 +901,14 @@ def get_mount_facts(self): size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail) except OSError, e: continue + + uuid = 'NA' lsblkPath = module.get_bin_path("lsblk") - rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True) + if lsblkPath: + rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True) - if rc == 0: - uuid = out.strip() - else: - uuid = 'NA' + if rc == 0: + uuid = out.strip() self.facts['mounts'].append( {'mount': fields[1], @@ -973,6 +1020,9 @@ def get_device_facts(self): self.facts['devices'][diskname] = d + def get_uptime_facts(self): + uptime_seconds_string = get_file_content('/proc/uptime').split(' ')[0] + 
self.facts['uptime_seconds'] = int(float(uptime_seconds_string)) class SunOSHardware(Hardware): """ @@ -987,6 +1037,10 @@ def __init__(self): def populate(self): self.get_cpu_facts() self.get_memory_facts() + try: + self.get_mount_facts() + except TimeoutError: + pass return self.facts def get_cpu_facts(self): @@ -1047,6 +1101,17 @@ def get_memory_facts(self): self.facts['swap_allocated_mb'] = allocated / 1024 self.facts['swap_reserved_mb'] = reserved / 1024 + @timeout(10) + def get_mount_facts(self): + self.facts['mounts'] = [] + # For a detailed format description see mnttab(4) + # special mount_point fstype options time + fstab = get_file_content('/etc/mnttab') + if fstab: + for line in fstab.split('\n'): + fields = line.rstrip('\n').split('\t') + self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4]}) + class OpenBSDHardware(Hardware): """ OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts: @@ -1071,6 +1136,7 @@ def populate(self): self.get_memory_facts() self.get_processor_facts() self.get_device_facts() + self.get_mount_facts() return self.facts def get_sysctl(self): @@ -1083,6 +1149,19 @@ def get_sysctl(self): sysctl[key] = value.strip() return sysctl + @timeout(10) + def get_mount_facts(self): + self.facts['mounts'] = [] + fstab = get_file_content('/etc/fstab') + if fstab: + for line in fstab.split('\n'): + if line.startswith('#') or line.strip() == '': + continue + fields = re.sub(r'\s+',' ',line.rstrip('\n')).split() + if fields[1] == 'none' or fields[3] == 'xx': + continue + self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]}) + def get_memory_facts(self): # Get free memory. 
vmstat output looks like: # procs memory page disks traps cpu @@ -2133,7 +2212,40 @@ def get_interfaces_info(self, ifconfig_path): self.parse_inet6_line(words, current_if, ips) else: self.parse_unknown_line(words, current_if, ips) - + uname_path = module.get_bin_path('uname') + if uname_path: + rc, out, err = module.run_command([uname_path, '-W']) + # don't bother with wpars it does not work + # zero means not in wpar + if out.split()[0] == '0': + if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']): + entstat_path = module.get_bin_path('entstat') + if entstat_path: + rc, out, err = module.run_command([entstat_path, current_if['device'] ]) + if rc != 0: + break + for line in out.split('\n'): + if not line: + pass + buff = re.match('^Hardware Address: (.*)', line) + if buff: + current_if['macaddress'] = buff.group(1) + + buff = re.match('^Device Type:', line) + if buff and re.match('.*Ethernet', line): + current_if['type'] = 'ether' + # device must have mtu attribute in ODM + if 'mtu' not in current_if: + lsattr_path = module.get_bin_path('lsattr') + if lsattr_path: + rc, out, err = module.run_command([lsattr_path,'-El', current_if['device'] ]) + if rc != 0: + break + for line in out.split('\n'): + if line: + words = line.split() + if words[0] == 'mtu': + current_if['mtu'] = words[1] return interfaces, ips # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here @@ -2360,6 +2472,11 @@ def get_virtual_facts(self): self.facts['virtualization_role'] = 'guest' return + if sys_vendor == 'oVirt': + self.facts['virtualization_type'] = 'kvm' + self.facts['virtualization_role'] = 'guest' + return + if os.path.exists('/proc/self/status'): for line in get_file_lines('/proc/self/status'): if re.match('^VxID: \d+', line): @@ -2514,6 +2631,30 @@ def get_virtual_facts(self): if 'VirtualBox' in line: self.facts['virtualization_type'] = 'virtualbox' self.facts['virtualization_role'] = 'guest' + # Detect domaining on Sparc hardware 
+ if os.path.exists("/usr/sbin/virtinfo"): + # The output of virtinfo is different whether we are on a machine with logical + # domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first. + rc, out, err = module.run_command("/usr/sbin/virtinfo -p") + # The output contains multiple lines with different keys like this: + # DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false + # The output may also be not formated and the returncode is set to 0 regardless of the error condition: + # virtinfo can only be run from the global zone + try: + for line in out.split('\n'): + fields = line.split('|') + if( fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms' ): + self.facts['virtualization_type'] = 'ldom' + self.facts['virtualization_role'] = 'guest' + hostfeatures = [] + for field in fields[2:]: + arg = field.split('=') + if( arg[1] == 'true' ): + hostfeatures.append(arg[0]) + if( len(hostfeatures) > 0 ): + self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')' + except ValueError, e: + pass def get_file_content(path, default=None, strip=True): data = default From 873fd7db56eadfb0aa4b135c01d0a16f8f240c8a Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 18 Apr 2015 23:13:32 -0400 Subject: [PATCH 0375/3617] Add a requires on python-six 1.4.0 ( for add_metaclass ) This also mean that this doesn't run on RHEL 7 as of today. 
--- v2/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/setup.py b/v2/setup.py index a9a518798188ea..e982c382f29823 100644 --- a/v2/setup.py +++ b/v2/setup.py @@ -18,7 +18,7 @@ author_email='michael@ansible.com', url='http://ansible.com/', license='GPLv3', - install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'], + install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6', 'six >= 1.4.0'], # package_dir={ '': 'lib' }, # packages=find_packages('lib'), package_data={ From cd25e0fba0c91af61a4161b7bb55570e28586bdb Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 19 Apr 2015 00:06:50 -0400 Subject: [PATCH 0376/3617] Fix C.ANSIBLE_SSH_CONTROL_PATH string interpolation Since C.ANSIBLE_SSH_CONTROL_PATH use the old format ( "%{directory}" ), we need to use the % operator and not the format method, at least for python 2. --- v2/ansible/plugins/connections/ssh.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py index c07582f6b747cd..1d54d3ba48c7b6 100644 --- a/v2/ansible/plugins/connections/ssh.py +++ b/v2/ansible/plugins/connections/ssh.py @@ -74,7 +74,7 @@ def _connect(self): self._common_args += ( "-o", "ControlMaster=auto", "-o", "ControlPersist=60s", - "-o", "ControlPath=\"{0}\"".format(C.ANSIBLE_SSH_CONTROL_PATH.format(dict(directory=self._cp_dir))), + "-o", "ControlPath=\"{0}\"".format(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir)), ) cp_in_use = False @@ -87,7 +87,7 @@ def _connect(self): if cp_in_use and not cp_path_set: self._common_args += ("-o", "ControlPath=\"{0}\"".format( - C.ANSIBLE_SSH_CONTROL_PATH.format(dict(directory=self._cp_dir))) + C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir)) ) if not C.HOST_KEY_CHECKING: From d8be6fec65415407e5970ac06fd40e06b5b6af22 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Sun, 19 Apr 2015 17:00:35 +0100 Subject: 
[PATCH 0377/3617] Consistently use "OracleLinux" in OS detection. Previously, a mixture of "OracleLinux" and "Oracle Linux" was used, causing the `ansible_os_family` fact not to be set to `RedHat`. Fixes #10742. --- lib/ansible/module_utils/facts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 4827370c7bc6c7..4689dd2da9e907 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -87,7 +87,7 @@ class Facts(object): _I386RE = re.compile(r'i([3456]86|86pc)') # For the most part, we assume that platform.dist() will tell the truth. # This is the fallback to handle unknowns or exceptions - OSDIST_LIST = ( ('/etc/oracle-release', 'Oracle Linux'), + OSDIST_LIST = ( ('/etc/oracle-release', 'OracleLinux'), ('/etc/redhat-release', 'RedHat'), ('/etc/vmware-release', 'VMwareESX'), ('/etc/openwrt_release', 'OpenWrt'), @@ -297,7 +297,7 @@ def get_distribution_facts(self): # Once we determine the value is one of these distros # we trust the values are always correct break - elif name == 'Oracle Linux': + elif name == 'OracleLinux': data = get_file_content(path) if 'Oracle Linux' in data: self.facts['distribution'] = name From e38eb2589af447d6f4d02294ad47f143e05280a9 Mon Sep 17 00:00:00 2001 From: Romain Dartigues Date: Sun, 19 Apr 2015 18:18:52 +0200 Subject: [PATCH 0378/3617] Undefined names found by pyflakes --- plugins/inventory/consul_io.py | 1 + plugins/inventory/softlayer.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/consul_io.py b/plugins/inventory/consul_io.py index e0ff3fbbebd675..7bbe63b13e6389 100755 --- a/plugins/inventory/consul_io.py +++ b/plugins/inventory/consul_io.py @@ -125,6 +125,7 @@ import re import argparse from time import time +import sys import ConfigParser import urllib, urllib2, base64 diff --git a/plugins/inventory/softlayer.py b/plugins/inventory/softlayer.py index 
ef8a2f6a7409a6..d2a15b1218647e 100755 --- a/plugins/inventory/softlayer.py +++ b/plugins/inventory/softlayer.py @@ -55,7 +55,7 @@ def __init__(self): self.get_all_servers() print self.json_format_dict(self.inventory, True) elif self.args.host: - self.get_virtual_servers(client) + self.get_virtual_servers() print self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True) def to_safe(self, word): From 6d68d66d3c2e28c6c7513bded317e383512bcd45 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 19 Apr 2015 23:31:44 -0400 Subject: [PATCH 0379/3617] 1st draft port to v2 --- v2/bin/ansible-vault | 193 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100755 v2/bin/ansible-vault diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault new file mode 100755 index 00000000000000..0aa1c0d4bfb354 --- /dev/null +++ b/v2/bin/ansible-vault @@ -0,0 +1,193 @@ +#!/usr/bin/env python + +# (c) 2014, James Tanner +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# ansible-vault is a script that encrypts/decrypts YAML files. See +# http://docs.ansible.com/playbooks_vault.html for more details. + +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. 
But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass + +import os +import sys +import traceback + +from ansible.errors import AnsibleError +from ansible.utils.vault import VaultEditor +from ansible.utils.cli import base_parser, ask_vault_passwords + +#------------------------------------------------------------------------------------- +# Utility functions for parsing actions/options +#------------------------------------------------------------------------------------- + + + +class Cli(object): + + VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view") + + + def __init__(self, display=None): + + self.vault_pass = None + + if display is None: + self.display = Display() + else: + self.display = display + + + def parse(self): + + # create parser for CLI options + parser = base_parser( + usage = "%prog vaultfile.yml", + ) + + return parser.parse_args() + + def run(self, options, args) + + action = self.get_action(args) + + if not action: + parser.print_help() + raise AnsibleError("missing required action") + + # options specific to actions + if action == "create": + parser.set_usage("usage: %prog create [options] file_name") + elif action == "decrypt": + parser.set_usage("usage: %prog decrypt [options] file_name") + elif action == "edit": + parser.set_usage("usage: %prog edit [options] file_name") + elif action == "view": + parser.set_usage("usage: %prog view [options] file_name") + elif action == "encrypt": + parser.set_usage("usage: %prog encrypt [options] file_name") + elif action == "rekey": + parser.set_usage("usage: %prog rekey [options] file_name") + + if len(args) == 0 or len(args) > 1: + parser.print_help() + raise AnsibleError("Vault requires a single filename as a parameter") + + if options.vault_password_file: + # read vault_pass from a file + self.vault_pass = 
read_vault_file(options.vault_password_file) + else: + self.vault_pass, _= ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False) + + # execute the desired action + fn = getattr(self, "execute_%s" % action) + fn(args, options) + + def get_action(self, args): + """ + Get the action the user wants to execute from the + sys argv list. + """ + for i in range(0,len(args)): + arg = args[i] + if arg in VALID_ACTIONS: + del args[i] + return arg + return None + + def execute_create(args, options): + + cipher = 'AES256' + if hasattr(options, 'cipher'): + cipher = options.cipher + + this_editor = VaultEditor(cipher, self.vault_pass, args[0]) + this_editor.create_file() + + def execute_decrypt(args, options): + + cipher = 'AES256' + if hasattr(options, 'cipher'): + cipher = options.cipher + + for f in args: + this_editor = VaultEditor(cipher, self.vault_pass, f) + this_editor.decrypt_file() + + self.display.display("Decryption successful") + + def execute_edit(args, options): + + cipher = None + + for f in args: + this_editor = VaultEditor(cipher, self.vault_pass, f) + this_editor.edit_file() + + def execute_view(args, options): + + cipher = None + + for f in args: + this_editor = VaultEditor(cipher, self.vault_pass, f) + this_editor.view_file() + + def execute_encrypt(args, options): + + cipher = 'AES256' + if hasattr(options, 'cipher'): + cipher = options.cipher + + for f in args: + this_editor = VaultEditor(cipher, self.vault_pass, f) + this_editor.encrypt_file() + + self.display.display("Encryption successful") + + def execute_rekey(args, options ): + __, new_password = ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True) + + cipher = None + for f in args: + this_editor = VaultEditor(cipher, self.vault_pass, f) + this_editor.rekey_file(new_password) + + self.display.display("Rekey successful") + +######################################################## + +if __name__ == "__main__": + + display = Display() + 
#display.display(" ".join(sys.argv), log_only=True) + + try: + cli = Cli(display=display) + (options, args) = cli.parse() + sys.exit(cli.run(options, args)) + except AnsibleError as e: + display.display("[ERROR]: %s" % e, color='red', stderr=True) + sys.exit(1) + except KeyboardInterrupt: + display.display("[ERROR]: interrupted", color='red', stderr=True) + sys.exit(1) From 1046a396ae8723d57196b1adf026b2c8cc89382b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 19 Apr 2015 23:34:07 -0400 Subject: [PATCH 0380/3617] fixed typo --- v2/bin/ansible-vault | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault index 0aa1c0d4bfb354..c72d3de017a6c4 100755 --- a/v2/bin/ansible-vault +++ b/v2/bin/ansible-vault @@ -67,7 +67,7 @@ class Cli(object): return parser.parse_args() - def run(self, options, args) + def run(self, options, args): action = self.get_action(args) From 0a26b149fc78ae5b4c920fc07032887d5366620a Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Thu, 2 Apr 2015 00:41:07 +0200 Subject: [PATCH 0381/3617] cloudstack: add error result handling in async job --- lib/ansible/module_utils/cloudstack.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index f72d270d30b3b7..dd7e60f733492a 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -185,8 +185,10 @@ def _poll_job(self, job=None, key=None): if 'jobid' in job: while True: res = self.cs.queryAsyncJobResult(jobid=job['jobid']) - if res['jobstatus'] != 0: - if 'jobresult' in res and key is not None and key in res['jobresult']: + if res['jobstatus'] != 0 and 'jobresult' in res: + if 'errortext' in res['jobresult']: + self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext']) + if key and key in res['jobresult']: job = res['jobresult'][key] break time.sleep(2) From 
822c2c0cd3a46fc7bebb316d49387a95580b5ac5 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 4 Apr 2015 00:40:31 +0200 Subject: [PATCH 0382/3617] cloudstack: fix vm not found by displayname --- lib/ansible/module_utils/cloudstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index dd7e60f733492a..2c891434bdebea 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -119,7 +119,7 @@ def get_vm_id(self): vms = self.cs.listVirtualMachines(**args) if vms: for v in vms['virtualmachine']: - if vm in [ v['name'], v['id'] ]: + if vm in [ v['displayname'], v['name'], v['id'] ]: self.vm_id = v['id'] return self.vm_id self.module.fail_json(msg="Virtual machine '%s' not found" % vm) From 6a35463e9c1d37dcedc060ea7ab2d9cfa50e6edd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Apr 2015 10:52:49 -0400 Subject: [PATCH 0383/3617] added note for new find module --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 82a41702d5507d..92972008d144c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ Deprecated Modules: ec2_ami_search, in favor of the new ec2_ami_find New Modules: + find ec2_ami_find cloudtrail cloudstack_fw From d600c650951812c69937b772bf26c9c89d17e24a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 20 Apr 2015 11:15:31 -0500 Subject: [PATCH 0384/3617] Fix fetch action plugin in v2 to use the inventory_hostname in the dest dir Fixes #10736 --- v2/ansible/plugins/action/fetch.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/v2/ansible/plugins/action/fetch.py b/v2/ansible/plugins/action/fetch.py index 58e7cebb8d2d6b..c242c8739d014e 100644 --- a/v2/ansible/plugins/action/fetch.py +++ b/v2/ansible/plugins/action/fetch.py @@ -94,7 +94,11 @@ def run(self, tmp=None, task_vars=dict()): dest = self._loader.path_dwim(dest) else: # 
files are saved in dest dir, with a subdir for each host, then the filename - dest = "%s/%s/%s" % (self._loader.path_dwim(dest), self._connection_info.remote_addr, source_local) + if 'inventory_hostname' in task_vars: + target_name = task_vars['inventory_hostname'] + else: + target_name = self._connection_info.remote_addr + dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local) dest = dest.replace("//","/") From 1359bbee87038c35bb49d4fb80e1749184b72f08 Mon Sep 17 00:00:00 2001 From: James Laska Date: Mon, 20 Apr 2015 12:49:25 -0400 Subject: [PATCH 0385/3617] Fix traceback with using GCE on EL6 with python-crypto2.6 This fix resolves an issue on EL6 systems where there may be multiple versions of pycrypto installed. EPEL provides both `python-crypto` and `python-crypto2.6`. These packages are co-installable. However, modules importing the `Crypto` library must specify which version to use, otherwise the default will be used. This change follows the same pattern established in `bin/ansible` for specifying python library requirements. --- plugins/inventory/gce.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/plugins/inventory/gce.py b/plugins/inventory/gce.py index e77178c16b306a..76e14f2301278d 100755 --- a/plugins/inventory/gce.py +++ b/plugins/inventory/gce.py @@ -72,6 +72,16 @@ Version: 0.0.1 ''' +__requires__ = ['pycrypto>=2.6'] +try: + import pkg_resources +except ImportError: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. We don't + # fail here as there is code that better expresses the errors where the + # library is used. 
+ pass + USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" USER_AGENT_VERSION="v1" From 56deb35e67c1c60454e951cc7f0277cfed7774fd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Apr 2015 13:31:36 -0400 Subject: [PATCH 0386/3617] updated intro to new become syntax, also added link to full become docs --- docsite/rst/playbooks_intro.rst | 37 ++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 4e10528b8c65cd..afa97b3e0434f1 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -148,7 +148,7 @@ Remote users can also be defined per task:: The `remote_user` parameter for tasks was added in 1.4. -Support for running things from sudo is also available:: +Support for running things from as another user is also available (see :doc:`become`):: --- - hosts: webservers @@ -162,31 +162,44 @@ You can also use sudo on a particular task instead of the whole play:: remote_user: yourname tasks: - service: name=nginx state=started - sudo: yes + become: yes + become_method: sudo +.. note:: + + The becoem syntax deprecates the old sudo/su specific syntax begining in 1.9. -You can also login as you, and then sudo to different users than root:: +You can also login as you, and then become a user different than root:: --- - hosts: webservers remote_user: yourname - sudo: yes - sudo_user: postgres + become: yes + become_user: postgres + +You can also use other privilege escalation methods, like su:: + + --- + - hosts: webservers + remote_user: yourname + become: yes + become_method: su -If you need to specify a password to sudo, run `ansible-playbook` with ``--ask-sudo-pass`` (`-K`). -If you run a sudo playbook and the playbook seems to hang, it's probably stuck at the sudo prompt. -Just `Control-C` to kill it and run it again with `-K`. 
+If you need to specify a password to sudo, run `ansible-playbook` with ``--ask-become-pass`` or +when using the old sudo syntax ``--ask-sudo--pass`` (`-K`). If you run a become playbook and the +playbook seems to hang, it's probably stuck at the privilege escalation prompt. +Just `Control-C` to kill it and run it again adding the appropriate password. .. important:: - When using `sudo_user` to a user other than root, the module + When using `become_user` to a user other than root, the module arguments are briefly written into a random tempfile in /tmp. These are deleted immediately after the command is executed. This - only occurs when sudoing from a user like 'bob' to 'timmy', not - when going from 'bob' to 'root', or logging in directly as 'bob' or + only occurs when changing privileges from a user like 'bob' to 'timmy', + not when going from 'bob' to 'root', or logging in directly as 'bob' or 'root'. If it concerns you that this data is briefly readable (not writable), avoid transferring unencrypted passwords with - `sudo_user` set. In other cases, '/tmp' is not used and this does + `become_user` set. In other cases, '/tmp' is not used and this does not come into play. Ansible also takes care to not log password parameters. 
From d22898f7022f10e919ff82e232f092c74fa9ecf8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Apr 2015 13:42:02 -0400 Subject: [PATCH 0387/3617] changed vaulteditor import to new path, now vault seems to work --- v2/bin/ansible-vault | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault index c72d3de017a6c4..506402ee15f935 100755 --- a/v2/bin/ansible-vault +++ b/v2/bin/ansible-vault @@ -34,7 +34,7 @@ import sys import traceback from ansible.errors import AnsibleError -from ansible.utils.vault import VaultEditor +from ansible.parsing.vault import VaultEditor from ansible.utils.cli import base_parser, ask_vault_passwords #------------------------------------------------------------------------------------- From 1d966ac5bbe8ffd88f4aa9f0a091409ad39c2927 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mikko=20Ekstr=C3=B6m?= Date: Tue, 21 Apr 2015 00:13:52 +0200 Subject: [PATCH 0388/3617] Correct minor spelling typos. --- docsite/rst/playbooks_intro.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index afa97b3e0434f1..a27285b4a9ff18 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -167,7 +167,7 @@ You can also use sudo on a particular task instead of the whole play:: .. note:: - The becoem syntax deprecates the old sudo/su specific syntax begining in 1.9. + The become syntax deprecates the old sudo/su specific syntax beginning in 1.9. You can also login as you, and then become a user different than root:: @@ -314,7 +314,7 @@ The old form continues to work in newer versions without any plan of deprecation Handlers: Running Operations On Change `````````````````````````````````````` -As we've mentioned, modules are written to be 'idempotent' and can relay when +As we've mentioned, modules are written to be 'idempotent' and can relay when they have made a change on the remote system. 
Playbooks recognize this and have a basic event system that can be used to respond to change. From da3780908a0c084e9de3ad6dbb8b9a168eeaa4be Mon Sep 17 00:00:00 2001 From: "Hennadiy (Gena) Verkh" Date: Tue, 21 Apr 2015 11:32:10 +0200 Subject: [PATCH 0389/3617] Fixed links --- docsite/rst/community.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index f33109337dbe27..b056c3dacc2085 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -66,7 +66,7 @@ Bugs related to the core language should be reported to `github.com/ansible/ansi signing up for a free github account. Before reporting a bug, please use the bug/issue search to see if the issue has already been reported. -MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module. +MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module. When filing a bug, please use the `issue template `_ to provide all relevant information, regardless of what repo you are filing a ticket against. 
From 14637c6c7e7dac3b2a35bdace54c0d2b5e3577d5 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 21 Apr 2015 14:13:58 +0200 Subject: [PATCH 0390/3617] changelog: update cloudstack module names --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 92972008d144c5..11f2726a673e9c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,9 +16,9 @@ New Modules: find ec2_ami_find cloudtrail - cloudstack_fw - cloudstack_iso - cloudstack_sshkey + cloudstack: cs_firewall + cloudstack: cs_iso + cloudstack: cs_sshkeypair maven_artifact pushover zabbix_host From 9b317858c1c5a2f74cd55df4a5dfecf427a01594 Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Tue, 21 Apr 2015 08:33:32 -0400 Subject: [PATCH 0391/3617] Modified per @bcoca Removed try/except raises Modified wait_for_task Added api exception error message --- lib/ansible/module_utils/vmware.py | 138 ++++++++++------------------- 1 file changed, 47 insertions(+), 91 deletions(-) diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py index d7dcc256fec99d..5d94b9d6bba2c5 100644 --- a/lib/ansible/module_utils/vmware.py +++ b/lib/ansible/module_utils/vmware.py @@ -1,4 +1,3 @@ -#!/bin/python # -*- coding: utf-8 -*- # (c) 2015, Joseph Callen @@ -35,49 +34,24 @@ class TaskError(Exception): pass -def task_success(task): - return True - - -def task_running(task): - time.sleep(15) - return False - - -def task_error(task): - - try: - raise TaskError(task.info.error) - except AttributeError: - raise TaskError("Unknown error has occurred") - - -def task_queued(task): - time.sleep(15) - return False - - def wait_for_task(task): - task_state = { - vim.TaskInfo.State.success: task_success, - vim.TaskInfo.State.running: task_running, - vim.TaskInfo.State.queued: task_queued, - vim.TaskInfo.State.error: task_error, - } - while True: - try: - is_finished = task_state[task.info.state](task) - if is_finished: - return True, task.info.result - # 
This exception should be handled in the module that calls this method - # and fail with an appropriate message to module.fail_json() - except TaskError: - raise + if task.info.state == vim.TaskInfo.State.success: + return True, task.info.result + if task.info.state == vim.TaskInfo.State.error + try: + raise TaskError(task.info.error) + except AttributeError: + raise TaskError("An unknown error has occurred") + if task.info.state == vim.TaskInfo.State.running: + time.sleep(15) + if task.info.state = vim.TaskInfo.State.queued: + time.sleep(15) def find_dvspg_by_name(dv_switch, portgroup_name): + portgroups = dv_switch.portgroup for pg in portgroups: @@ -88,59 +62,44 @@ def find_dvspg_by_name(dv_switch, portgroup_name): def find_cluster_by_name_datacenter(datacenter, cluster_name): - try: - host_folder = datacenter.hostFolder - for folder in host_folder.childEntity: - if folder.name == cluster_name: - return folder - return None - # This exception should be handled in the module that calls this method - # and fail with an appropriate message to module.fail_json() - except vmodl.MethodFault: - raise + + host_folder = datacenter.hostFolder + for folder in host_folder.childEntity: + if folder.name == cluster_name: + return folder + return None def find_datacenter_by_name(content, datacenter_name, throw=True): - try: - datacenters = get_all_objs(content, [vim.Datacenter]) - for dc in datacenters: - if dc.name == datacenter_name: - return dc - return None - # This exception should be handled in the module that calls this method - # and fail with an appropriate message to module.fail_json() - except vmodl.MethodFault: - raise + datacenters = get_all_objs(content, [vim.Datacenter]) + for dc in datacenters: + if dc.name == datacenter_name: + return dc + + return None def find_dvs_by_name(content, switch_name): - try: - vmware_distributed_switches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch]) - for dvs in vmware_distributed_switches: - if dvs.name == 
switch_name: - return dvs - return None - # This exception should be handled in the module that calls this method - # and fail with an appropriate message to module.fail_json() - except vmodl.MethodFault: - raise + + vmware_distributed_switches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch]) + for dvs in vmware_distributed_switches: + if dvs.name == switch_name: + return dvs + return None def find_hostsystem_by_name(content, hostname): - try: - host_system = get_all_objs(content, [vim.HostSystem]) - for host in host_system: - if host.name == hostname: - return host - return None - # This exception should be handled in the module that calls this method - # and fail with an appropriate message to module.fail_json() - except vmodl.MethodFault: - raise + + host_system = get_all_objs(content, [vim.HostSystem]) + for host in host_system: + if host.name == hostname: + return host + return None def vmware_argument_spec(): + return dict( hostname=dict(type='str', required=True), username=dict(type='str', aliases=['user', 'admin'], required=True), @@ -149,6 +108,7 @@ def vmware_argument_spec(): def connect_to_api(module, disconnect_atexit=True): + hostname = module.params['hostname'] username = module.params['username'] password = module.params['password'] @@ -163,19 +123,15 @@ def connect_to_api(module, disconnect_atexit=True): atexit.register(connect.Disconnect, service_instance) return service_instance.RetrieveContent() except vim.fault.InvalidLogin as invalid_login: - module.fail_json(msg=invalid_login.msg) - except requests.ConnectionError: - module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.") + module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login)) + except requests.ConnectionError as connection_error: + module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error)) def get_all_objs(content, vimtype): - try: - obj = {} - container = 
content.viewManager.CreateContainerView(content.rootFolder, vimtype, True) - for managed_object_ref in container.view: - obj.update({managed_object_ref: managed_object_ref.name}) - return obj - # This exception should be handled in the module that calls this method - # and fail with an appropriate message to module.fail_json() - except vmodl.MethodFault: - raise \ No newline at end of file + + obj = {} + container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True) + for managed_object_ref in container.view: + obj.update({managed_object_ref: managed_object_ref.name}) + return obj From bc47fe6f82383ecc6ed3e7bd4f1497d627f8d2ba Mon Sep 17 00:00:00 2001 From: Pahaz Blinov Date: Tue, 21 Apr 2015 19:13:11 +0500 Subject: [PATCH 0392/3617] Python 3 compatible `except` statment --- v2/ansible/executor/task_queue_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index a1fa01e1865f98..a5c2920aec5e88 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -92,7 +92,7 @@ def __init__(self, inventory, callback, variable_manager, loader, display, optio if fileno is not None: try: new_stdin = os.fdopen(os.dup(fileno)) - except OSError, e: + except OSError: # couldn't dupe stdin, most likely because it's # not a valid file descriptor, so we just rely on # using the one that was passed in From c58aaf72fcc308d9c3f876019e46d2ee882ae3b1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 21 Apr 2015 09:48:13 -0500 Subject: [PATCH 0393/3617] Properly handle lack of stdout in results in v2 Fixes #10549 --- v2/ansible/plugins/action/__init__.py | 32 +++++++++++++-------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index c5b88e76946351..c49ac8e6f00d2b 100644 --- 
a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -412,22 +412,22 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_ cmd2 = self._shell.remove(tmp, recurse=True) self._low_level_execute_command(cmd2, tmp, sudoable=False) - # FIXME: in error situations, the stdout may not contain valid data, so we - # should check for bad rc codes better to catch this here - if 'stdout' in res and res['stdout'].strip(): - try: - data = json.loads(self._filter_leading_non_json_lines(res['stdout'])) - except ValueError: - # not valid json, lets try to capture error - data = {'traceback': res['stdout']} - if 'parsed' in data and data['parsed'] == False: - data['msg'] += res['stderr'] - # pre-split stdout into lines, if stdout is in the data and there - # isn't already a stdout_lines value there - if 'stdout' in data and 'stdout_lines' not in data: - data['stdout_lines'] = data.get('stdout', '').splitlines() - else: - data = dict() + try: + data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', ''))) + except ValueError: + # not valid json, lets try to capture error + data = dict(failed=True, parsed=False) + if 'stderr' in res and res['stderr'].startswith('Traceback'): + data['traceback'] = res['stderr'] + else: + data['msg'] = res.get('stdout', '') + if 'stderr' in res: + data['msg'] += res['stderr'] + + # pre-split stdout into lines, if stdout is in the data and there + # isn't already a stdout_lines value there + if 'stdout' in data and 'stdout_lines' not in data: + data['stdout_lines'] = data.get('stdout', '').splitlines() # store the module invocation details back into the result data['invocation'] = dict( From d996a2c216c4ad7a3cb69e160c55cbbf6a15c62e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 21 Apr 2015 11:48:43 -0400 Subject: [PATCH 0394/3617] ported fix from v1 to v2 on oracle linux family detection --- v2/ansible/module_utils/facts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/v2/ansible/module_utils/facts.py b/v2/ansible/module_utils/facts.py index 5d70df4294c980..66ca86c3969e1f 100644 --- a/v2/ansible/module_utils/facts.py +++ b/v2/ansible/module_utils/facts.py @@ -87,7 +87,7 @@ class Facts(object): _I386RE = re.compile(r'i([3456]86|86pc)') # For the most part, we assume that platform.dist() will tell the truth. # This is the fallback to handle unknowns or exceptions - OSDIST_LIST = ( ('/etc/oracle-release', 'Oracle Linux'), + OSDIST_LIST = ( ('/etc/oracle-release', 'OracleLinux'), ('/etc/redhat-release', 'RedHat'), ('/etc/vmware-release', 'VMwareESX'), ('/etc/openwrt_release', 'OpenWrt'), @@ -294,7 +294,7 @@ def get_distribution_facts(self): # Once we determine the value is one of these distros # we trust the values are always correct break - elif name == 'Oracle Linux': + elif name == 'OracleLinux': data = get_file_content(path) if 'Oracle Linux' in data: self.facts['distribution'] = name From 7669a0b275639c46b847d9a6703d25298adb27b3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 21 Apr 2015 12:02:32 -0500 Subject: [PATCH 0395/3617] Fixing some v2 bugs --- v2/ansible/parsing/__init__.py | 2 +- v2/ansible/plugins/action/set_fact.py | 2 +- v2/ansible/vars/__init__.py | 20 ++++++++++++++------ 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index bce5b2b667824a..bf96fba84202c0 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -120,7 +120,7 @@ def is_directory(self, path): return os.path.isdir(path) def list_directory(self, path): - return os.path.listdir(path) + return os.listdir(path) def _safe_load(self, stream, file_name=None): ''' Implements yaml.safe_load(), except using our custom loader class. 
''' diff --git a/v2/ansible/plugins/action/set_fact.py b/v2/ansible/plugins/action/set_fact.py index a7ddf10b474a44..6086ee6e8b2b50 100644 --- a/v2/ansible/plugins/action/set_fact.py +++ b/v2/ansible/plugins/action/set_fact.py @@ -35,4 +35,4 @@ def run(self, tmp=None, task_vars=dict()): if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'): v = boolean(v) facts[k] = v - return dict(changed=True, ansible_facts=facts) + return dict(changed=False, ansible_facts=facts) diff --git a/v2/ansible/vars/__init__.py b/v2/ansible/vars/__init__.py index 183116ea2d84fe..f30d52b7a3a1c0 100644 --- a/v2/ansible/vars/__init__.py +++ b/v2/ansible/vars/__init__.py @@ -29,6 +29,7 @@ from sha import sha as sha1 from ansible import constants as C +from ansible.errors import * from ansible.parsing import DataLoader from ansible.plugins.cache import FactCache from ansible.template import Templar @@ -78,14 +79,19 @@ def set_extra_vars(self, value): def set_inventory(self, inventory): self._inventory = inventory + def _validate_both_dicts(self, a, b): + ''' + Validates that both arguments are dictionaries, or an error is raised. + ''' + if not (isinstance(a, dict) and isinstance(b, dict)): + raise AnsibleError("failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)) + def _combine_vars(self, a, b): ''' Combines dictionaries of variables, based on the hash behavior ''' - # FIXME: do we need this from utils, or should it just - # be merged into this definition? - #_validate_both_dicts(a, b) + self._validate_both_dicts(a, b) if C.DEFAULT_HASH_BEHAVIOUR == "merge": return self._merge_dicts(a, b) @@ -100,9 +106,7 @@ def _merge_dicts(self, a, b): result = dict() - # FIXME: do we need this from utils, or should it just - # be merged into this definition? 
- #_validate_both_dicts(a, b) + self._validate_both_dicts(a, b) for dicts in a, b: # next, iterate over b keys and values @@ -183,6 +187,8 @@ def get_vars(self, loader, play=None, host=None, task=None): try: vars_file = templar.template(vars_file) data = loader.load_from_file(vars_file) + if data is None: + data = dict() all_vars = self._combine_vars(all_vars, data) except: # FIXME: get_vars should probably be taking a flag to determine @@ -258,6 +264,8 @@ def _load_inventory_file(self, path, loader): else: data = loader.load_from_file(path) + if data is None: + data = dict() name = self._get_inventory_basename(path) return (name, data) From 6935d467eb6714301c9c2eb86366033e1edb4532 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 21 Apr 2015 10:08:00 -0700 Subject: [PATCH 0396/3617] Add tests for yum module taking lists of packages in various formats --- test/integration/roles/test_yum/tasks/yum.yml | 106 +++++++++++++++++- 1 file changed, 104 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_yum/tasks/yum.yml b/test/integration/roles/test_yum/tasks/yum.yml index 6630a2ff4cee12..78bb9abf7830a0 100644 --- a/test/integration/roles/test_yum/tasks/yum.yml +++ b/test/integration/roles/test_yum/tasks/yum.yml @@ -28,7 +28,7 @@ - debug: var=yum_result - debug: var=rpm_result -- name: verify uninstalltion of sos +- name: verify uninstallation of sos assert: that: - "yum_result.rc == 0" @@ -73,7 +73,7 @@ - "'rc' in yum_result" - "'results' in yum_result" -# INSTALL AGAIN +# INSTALL AGAIN - name: install sos again yum: name=sos state=present register: yum_result @@ -83,4 +83,106 @@ that: - "not yum_result.changed" +# Multiple packages +- name: uninstall sos and python-q + yum: name=sos,python-q state=removed + register: yum_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + register: rpm_sos_result + +- name: check python-q with rpm + shell: rpm -q python-q + failed_when: False + register: rpm_python_q_result + +- 
name: verify packages installed + assert: + that: + - "rpm_sos_result.rc != 0" + - "rpm_python_q_result.rc != 0" + +- name: install sos and python-q as comma separated + yum: name=sos,python-q state=present + register: yum_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + register: rpm_sos_result + +- name: check python-q with rpm + shell: rpm -q python-q + failed_when: False + register: rpm_python_q_result + +- name: verify packages installed + assert: + that: + - "yum_result.rc == 0" + - "yum_result.changed" + - "rpm_sos_result.rc == 0" + - "rpm_python_q_result.rc == 0" + +- name: uninstall sos and python-q + yum: name=sos,python-q state=removed + register: yum_result + +- name: install sos and python-q as list + yum: + name: + - sos + - python-q + state: present + register: yum_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + register: rpm_sos_result + +- name: check python-q with rpm + shell: rpm -q python-q + failed_when: False + register: rpm_python_q_result + +- name: verify packages installed + assert: + that: + - "yum_result.rc == 0" + - "yum_result.changed" + - "rpm_sos_result.rc == 0" + - "rpm_python_q_result.rc == 0" + +- name: uninstall sos and python-q + yum: name=sos,python-q state=removed + register: yum_result + +- name: install sos and python-q as comma separated with spaces + yum: + name: "sos, python-q" + state: present + register: yum_result + +- name: check sos with rpm + shell: rpm -q sos + failed_when: False + register: rpm_sos_result + +- name: check sos with rpm + shell: rpm -q python-q + failed_when: False + register: rpm_python_q_result + +- name: verify packages installed + assert: + that: + - "yum_result.rc == 0" + - "yum_result.changed" + - "rpm_sos_result.rc == 0" + - "rpm_python_q_result.rc == 0" +- name: uninstall sos and python-q + yum: name=sos,python-q state=removed From f0158d4eed76d71f566f9c58fffd230a625ea975 Mon Sep 17 00:00:00 2001 From: Hiroaki Nakamura Date: Wed, 
22 Apr 2015 02:57:26 +0900 Subject: [PATCH 0397/3617] Add Python 2.7 to Conrol Machine Requirements in docsite --- docsite/rst/intro_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 4a4504388a56fe..604be2abc9ea52 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -46,7 +46,7 @@ information about running from source. It's not necessary to install the progra Control Machine Requirements ```````````````````````````` -Currently Ansible can be run from any machine with Python 2.6 installed (Windows isn't supported for the control machine). +Currently Ansible can be run from any machine with Python 2.6 or 2.7 installed (Windows isn't supported for the control machine). This includes Red Hat, Debian, CentOS, OS X, any of the BSDs, and so on. From c5e9a87e74ac4381a1d230a03d39fd9ae58ea89f Mon Sep 17 00:00:00 2001 From: Greg Taylor Date: Tue, 21 Apr 2015 11:11:57 -0700 Subject: [PATCH 0398/3617] Correct emphasis markup nit in playbooks_vault.rst It looks like the original intention was to italicize, but someone was used to another markup language. I have switched the wrapped tags so we're showing italics and not a broken link. 
--- docsite/rst/playbooks_vault.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst index 56cb3c78bd8de1..9ccb5b50f17b01 100644 --- a/docsite/rst/playbooks_vault.rst +++ b/docsite/rst/playbooks_vault.rst @@ -77,7 +77,7 @@ If you have existing files that you no longer want to keep encrypted, you can pe Viewing Encrypted Files ``````````````````````` -_Available since Ansible 1.8_ +*Available since Ansible 1.8* If you want to view the contents of an encrypted file without editing it, you can use the `ansible-vault view` command:: From 5ff9859c9b22432dbf43955ff32ae9f84d8b6569 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 21 Apr 2015 15:25:23 -0400 Subject: [PATCH 0399/3617] added new cloudstabck modules to changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11f2726a673e9c..752c1c85c7b633 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,9 +16,13 @@ New Modules: find ec2_ami_find cloudtrail + cloudstack: cs_affinitygroup cloudstack: cs_firewall cloudstack: cs_iso cloudstack: cs_sshkeypair + cloudstack: cs_securitygroup + cloudstack: cs_securitygroup_rule + cloudstack: cs_vmsnapshot maven_artifact pushover zabbix_host From 9f54276fdd8052d1f0b8fe54a54a3b759b67bfc0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 21 Apr 2015 15:27:09 -0400 Subject: [PATCH 0400/3617] changed formating to rst lists --- CHANGELOG.md | 51 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 752c1c85c7b633..a3e0e58311d438 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,38 +4,37 @@ Ansible Changes By Release ## 2.0 "TBD" - ACTIVE DEVELOPMENT Major Changes: - - big_ip modules now support turning off ssl certificate validation (use only for self signed) - - - template code now retains types for bools and Numbers instead of turning them into 
strings - - If you need the old behaviour, quote the value and it will get passed around as a string + * big_ip modules now support turning off ssl certificate validation (use only for self signed) + * template code now retains types for bools and Numbers instead of turning them into strings + If you need the old behaviour, quote the value and it will get passed around as a string Deprecated Modules: - ec2_ami_search, in favor of the new ec2_ami_find + * ec2_ami_search, in favor of the new ec2_ami_find New Modules: - find - ec2_ami_find - cloudtrail - cloudstack: cs_affinitygroup - cloudstack: cs_firewall - cloudstack: cs_iso - cloudstack: cs_sshkeypair - cloudstack: cs_securitygroup - cloudstack: cs_securitygroup_rule - cloudstack: cs_vmsnapshot - maven_artifact - pushover - zabbix_host - zabbix_hostmacro - zabbix_screen - vertica_configuration - vertica_facts - vertica_role - vertica_schema - vertica_user + * find + * ec2_ami_find + * cloudtrail + * cloudstack: cs_affinitygroup + * cloudstack: cs_firewall + * cloudstack: cs_iso + * cloudstack: cs_sshkeypair + * cloudstack: cs_securitygroup + * cloudstack: cs_securitygroup_rule + * cloudstack: cs_vmsnapshot + * maven_artifact + * pushover + * zabbix_host + * zabbix_hostmacro + * zabbix_screen + * vertica_configuration + * vertica_facts + * vertica_role + * vertica_schema + * vertica_user New Inventory scripts: - fleetctl + * fleetctl Other Notable Changes: From 8161dab60af01afa2c5dd4cc84618ba5ddbc80ec Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 21 Apr 2015 15:32:35 -0400 Subject: [PATCH 0401/3617] added notes about privilege escalation limitations --- docsite/rst/become.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 70b781887a0a77..83f8ce1bb8a759 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -64,7 +64,7 @@ sudo and su still work! 
----------------------- Old playbooks will not need to be changed, even though they are deprecated, sudo and su directives will continue to work though it -is recommended to move to become as they may be retired at one point. You cannot mix directives on the same object though, ansible +is recommended to move to become as they may be retired at one point. You cannot mix directives on the same object though, Ansible will complain if you try to. Become will default to using the old sudo/su configs and variables if they exist, but will override them if you specify any of the @@ -74,6 +74,10 @@ new ones. .. note:: Privilege escalation methods must also be supported by the connection plugin used, most will warn if they do not, some will just ignore it as they always run as root (jail, chroot, etc). +.. note:: Methods cannot be chained, you cannot use 'sudo /bin/su -' to become a user, you need to have privileges to run the command as that user in sudo or be able to su directly to it (the same for pbrun, pfexec or other supported methods). + +.. note:: Privilege escalation permissions have to be general, Ansible does not always use a specific command to do something but runs modules (code) from a temporary file name which changes every time. So if you have '/sbin/sevice' or '/bin/chmod' as the allowed commands this will fail with ansible. + .. 
seealso:: `Mailing List `_ From b5127c3442c4ae3c66c7f06ae7d956ae9551f6a4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 10:19:53 -0400 Subject: [PATCH 0402/3617] added new vmware_datacenter module to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a3e0e58311d438..58638e96a79cd0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ New Modules: * vertica_role * vertica_schema * vertica_user + * vmware_datacenter New Inventory scripts: * fleetctl From 87bc7058060457d4802a10a7a2df71aaaf7bf158 Mon Sep 17 00:00:00 2001 From: Joseph Callen Date: Wed, 22 Apr 2015 14:54:05 -0400 Subject: [PATCH 0403/3617] Fixes VMware module utils Resolves syntax errors in the `wait_for_tasks` Removes throw from `find_datacenter_by_name` --- lib/ansible/module_utils/vmware.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py index 5d94b9d6bba2c5..e2d8c18ca481c7 100644 --- a/lib/ansible/module_utils/vmware.py +++ b/lib/ansible/module_utils/vmware.py @@ -39,14 +39,14 @@ def wait_for_task(task): while True: if task.info.state == vim.TaskInfo.State.success: return True, task.info.result - if task.info.state == vim.TaskInfo.State.error + if task.info.state == vim.TaskInfo.State.error: try: raise TaskError(task.info.error) except AttributeError: raise TaskError("An unknown error has occurred") if task.info.state == vim.TaskInfo.State.running: time.sleep(15) - if task.info.state = vim.TaskInfo.State.queued: + if task.info.state == vim.TaskInfo.State.queued: time.sleep(15) @@ -70,7 +70,7 @@ def find_cluster_by_name_datacenter(datacenter, cluster_name): return None -def find_datacenter_by_name(content, datacenter_name, throw=True): +def find_datacenter_by_name(content, datacenter_name): datacenters = get_all_objs(content, [vim.Datacenter]) for dc in datacenters: From 8c08f1b3024ccbacfc4fee3e8d77c9a31b291feb Mon Sep 17 
00:00:00 2001 From: James Cammarata Date: Thu, 23 Apr 2015 18:54:48 -0500 Subject: [PATCH 0404/3617] Updating connection plugins not yet updated in v2 to catch new code --- v2/ansible/plugins/connections/accelerate.py | 19 +- v2/ansible/plugins/connections/chroot.py | 12 +- v2/ansible/plugins/connections/funcd.py | 11 +- v2/ansible/plugins/connections/jail.py | 12 +- v2/ansible/plugins/connections/libvirt_lxc.py | 12 +- .../plugins/connections/paramiko_ssh.py | 55 +++--- v2/ansible/plugins/connections/winrm.py | 59 ++++--- v2/ansible/plugins/connections/zone.py | 162 ++++++++++++++++++ 8 files changed, 255 insertions(+), 87 deletions(-) create mode 100644 v2/ansible/plugins/connections/zone.py diff --git a/v2/ansible/plugins/connections/accelerate.py b/v2/ansible/plugins/connections/accelerate.py index 78e2630eff08d4..0627267c16b215 100644 --- a/v2/ansible/plugins/connections/accelerate.py +++ b/v2/ansible/plugins/connections/accelerate.py @@ -14,8 +14,6 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type import json import os @@ -52,6 +50,7 @@ def __init__(self, runner, host, port, user, password, private_key_file, *args, self.accport = port[1] self.is_connected = False self.has_pipelining = False + self.become_methods_supported=['sudo'] if not self.port: self.port = constants.DEFAULT_REMOTE_PORT @@ -142,7 +141,7 @@ def connect(self, allow_ssh=True): # shutdown, so we'll reconnect. wrong_user = True - except AnsibleError as e: + except AnsibleError, e: if allow_ssh: if "WRONG_USER" in e: vvv("Switching users, waiting for the daemon on %s to shutdown completely..." 
% self.host) @@ -228,11 +227,11 @@ def validate_user(self): else: return response.get('rc') == 0 - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' - if su or su_user: - raise AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") @@ -240,8 +239,8 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable if executable == "": executable = constants.DEFAULT_EXECUTABLE - if self.runner.sudo and sudoable and sudo_user: - cmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) + if self.runner.become and sudoable: + cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe) vvv("EXEC COMMAND %s" % cmd) @@ -294,8 +293,8 @@ def put_file(self, in_path, out_path): if fd.tell() >= fstat.st_size: last = True data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last) - if self.runner.sudo: - data['user'] = self.runner.sudo_user + if self.runner.become: + data['user'] = self.runner.become_user data = utils.jsonify(data) data = utils.encrypt(self.key, data) diff --git a/v2/ansible/plugins/connections/chroot.py b/v2/ansible/plugins/connections/chroot.py index 4e61f4ea559e01..3e960472879603 100644 --- a/v2/ansible/plugins/connections/chroot.py +++ b/v2/ansible/plugins/connections/chroot.py @@ -15,8 +15,6 @@ # # You should have 
received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type import distutils.spawn import traceback @@ -26,6 +24,7 @@ from ansible import errors from ansible import utils from ansible.callbacks import vvv +import ansible.constants as C class Connection(object): ''' Local chroot based connections ''' @@ -33,6 +32,7 @@ class Connection(object): def __init__(self, runner, host, port, *args, **kwargs): self.chroot = host self.has_pipelining = False + self.become_methods_supported=C.BECOME_METHODS if os.geteuid() != 0: raise errors.AnsibleError("chroot connection requires running as root") @@ -62,16 +62,16 @@ def connect(self, port=None): return self - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the chroot ''' - if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We enter chroot as root so sudo stuff can be ignored + # We enter chroot as root so we ignore privlege escalation? if executable: local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] diff --git a/v2/ansible/plugins/connections/funcd.py b/v2/ansible/plugins/connections/funcd.py index 83a0c9b01d302f..92b7f53605baab 100644 --- a/v2/ansible/plugins/connections/funcd.py +++ b/v2/ansible/plugins/connections/funcd.py @@ -18,9 +18,6 @@ # along with Ansible. 
If not, see . # --- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - # The func transport permit to use ansible over func. For people who have already setup # func and that wish to play with ansible, this permit to move gradually to ansible # without having to redo completely the setup of the network. @@ -56,16 +53,14 @@ def connect(self, port=None): self.client = fc.Client(self.host) return self - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, - executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, + executable='/bin/sh', in_data=None): ''' run a command on the remote minion ''' - if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") - if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") + # totally ignores privlege escalation vvv("EXEC %s" % (cmd), host=self.host) p = self.client.command.run(cmd)[self.host] return (p[0], '', p[1], p[2]) diff --git a/v2/ansible/plugins/connections/jail.py b/v2/ansible/plugins/connections/jail.py index a81f587bfd06de..c7b61bc638cd4f 100644 --- a/v2/ansible/plugins/connections/jail.py +++ b/v2/ansible/plugins/connections/jail.py @@ -16,8 +16,6 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type import distutils.spawn import traceback @@ -26,6 +24,7 @@ import subprocess from ansible import errors from ansible.callbacks import vvv +import ansible.constants as C class Connection(object): ''' Local chroot based connections ''' @@ -63,6 +62,7 @@ def __init__(self, runner, host, port, *args, **kwargs): self.runner = runner self.host = host self.has_pipelining = False + self.become_methods_supported=C.BECOME_METHODS if os.geteuid() != 0: raise errors.AnsibleError("jail connection requires running as root") @@ -93,16 +93,16 @@ def _generate_cmd(self, executable, cmd): local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd) return local_cmd - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the chroot ''' - if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We enter chroot as root so sudo stuff can be ignored + # Ignores privilege escalation local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.jail) diff --git a/v2/ansible/plugins/connections/libvirt_lxc.py b/v2/ansible/plugins/connections/libvirt_lxc.py index ee824554a0212d..34cdb592b246b7 100644 --- a/v2/ansible/plugins/connections/libvirt_lxc.py +++ b/v2/ansible/plugins/connections/libvirt_lxc.py @@ -16,14 +16,13 @@ # # You should have received a copy of the GNU 
General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type import distutils.spawn import os import subprocess from ansible import errors from ansible.callbacks import vvv +import ansible.constants as C class Connection(object): ''' Local lxc based connections ''' @@ -52,6 +51,7 @@ def __init__(self, runner, host, port, *args, **kwargs): self.host = host # port is unused, since this is local self.port = port + self.become_methods_supported=C.BECOME_METHODS def connect(self, port=None): ''' connect to the lxc; nothing to do here ''' @@ -67,16 +67,16 @@ def _generate_cmd(self, executable, cmd): local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd) return local_cmd - def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the chroot ''' - if su or su_user: - raise errors.AnsibleError("Internal Error: this module does not support running commands via su") + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We enter lxc as root so sudo stuff can be ignored + # We ignore privelege escalation! 
local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.lxc) diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/v2/ansible/plugins/connections/paramiko_ssh.py index 167b0d39a88230..8eaf97c3f6d2ec 100644 --- a/v2/ansible/plugins/connections/paramiko_ssh.py +++ b/v2/ansible/plugins/connections/paramiko_ssh.py @@ -14,8 +14,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type + # --- # The paramiko transport is provided because many distributions, in particular EL6 and before @@ -126,6 +125,9 @@ def __init__(self, runner, host, port, user, password, private_key_file, *args, self.private_key_file = private_key_file self.has_pipelining = False + # TODO: add pbrun, pfexec + self.become_methods_supported=['sudo', 'su', 'pbrun'] + def _cache_key(self): return "%s__%s__" % (self.host, self.user) @@ -171,7 +173,7 @@ def _connect_uncached(self): key_filename=key_filename, password=self.password, timeout=self.runner.timeout, port=self.port) - except Exception as e: + except Exception, e: msg = str(e) if "PID check failed" in msg: @@ -185,9 +187,12 @@ def _connect_uncached(self): return ssh - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' + if self.runner.become and sudoable and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) + if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") @@ -198,7 +203,7 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, 
sudoable=False, executable self.ssh.get_transport().set_keepalive(5) chan = self.ssh.get_transport().open_session() - except Exception as e: + except Exception, e: msg = "Failed to open session" if len(str(e)) > 0: @@ -207,7 +212,7 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable no_prompt_out = '' no_prompt_err = '' - if not (self.runner.sudo and sudoable) and not (self.runner.su and su): + if not (self.runner.become and sudoable): if executable: quoted_command = executable + ' -c ' + pipes.quote(cmd) @@ -225,50 +230,46 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0))) - if self.runner.sudo or sudoable: - shcmd, prompt, success_key = utils.make_sudo_cmd(self.runner.sudo_exe, sudo_user, executable, cmd) - elif self.runner.su or su: - shcmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd) + if self.runner.become and sudoable: + shcmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe) vvv("EXEC %s" % shcmd, host=self.host) - sudo_output = '' + become_output = '' try: chan.exec_command(shcmd) - if self.runner.sudo_pass or self.runner.su_pass: + if self.runner.become_pass: while True: - if success_key in sudo_output or \ - (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \ - (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)): + if success_key in become_output or \ + (prompt and become_output.endswith(prompt)) or \ + utils.su_prompts.check_su_prompt(become_output): break chunk = chan.recv(bufsize) if not chunk: - if 'unknown user' in sudo_output: + if 'unknown user' in become_output: raise errors.AnsibleError( - 'user %s does not exist' % sudo_user) + 'user %s does not exist' % become_user) else: raise errors.AnsibleError('ssh connection ' + 'closed waiting for 
password prompt') - sudo_output += chunk + become_output += chunk - if success_key not in sudo_output: + if success_key not in become_output: if sudoable: - chan.sendall(self.runner.sudo_pass + '\n') - elif su: - chan.sendall(self.runner.su_pass + '\n') + chan.sendall(self.runner.become_pass + '\n') else: - no_prompt_out += sudo_output - no_prompt_err += sudo_output + no_prompt_out += become_output + no_prompt_err += become_output except socket.timeout: - raise errors.AnsibleError('ssh timed out waiting for sudo.\n' + sudo_output) + raise errors.AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output) stdout = ''.join(chan.makefile('rb', bufsize)) stderr = ''.join(chan.makefile_stderr('rb', bufsize)) @@ -285,7 +286,7 @@ def put_file(self, in_path, out_path): try: self.sftp = self.ssh.open_sftp() - except Exception as e: + except Exception, e: raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e) try: @@ -309,7 +310,7 @@ def fetch_file(self, in_path, out_path): try: self.sftp = self._connect_sftp() - except Exception as e: + except Exception, e: raise errors.AnsibleError("failed to open a SFTP connection (%s)", e) try: diff --git a/v2/ansible/plugins/connections/winrm.py b/v2/ansible/plugins/connections/winrm.py index f3d6a03ba07eac..b41a74c8e1f994 100644 --- a/v2/ansible/plugins/connections/winrm.py +++ b/v2/ansible/plugins/connections/winrm.py @@ -14,18 +14,15 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type + +from __future__ import absolute_import import base64 -import hashlib -import imp import os import re import shlex import traceback - -from six.moves.urllib import parse as urlparse +import urlparse from ansible import errors from ansible import utils from ansible.callbacks import vvv, vvvv, verbose @@ -38,9 +35,12 @@ except ImportError: raise errors.AnsibleError("winrm is not installed") -_winrm_cache = { - # 'user:pwhash@host:port': -} +HAVE_KERBEROS = False +try: + import kerberos + HAVE_KERBEROS = True +except ImportError: + pass def vvvvv(msg, host=None): verbose(msg, host=host, caplevel=4) @@ -48,6 +48,11 @@ def vvvvv(msg, host=None): class Connection(object): '''WinRM connections over HTTP/HTTPS.''' + transport_schemes = { + 'http': [('kerberos', 'http'), ('plaintext', 'http'), ('plaintext', 'https')], + 'https': [('kerberos', 'https'), ('plaintext', 'https')], + } + def __init__(self, runner, host, port, user, password, *args, **kwargs): self.runner = runner self.host = host @@ -61,6 +66,10 @@ def __init__(self, runner, host, port, user, password, *args, **kwargs): self.shell_id = None self.delegate = None + # Add runas support + #self.become_methods_supported=['runas'] + self.become_methods_supported=[] + def _winrm_connect(self): ''' Establish a WinRM connection over HTTP/HTTPS. 
@@ -69,23 +78,22 @@ def _winrm_connect(self): vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \ (self.user, port, self.host), host=self.host) netloc = '%s:%d' % (self.host, port) - cache_key = '%s:%s@%s:%d' % (self.user, hashlib.md5(self.password).hexdigest(), self.host, port) - if cache_key in _winrm_cache: - vvvv('WINRM REUSE EXISTING CONNECTION: %s' % cache_key, host=self.host) - return _winrm_cache[cache_key] - transport_schemes = [('plaintext', 'https'), ('plaintext', 'http')] # FIXME: ssl/kerberos - if port == 5985: - transport_schemes = reversed(transport_schemes) exc = None - for transport, scheme in transport_schemes: + for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']: + if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self.user): + continue + if transport == 'kerberos': + realm = self.user.split('@', 1)[1].strip() or None + else: + realm = None endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', '')) vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self.host) protocol = Protocol(endpoint, transport=transport, - username=self.user, password=self.password) + username=self.user, password=self.password, + realm=realm) try: protocol.send_message('') - _winrm_cache[cache_key] = protocol return protocol except WinRMTransportError, exc: err_msg = str(exc) @@ -97,7 +105,6 @@ def _winrm_connect(self): if code == 401: raise errors.AnsibleError("the username/password specified for this server was incorrect") elif code == 411: - _winrm_cache[cache_key] = protocol return protocol vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host) continue @@ -133,7 +140,11 @@ def connect(self): self.protocol = self._winrm_connect() return self - def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable=None, in_data=None, su=None, su_user=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None): 
+ + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) + cmd = cmd.encode('utf-8') cmd_parts = shlex.split(cmd, posix=False) if '-EncodedCommand' in cmd_parts: @@ -144,11 +155,11 @@ def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable vvv("EXEC %s" % cmd, host=self.host) # For script/raw support. if cmd_parts and cmd_parts[0].lower().endswith('.ps1'): - script = powershell._build_file_cmd(cmd_parts) + script = powershell._build_file_cmd(cmd_parts, quote_args=False) cmd_parts = powershell._encode_script(script, as_list=True) try: result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True) - except Exception as e: + except Exception, e: traceback.print_exc() raise errors.AnsibleError("failed to exec cmd %s" % cmd) return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8')) @@ -194,7 +205,7 @@ def put_file(self, in_path, out_path): def fetch_file(self, in_path, out_path): out_path = out_path.replace('\\', '/') vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) - buffer_size = 2**20 # 1MB chunks + buffer_size = 2**19 # 0.5MB chunks if not os.path.exists(os.path.dirname(out_path)): os.makedirs(os.path.dirname(out_path)) out_file = None diff --git a/v2/ansible/plugins/connections/zone.py b/v2/ansible/plugins/connections/zone.py new file mode 100644 index 00000000000000..211bd0fbcc63f8 --- /dev/null +++ b/v2/ansible/plugins/connections/zone.py @@ -0,0 +1,162 @@ +# Based on local.py (c) 2012, Michael DeHaan +# and chroot.py (c) 2013, Maykel Moya +# and jail.py (c) 2013, Michael Scherer +# (c) 2015, Dagobert Michelsen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software 
Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import distutils.spawn +import traceback +import os +import shutil +import subprocess +from subprocess import Popen,PIPE +from ansible import errors +from ansible.callbacks import vvv +import ansible.constants as C + +class Connection(object): + ''' Local zone based connections ''' + + def _search_executable(self, executable): + cmd = distutils.spawn.find_executable(executable) + if not cmd: + raise errors.AnsibleError("%s command not found in PATH") % executable + return cmd + + def list_zones(self): + pipe = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'], + cwd=self.runner.basedir, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + #stdout, stderr = p.communicate() + zones = [] + for l in pipe.stdout.readlines(): + # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared + s = l.split(':') + if s[1] != 'global': + zones.append(s[1]) + + return zones + + def get_zone_path(self): + #solaris10vm# zoneadm -z cswbuild list -p + #-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared + pipe = subprocess.Popen([self.zoneadm_cmd, '-z', self.zone, 'list', '-p'], + cwd=self.runner.basedir, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + #stdout, stderr = p.communicate() + path = pipe.stdout.readlines()[0].split(':')[3] + return path + '/root' + + def __init__(self, runner, host, port, *args, **kwargs): + self.zone = host + self.runner = runner + self.host = host + self.has_pipelining = False + 
self.become_methods_supported=C.BECOME_METHODS + + if os.geteuid() != 0: + raise errors.AnsibleError("zone connection requires running as root") + + self.zoneadm_cmd = self._search_executable('zoneadm') + self.zlogin_cmd = self._search_executable('zlogin') + + if not self.zone in self.list_zones(): + raise errors.AnsibleError("incorrect zone name %s" % self.zone) + + + self.host = host + # port is unused, since this is local + self.port = port + + def connect(self, port=None): + ''' connect to the zone; nothing to do here ''' + + vvv("THIS IS A LOCAL ZONE DIR", host=self.zone) + + return self + + # a modifier + def _generate_cmd(self, executable, cmd): + if executable: + local_cmd = [self.zlogin_cmd, self.zone, executable, cmd] + else: + local_cmd = '%s "%s" %s' % (self.zlogin_cmd, self.zone, cmd) + return local_cmd + + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None): + ''' run a command on the zone ''' + + if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: + raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) + + if in_data: + raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") + + # We happily ignore privelege escalation + if executable == '/bin/sh': + executable = None + local_cmd = self._generate_cmd(executable, cmd) + + vvv("EXEC %s" % (local_cmd), host=self.zone) + p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring), + cwd=self.runner.basedir, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + stdout, stderr = p.communicate() + return (p.returncode, '', stdout, stderr) + + def _normalize_path(self, path, prefix): + if not path.startswith(os.path.sep): + path = os.path.join(os.path.sep, path) + normpath = os.path.normpath(path) + return os.path.join(prefix, normpath[1:]) + + def 
_copy_file(self, in_path, out_path): + if not os.path.exists(in_path): + raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) + try: + shutil.copyfile(in_path, out_path) + except shutil.Error: + traceback.print_exc() + raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path)) + except IOError: + traceback.print_exc() + raise errors.AnsibleError("failed to transfer file to %s" % out_path) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to zone ''' + + out_path = self._normalize_path(out_path, self.get_zone_path()) + vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) + + self._copy_file(in_path, out_path) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from zone to local ''' + + in_path = self._normalize_path(in_path, self.get_zone_path()) + vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) + + self._copy_file(in_path, out_path) + + def close(self): + ''' terminate the connection; nothing to do here ''' + pass From 8574d40b98bb90bb1fe2de8d4f46efff9d4dd67b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 24 Apr 2015 02:47:56 -0400 Subject: [PATCH 0405/3617] Initial work to make paramiko connections work under v2 --- v2/ansible/executor/connection_info.py | 17 +- v2/ansible/executor/process/worker.py | 29 ++- v2/ansible/executor/task_executor.py | 5 +- v2/ansible/executor/task_queue_manager.py | 13 +- v2/ansible/plugins/connections/__init__.py | 4 +- .../plugins/connections/paramiko_ssh.py | 221 ++++++++---------- v2/ansible/plugins/connections/ssh.py | 2 +- 7 files changed, 124 insertions(+), 167 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index e036342c191116..cf5763ba818dcc 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -44,12 +44,13 @@ def __init__(self, play=None, options=None, passwords=None): passwords = {} # connection - 
self.connection = None - self.remote_addr = None - self.remote_user = None - self.password = passwords.get('conn_pass','') - self.port = None - self.private_key_file = None + self.connection = None + self.remote_addr = None + self.remote_user = None + self.password = passwords.get('conn_pass','') + self.port = 22 + self.private_key_file = C.DEFAULT_PRIVATE_KEY_FILE + self.timeout = C.DEFAULT_TIMEOUT # privilege escalation self.become = None @@ -119,9 +120,7 @@ def set_options(self, options): self.connection = options.connection self.remote_user = options.remote_user - #if 'port' in options and options.port is not None: - # self.port = options.port - self.private_key_file = None + self.private_key_file = options.private_key_file # privilege escalation self.become = options.become diff --git a/v2/ansible/executor/process/worker.py b/v2/ansible/executor/process/worker.py index f24e6abd5e0f47..7a75af146ef36c 100644 --- a/v2/ansible/executor/process/worker.py +++ b/v2/ansible/executor/process/worker.py @@ -51,7 +51,7 @@ class WorkerProcess(multiprocessing.Process): for reading later. 
''' - def __init__(self, tqm, main_q, rslt_q, loader, new_stdin): + def __init__(self, tqm, main_q, rslt_q, loader): # takes a task queue manager as the sole param: self._main_q = main_q @@ -59,23 +59,20 @@ def __init__(self, tqm, main_q, rslt_q, loader, new_stdin): self._loader = loader # dupe stdin, if we have one + self._new_stdin = sys.stdin try: fileno = sys.stdin.fileno() + if fileno is not None: + try: + self._new_stdin = os.fdopen(os.dup(fileno)) + except OSError, e: + # couldn't dupe stdin, most likely because it's + # not a valid file descriptor, so we just rely on + # using the one that was passed in + pass except ValueError: - fileno = None - - self._new_stdin = new_stdin - if not new_stdin and fileno is not None: - try: - self._new_stdin = os.fdopen(os.dup(fileno)) - except OSError, e: - # couldn't dupe stdin, most likely because it's - # not a valid file descriptor, so we just rely on - # using the one that was passed in - pass - - if self._new_stdin: - sys.stdin = self._new_stdin + # couldn't get stdin's fileno, so we just carry on + pass super(WorkerProcess, self).__init__() @@ -118,7 +115,7 @@ def run(self): # execute the task and build a TaskResult from the result debug("running TaskExecutor() for %s/%s" % (host, task)) - executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._loader, module_loader).run() + executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._new_stdin, self._loader, module_loader).run() debug("done running TaskExecutor() for %s/%s" % (host, task)) task_result = TaskResult(host, task, executor_result) diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index 0c57a42857d862..e011792cbec105 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -45,11 +45,12 @@ class TaskExecutor: class. 
''' - def __init__(self, host, task, job_vars, connection_info, loader, module_loader): + def __init__(self, host, task, job_vars, connection_info, new_stdin, loader, module_loader): self._host = host self._task = task self._job_vars = job_vars self._connection_info = connection_info + self._new_stdin = new_stdin self._loader = loader self._module_loader = module_loader @@ -370,7 +371,7 @@ def _get_connection(self, variables): if conn_type == 'smart': conn_type = 'ssh' - connection = connection_loader.get(conn_type, self._connection_info) + connection = connection_loader.get(conn_type, self._connection_info, self._new_stdin) if not connection: raise AnsibleError("the connection plugin '%s' was not found" % conn_type) diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index a5c2920aec5e88..e13930c6df8414 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -87,21 +87,10 @@ def __init__(self, inventory, callback, variable_manager, loader, display, optio self._workers = [] for i in range(self._options.forks): - # duplicate stdin, if possible - new_stdin = None - if fileno is not None: - try: - new_stdin = os.fdopen(os.dup(fileno)) - except OSError: - # couldn't dupe stdin, most likely because it's - # not a valid file descriptor, so we just rely on - # using the one that was passed in - pass - main_q = multiprocessing.Queue() rslt_q = multiprocessing.Queue() - prc = WorkerProcess(self, main_q, rslt_q, loader, new_stdin) + prc = WorkerProcess(self, main_q, rslt_q, loader) prc.start() self._workers.append((prc, main_q, rslt_q)) diff --git a/v2/ansible/plugins/connections/__init__.py b/v2/ansible/plugins/connections/__init__.py index 8f84e6a01ac4ce..d11f3651827304 100644 --- a/v2/ansible/plugins/connections/__init__.py +++ b/v2/ansible/plugins/connections/__init__.py @@ -43,10 +43,12 @@ class ConnectionBase: has_pipelining = False become_methods = C.BECOME_METHODS - def 
__init__(self, connection_info, *args, **kwargs): + def __init__(self, connection_info, new_stdin, *args, **kwargs): # All these hasattrs allow subclasses to override these parameters if not hasattr(self, '_connection_info'): self._connection_info = connection_info + if not hasattr(self, '_new_stdin'): + self._new_stdin = new_stdin if not hasattr(self, '_display'): self._display = Display(verbosity=connection_info.verbosity) if not hasattr(self, '_connected'): diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/v2/ansible/plugins/connections/paramiko_ssh.py index 8eaf97c3f6d2ec..256578a0d70212 100644 --- a/v2/ansible/plugins/connections/paramiko_ssh.py +++ b/v2/ansible/plugins/connections/paramiko_ssh.py @@ -34,12 +34,13 @@ import fcntl import re import sys + from termios import tcflush, TCIFLUSH from binascii import hexlify -from ansible.callbacks import vvv -from ansible import errors -from ansible import utils + from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.plugins.connections import ConnectionBase AUTHENTICITY_MSG=""" paramiko: The authenticity of host '%s' can't be established. @@ -67,33 +68,38 @@ class MyAddPolicy(object): local L{HostKeys} object, and saving it. This is used by L{SSHClient}. 
""" - def __init__(self, runner): - self.runner = runner + def __init__(self, new_stdin): + self._new_stdin = new_stdin def missing_host_key(self, client, hostname, key): if C.HOST_KEY_CHECKING: - fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX) - fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX) + # FIXME: need to fix lock file stuff + #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX) + #fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX) old_stdin = sys.stdin - sys.stdin = self.runner._new_stdin - fingerprint = hexlify(key.get_fingerprint()) - ktype = key.get_name() + sys.stdin = self._new_stdin # clear out any premature input on sys.stdin tcflush(sys.stdin, TCIFLUSH) + fingerprint = hexlify(key.get_fingerprint()) + ktype = key.get_name() + inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint)) sys.stdin = old_stdin + if inp not in ['yes','y','']: - fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN) - fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN) - raise errors.AnsibleError("host connection rejected by user") + # FIXME: lock file stuff + #fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN) + #fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN) + raise AnsibleError("host connection rejected by user") - fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN) - fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN) + # FIXME: lock file stuff + #fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN) + #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN) key._added_by_ansible_this_time = True @@ -110,28 +116,18 @@ def missing_host_key(self, client, hostname, key): SSH_CONNECTION_CACHE = {} SFTP_CONNECTION_CACHE = {} -class Connection(object): +class Connection(ConnectionBase): ''' SSH based connections with Paramiko ''' - def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs): - - self.ssh = None - self.sftp = None - self.runner = runner - self.host = host - 
self.port = port or 22 - self.user = user - self.password = password - self.private_key_file = private_key_file - self.has_pipelining = False - - # TODO: add pbrun, pfexec - self.become_methods_supported=['sudo', 'su', 'pbrun'] + @property + def transport(self): + ''' used to identify this connection object from other classes ''' + return 'paramiko' def _cache_key(self): - return "%s__%s__" % (self.host, self.user) + return "%s__%s__" % (self._connection_info.remote_addr, self._connection_info.remote_user) - def connect(self): + def _connect(self): cache_key = self._cache_key() if cache_key in SSH_CONNECTION_CACHE: self.ssh = SSH_CONNECTION_CACHE[cache_key] @@ -143,9 +139,9 @@ def _connect_uncached(self): ''' activates the connection object ''' if not HAVE_PARAMIKO: - raise errors.AnsibleError("paramiko is not installed") + raise AnsibleError("paramiko is not installed") - vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self.user, self.port, self.host), host=self.host) + self._display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._connection_info.remote_user, self._connection_info.port, self._connection_info.remote_addr), host=self._connection_info.remote_addr) ssh = paramiko.SSHClient() @@ -154,122 +150,95 @@ def _connect_uncached(self): if C.HOST_KEY_CHECKING: ssh.load_system_host_keys() - ssh.set_missing_host_key_policy(MyAddPolicy(self.runner)) + ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin)) allow_agent = True - if self.password is not None: + if self._connection_info.password is not None: allow_agent = False try: - - if self.private_key_file: - key_filename = os.path.expanduser(self.private_key_file) - elif self.runner.private_key_file: - key_filename = os.path.expanduser(self.runner.private_key_file) - else: - key_filename = None - ssh.connect(self.host, username=self.user, allow_agent=allow_agent, look_for_keys=True, - key_filename=key_filename, password=self.password, - timeout=self.runner.timeout, 
port=self.port) - + key_filename = None + if self._connection_info.private_key_file: + key_filename = os.path.expanduser(self._connection_info.private_key_file) + + ssh.connect( + self._connection_info.remote_addr, + username=self._connection_info.remote_user, + allow_agent=allow_agent, + look_for_keys=True, + key_filename=key_filename, + password=self._connection_info.password, + timeout=self._connection_info.timeout, + port=self._connection_info.port + ) except Exception, e: - msg = str(e) if "PID check failed" in msg: - raise errors.AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible") + raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible") elif "Private key file is encrypted" in msg: msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u .' % ( - self.user, self.host, self.port, msg) - raise errors.AnsibleConnectionFailed(msg) + self._connection_info.remote_user, self._connection_info.remote_addr, self._connection_info.port, msg) + raise AnsibleConnectionFailure(msg) else: - raise errors.AnsibleConnectionFailed(msg) + raise AnsibleConnectionFailure(msg) return ssh - def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): + def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' - if self.runner.become and sudoable and self.runner.become_method not in self.become_methods_supported: - raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) - if in_data: - raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") + raise AnsibleError("Internal Error: this module does not support optimized module pipelining") bufsize = 4096 try: - self.ssh.get_transport().set_keepalive(5) chan = self.ssh.get_transport().open_session() - except Exception, e: - msg 
= "Failed to open session" if len(str(e)) > 0: msg += ": %s" % str(e) - raise errors.AnsibleConnectionFailed(msg) + raise AnsibleConnectionFailure(msg) - no_prompt_out = '' - no_prompt_err = '' - if not (self.runner.become and sudoable): - - if executable: - quoted_command = executable + ' -c ' + pipes.quote(cmd) - else: - quoted_command = cmd - vvv("EXEC %s" % quoted_command, host=self.host) - chan.exec_command(quoted_command) - - else: - - # sudo usually requires a PTY (cf. requiretty option), therefore - # we give it one by default (pty=True in ansble.cfg), and we try - # to initialise from the calling environment - if C.PARAMIKO_PTY: - chan.get_pty(term=os.getenv('TERM', 'vt100'), - width=int(os.getenv('COLUMNS', 0)), - height=int(os.getenv('LINES', 0))) - if self.runner.become and sudoable: - shcmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe) - - vvv("EXEC %s" % shcmd, host=self.host) - become_output = '' - - try: + # sudo usually requires a PTY (cf. 
requiretty option), therefore + # we give it one by default (pty=True in ansble.cfg), and we try + # to initialise from the calling environment + if C.PARAMIKO_PTY: + chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0))) - chan.exec_command(shcmd) + self._display.vvv("EXEC %s" % cmd, host=self._connection_info.remote_addr) - if self.runner.become_pass: - - while True: - - if success_key in become_output or \ - (prompt and become_output.endswith(prompt)) or \ - utils.su_prompts.check_su_prompt(become_output): - break - chunk = chan.recv(bufsize) - - if not chunk: - if 'unknown user' in become_output: - raise errors.AnsibleError( - 'user %s does not exist' % become_user) - else: - raise errors.AnsibleError('ssh connection ' + - 'closed waiting for password prompt') - become_output += chunk - - if success_key not in become_output: - - if sudoable: - chan.sendall(self.runner.become_pass + '\n') - else: - no_prompt_out += become_output - no_prompt_err += become_output - - except socket.timeout: + no_prompt_out = '' + no_prompt_err = '' + become_output = '' - raise errors.AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output) + try: + chan.exec_command(cmd) + if self._connection_info.become_pass: + while True: + if success_key in become_output or \ + (prompt and become_output.endswith(prompt)) or \ + utils.su_prompts.check_su_prompt(become_output): + break + chunk = chan.recv(bufsize) + if not chunk: + if 'unknown user' in become_output: + raise AnsibleError( + 'user %s does not exist' % become_user) + else: + raise AnsibleError('ssh connection ' + + 'closed waiting for password prompt') + become_output += chunk + if success_key not in become_output: + if self._connection_info.become: + chan.sendall(self._connection_info.become_pass + '\n') + else: + no_prompt_out += become_output + no_prompt_err += become_output + except socket.timeout: + raise AnsibleError('ssh timed out waiting 
for privilege escalation.\n' + become_output) stdout = ''.join(chan.makefile('rb', bufsize)) stderr = ''.join(chan.makefile_stderr('rb', bufsize)) @@ -279,24 +248,24 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab def put_file(self, in_path, out_path): ''' transfer a file from local to remote ''' - vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) if not os.path.exists(in_path): - raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) + raise AnsibleFileNotFound("file or module does not exist: %s" % in_path) try: self.sftp = self.ssh.open_sftp() except Exception, e: - raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e) + raise AnsibleError("failed to open a SFTP connection (%s)" % e) try: self.sftp.put(in_path, out_path) except IOError: - raise errors.AnsibleError("failed to transfer file to %s" % out_path) + raise AnsibleError("failed to transfer file to %s" % out_path) def _connect_sftp(self): - cache_key = "%s__%s__" % (self.host, self.user) + cache_key = "%s__%s__" % (self._connection_info.remote_addr, self._connection_info.remote_user) if cache_key in SFTP_CONNECTION_CACHE: return SFTP_CONNECTION_CACHE[cache_key] else: @@ -306,17 +275,17 @@ def _connect_sftp(self): def fetch_file(self, in_path, out_path): ''' save a remote file to the specified path ''' - vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) try: self.sftp = self._connect_sftp() except Exception, e: - raise errors.AnsibleError("failed to open a SFTP connection (%s)", e) + raise AnsibleError("failed to open a SFTP connection (%s)", e) try: self.sftp.get(in_path, out_path) except IOError: - raise errors.AnsibleError("failed to transfer file from %s" % in_path) + raise AnsibleError("failed to transfer file 
from %s" % in_path) def _any_keys_added(self): diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py index 1d54d3ba48c7b6..de7e923da70cd4 100644 --- a/v2/ansible/plugins/connections/ssh.py +++ b/v2/ansible/plugins/connections/ssh.py @@ -50,7 +50,7 @@ def __init__(self, connection_info, *args, **kwargs): self._cp_dir = '/tmp' #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN) - super(Connection, self).__init__(connection_info) + super(Connection, self).__init__(connection_info, *args, **kwargs) @property def transport(self): From 31520cdd178246f94921ba9d9866abf23b28e252 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 24 Apr 2015 18:58:57 +0200 Subject: [PATCH 0406/3617] cloudstack: fix other projects not found --- lib/ansible/module_utils/cloudstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 2c891434bdebea..627ef9655e2ab9 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -77,7 +77,7 @@ def get_project_id(self): if not project: return None - projects = self.cs.listProjects() + projects = self.cs.listProjects(listall=True) if projects: for p in projects['project']: if project in [ p['name'], p['displaytext'], p['id'] ]: From 88540d3cdcef13775664b83b717b32c41137dd38 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 24 Apr 2015 19:04:33 +0200 Subject: [PATCH 0407/3617] cloudstack: add _get_by_key() to utils Generic method to get the whole dict or just a singe value by key if found. 
--- lib/ansible/module_utils/cloudstack.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 627ef9655e2ab9..9ef9d229ba7a49 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -69,6 +69,14 @@ def _connect(self): self.cs = CloudStack(**read_config()) + def _get_by_key(self, key=None, my_dict={}): + if key: + if key in my_dict: + return my_dict[key] + self.module.fail_json(msg="Something went wrong: %s not found" % key) + return my_dict + + def get_project_id(self): if self.project_id: return self.project_id From 765c8fe36871751c2d5d8c2d1c9362d5b571629d Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 24 Apr 2015 19:09:30 +0200 Subject: [PATCH 0408/3617] cloudstack: use _get_by_key in get_...() methods in utils But also add backward compatibility for existing modules in extras. --- lib/ansible/module_utils/cloudstack.py | 85 +++++++++++++++++--------- 1 file changed, 55 insertions(+), 30 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 9ef9d229ba7a49..d98d00b76ce7e4 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -44,11 +44,11 @@ def __init__(self, module): self.module = module self._connect() - self.project_id = None - self.ip_address_id = None - self.zone_id = None - self.vm_id = None - self.os_type_id = None + self.project = None + self.ip_address = None + self.zone = None + self.vm = None + self.os_type = None self.hypervisor = None @@ -77,9 +77,14 @@ def _get_by_key(self, key=None, my_dict={}): return my_dict + # TODO: for backward compatibility only, remove if not used anymore def get_project_id(self): - if self.project_id: - return self.project_id + return get_project(key='id') + + + def get_project(self, key=None): + if self.project: + return self._get_by_key(key, self.project) project = 
self.module.params.get('project') if not project: @@ -89,14 +94,19 @@ def get_project_id(self): if projects: for p in projects['project']: if project in [ p['name'], p['displaytext'], p['id'] ]: - self.project_id = p['id'] - return self.project_id + self.project = p + return self._get_by_key(key, self.project) self.module.fail_json(msg="project '%s' not found" % project) + # TODO: for backward compatibility only, remove if not used anymore def get_ip_address_id(self): - if self.ip_address_id: - return self.ip_address_id + return get_ip_address(key='id') + + + def get_ip_address(self, key=None): + if self.ip_address: + return self._get_by_key(key, self.ip_address) ip_address = self.module.params.get('ip_address') if not ip_address: @@ -104,58 +114,73 @@ def get_ip_address_id(self): args = {} args['ipaddress'] = ip_address - args['projectid'] = self.get_project_id() + args['projectid'] = self.get_project(key='id') ip_addresses = self.cs.listPublicIpAddresses(**args) if not ip_addresses: self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress']) - self.ip_address_id = ip_addresses['publicipaddress'][0]['id'] - return self.ip_address_id + self.ip_address = ip_addresses['publicipaddress'][0] + return self._get_by_key(key, self.ip_address) + # TODO: for backward compatibility only, remove if not used anymore def get_vm_id(self): - if self.vm_id: - return self.vm_id + return get_vm(key='id') + + + def get_vm(self, key=None): + if self.vm: + return self._get_by_key(key, self.vm) vm = self.module.params.get('vm') if not vm: self.module.fail_json(msg="Virtual machine param 'vm' is required") args = {} - args['projectid'] = self.get_project_id() + args['projectid'] = self.get_project(key='id') vms = self.cs.listVirtualMachines(**args) if vms: for v in vms['virtualmachine']: - if vm in [ v['displayname'], v['name'], v['id'] ]: - self.vm_id = v['id'] - return self.vm_id + if vm in [ v['name'], v['displayname'], v['id'] ]: + self.vm = v + return 
self._get_by_key(key, self.vm) self.module.fail_json(msg="Virtual machine '%s' not found" % vm) + # TODO: for backward compatibility only, remove if not used anymore def get_zone_id(self): - if self.zone_id: - return self.zone_id + return get_zone(key='id') + + + def get_zone(self, key=None): + if self.zone: + return self._get_by_key(key, self.zone) zone = self.module.params.get('zone') zones = self.cs.listZones() # use the first zone if no zone param given if not zone: - self.zone_id = zones['zone'][0]['id'] - return self.zone_id + self.zone = zones['zone'][0] + return self._get_by_key(key, self.zone) if zones: for z in zones['zone']: if zone in [ z['name'], z['id'] ]: - self.zone_id = z['id'] - return self.zone_id + self.zone = z + return self._get_by_key(key, self.zone) self.module.fail_json(msg="zone '%s' not found" % zone) + # TODO: for backward compatibility only, remove if not used anymore def get_os_type_id(self): - if self.os_type_id: - return self.os_type_id + return get_os_type(key='id') + + + def get_os_type(self, key=None): + if self.os_type: + return self._get_by_key(key, self.zone) os_type = self.module.params.get('os_type') if not os_type: @@ -165,8 +190,8 @@ def get_os_type_id(self): if os_types: for o in os_types['ostype']: if os_type in [ o['description'], o['id'] ]: - self.os_type_id = o['id'] - return self.os_type_id + self.os_type = o + return self._get_by_key(key, self.os_type) self.module.fail_json(msg="OS type '%s' not found" % os_type) From 6354ca07189e7d21a31722f6216231f61221c995 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 24 Apr 2015 19:16:42 +0200 Subject: [PATCH 0409/3617] cloudstack: add _has_changed() to utils Generic method to compare values in dict. 
--- lib/ansible/module_utils/cloudstack.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index d98d00b76ce7e4..afffb061f55296 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -69,6 +69,27 @@ def _connect(self): self.cs = CloudStack(**read_config()) + def _has_changed(self, want_dict, current_dict, only_keys=None): + for key, value in want_dict.iteritems(): + + # Optionally limit by a list of keys + if only_keys and key not in only_keys: + continue; + + if key in current_dict: + + # API returns string for int in some cases, just to make sure + if isinstance(value, int): + current_dict[key] = int(current_dict[key]) + elif isinstance(value, str): + current_dict[key] = str(current_dict[key]) + + # Only need to detect a singe change, not every item + if value != current_dict[key]: + return True + return False + + def _get_by_key(self, key=None, my_dict={}): if key: if key in my_dict: From 3c0e406f5db4c61dd38e505061145b4f1e02f518 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 24 Apr 2015 20:25:19 +0200 Subject: [PATCH 0410/3617] cloudstack: fix missing self. 
in cloudstack utils --- lib/ansible/module_utils/cloudstack.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index afffb061f55296..74afc79836103e 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -100,7 +100,7 @@ def _get_by_key(self, key=None, my_dict={}): # TODO: for backward compatibility only, remove if not used anymore def get_project_id(self): - return get_project(key='id') + return self.get_project(key='id') def get_project(self, key=None): @@ -122,7 +122,7 @@ def get_project(self, key=None): # TODO: for backward compatibility only, remove if not used anymore def get_ip_address_id(self): - return get_ip_address(key='id') + return self.get_ip_address(key='id') def get_ip_address(self, key=None): @@ -147,7 +147,7 @@ def get_ip_address(self, key=None): # TODO: for backward compatibility only, remove if not used anymore def get_vm_id(self): - return get_vm(key='id') + return self.get_vm(key='id') def get_vm(self, key=None): @@ -171,7 +171,7 @@ def get_vm(self, key=None): # TODO: for backward compatibility only, remove if not used anymore def get_zone_id(self): - return get_zone(key='id') + return self.get_zone(key='id') def get_zone(self, key=None): @@ -196,7 +196,7 @@ def get_zone(self, key=None): # TODO: for backward compatibility only, remove if not used anymore def get_os_type_id(self): - return get_os_type(key='id') + return self.get_os_type(key='id') def get_os_type(self, key=None): From 1674b474450c81a05c25d214a12984006a61c302 Mon Sep 17 00:00:00 2001 From: Paul Logston Date: Fri, 24 Apr 2015 21:14:06 -0400 Subject: [PATCH 0411/3617] Make ec2 inventory plugin Python 3 compatible --- plugins/inventory/ec2.py | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 76871b0266dba0..7df08b240bc366 
100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -122,7 +122,13 @@ from boto import ec2 from boto import rds from boto import route53 -import ConfigParser +import six + +try: + import ConfigParser as configparser +except: + import configparser + from collections import defaultdict try: @@ -166,7 +172,7 @@ def __init__(self): else: data_to_print = self.json_format_dict(self.inventory, True) - print data_to_print + print(data_to_print) def is_cache_valid(self): @@ -184,8 +190,10 @@ def is_cache_valid(self): def read_settings(self): ''' Reads the settings from the ec2.ini file ''' - - config = ConfigParser.SafeConfigParser() + if six.PY2: + config = configparser.SafeConfigParser() + else: + config = configparser.ConfigParser() ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) config.read(ec2_ini_path) @@ -282,7 +290,7 @@ def read_settings(self): self.pattern_include = re.compile(pattern_include) else: self.pattern_include = None - except ConfigParser.NoOptionError, e: + except configparser.NoOptionError as e: self.pattern_include = None # Do we need to exclude hosts that match a pattern? @@ -292,7 +300,7 @@ def read_settings(self): self.pattern_exclude = re.compile(pattern_exclude) else: self.pattern_exclude = None - except ConfigParser.NoOptionError, e: + except configparser.NoOptionError as e: self.pattern_exclude = None # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
@@ -354,7 +362,7 @@ def get_instances_by_region(self, region): conn = self.connect(region) reservations = [] if self.ec2_instance_filters: - for filter_key, filter_values in self.ec2_instance_filters.iteritems(): + for filter_key, filter_values in self.ec2_instance_filters.items(): reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) else: reservations = conn.get_all_instances() @@ -363,7 +371,7 @@ def get_instances_by_region(self, region): for instance in reservation.instances: self.add_instance(instance, region) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: if e.error_code == 'AuthFailure': error = self.get_auth_error_message() else: @@ -381,7 +389,7 @@ def get_rds_instances_by_region(self, region): instances = conn.get_all_dbinstances() for instance in instances: self.add_rds_instance(instance, region) - except boto.exception.BotoServerError, e: + except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': @@ -515,7 +523,7 @@ def add_instance(self, instance, region): # Inventory: Group by tag keys if self.group_by_tag_keys: - for k, v in instance.tags.iteritems(): + for k, v in instance.tags.items(): key = self.to_safe("tag_" + k + "=" + v) self.push(self.inventory, key, dest) if self.nested_groups: @@ -690,7 +698,9 @@ def get_host_info_dict_from_instance(self, instance): instance_vars['ec2_previous_state_code'] = instance.previous_state_code elif type(value) in [int, bool]: instance_vars[key] = value - elif type(value) in [str, unicode]: + elif six.PY2 and type(value) in [str, unicode]: + instance_vars[key] = value.strip() + elif six.PY3 and type(value) in [str]: instance_vars[key] = value.strip() elif type(value) == type(None): instance_vars[key] = '' @@ -699,7 +709,7 @@ def get_host_info_dict_from_instance(self, instance): elif key == 'ec2__placement': instance_vars['ec2_placement'] = value.zone elif key == 'ec2_tags': - for k, v in 
value.iteritems(): + for k, v in value.items(): key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': From ca88189bf765a1f519733706e299f45bd2dc3ccd Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 25 Apr 2015 18:31:58 +0200 Subject: [PATCH 0412/3617] cloudstack: add method to to get infos of API get_capabilities() allows you to get infos e.g. `cloudstackversion` to compare functionality of the API in your modules. --- lib/ansible/module_utils/cloudstack.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 74afc79836103e..48f16a13992e93 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -50,6 +50,7 @@ def __init__(self, module): self.vm = None self.os_type = None self.hypervisor = None + self.capabilities = None def _connect(self): @@ -235,6 +236,14 @@ def get_hypervisor(self): self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor) + def get_capabilities(self, key=None): + if self.capabilities: + return self._get_by_key(key, self.capabilities) + capabilities = self.cs.listCapabilities() + self.capabilities = capabilities['capability'] + return self._get_by_key(key, self.capabilities) + + def _poll_job(self, job=None, key=None): if 'jobid' in job: while True: From 50932ce556d87606d417c6c042f653ab4f64be5e Mon Sep 17 00:00:00 2001 From: Paul Logston Date: Sat, 25 Apr 2015 17:06:01 -0400 Subject: [PATCH 0413/3617] Use six.moves to import configparser --- plugins/inventory/ec2.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 7df08b240bc366..ad92c16b9df374 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -124,11 +124,7 @@ from boto import route53 import six -try: - import ConfigParser as configparser -except: - import configparser - +from six.moves import configparser from 
collections import defaultdict try: From e8768b2b87f211467de67b8be1e3d218f9c46404 Mon Sep 17 00:00:00 2001 From: Paul Logston Date: Sat, 25 Apr 2015 19:45:22 -0400 Subject: [PATCH 0414/3617] Use six to check for string_types --- plugins/inventory/ec2.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index ad92c16b9df374..16ac93f5ee4827 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -694,9 +694,7 @@ def get_host_info_dict_from_instance(self, instance): instance_vars['ec2_previous_state_code'] = instance.previous_state_code elif type(value) in [int, bool]: instance_vars[key] = value - elif six.PY2 and type(value) in [str, unicode]: - instance_vars[key] = value.strip() - elif six.PY3 and type(value) in [str]: + elif isinstance(value, six.string_types): instance_vars[key] = value.strip() elif type(value) == type(None): instance_vars[key] = '' From 2f255f5b967ac4d8ddba53af21adf192f2330a53 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 26 Apr 2015 23:09:33 +0200 Subject: [PATCH 0415/3617] cloudstack: get_vm(): fix missing zone Fixes returning wrong VM having identical name in different zone. 
--- lib/ansible/module_utils/cloudstack.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 48f16a13992e93..0c7da28e2a7798 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -161,6 +161,7 @@ def get_vm(self, key=None): args = {} args['projectid'] = self.get_project(key='id') + args['zoneid'] = self.get_zone(key='id') vms = self.cs.listVirtualMachines(**args) if vms: for v in vms['virtualmachine']: From 3aede800c575989b7c7f2b18e2818b5b4fdf4fd2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 27 Apr 2015 00:28:25 -0500 Subject: [PATCH 0416/3617] Fixing winrm connection for v2 --- v2/ansible/executor/connection_info.py | 2 +- v2/ansible/plugins/action/__init__.py | 22 +-- .../plugins/connections/paramiko_ssh.py | 7 +- v2/ansible/plugins/connections/ssh.py | 4 +- v2/ansible/plugins/connections/winrm.py | 135 +++++++++--------- v2/ansible/plugins/shell/powershell.py | 75 +++++----- v2/ansible/utils/display.py | 3 + 7 files changed, 124 insertions(+), 124 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index cf5763ba818dcc..05fd5e8784cffe 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -48,7 +48,7 @@ def __init__(self, play=None, options=None, passwords=None): self.remote_addr = None self.remote_user = None self.password = passwords.get('conn_pass','') - self.port = 22 + self.port = None self.private_key_file = C.DEFAULT_PRIVATE_KEY_FILE self.timeout = C.DEFAULT_TIMEOUT diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index c49ac8e6f00d2b..aead23503719b3 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -56,22 +56,12 @@ def __init__(self, task, connection, connection_info, loader, module_loader): def get_shell(self): - # FIXME: no 
more inject, get this from the host variables? - #default_shell = getattr(self._connection, 'default_shell', '') - #shell_type = inject.get('ansible_shell_type') - #if not shell_type: - # if default_shell: - # shell_type = default_shell - # else: - # shell_type = os.path.basename(C.DEFAULT_EXECUTABLE) - - shell_type = getattr(self._connection, 'default_shell', '') - if not shell_type: - shell_type = os.path.basename(C.DEFAULT_EXECUTABLE) - - shell_plugin = shell_loader.get(shell_type) - if shell_plugin is None: - shell_plugin = shell_loader.get('sh') + if hasattr(self._connection, '_shell'): + shell_plugin = getattr(self._connection, '_shell', '') + else: + shell_plugin = shell_loader.get(os.path.basename(C.DEFAULT_EXECUTABLE)) + if shell_plugin is None: + shell_plugin = shell_loader.get('sh') return shell_plugin diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/v2/ansible/plugins/connections/paramiko_ssh.py index 256578a0d70212..a2b961bd686609 100644 --- a/v2/ansible/plugins/connections/paramiko_ssh.py +++ b/v2/ansible/plugins/connections/paramiko_ssh.py @@ -141,7 +141,8 @@ def _connect_uncached(self): if not HAVE_PARAMIKO: raise AnsibleError("paramiko is not installed") - self._display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._connection_info.remote_user, self._connection_info.port, self._connection_info.remote_addr), host=self._connection_info.remote_addr) + port = self._connection_info.port or 22 + self._display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._connection_info.remote_user, port, self._connection_info.remote_addr), host=self._connection_info.remote_addr) ssh = paramiko.SSHClient() @@ -170,7 +171,7 @@ def _connect_uncached(self): key_filename=key_filename, password=self._connection_info.password, timeout=self._connection_info.timeout, - port=self._connection_info.port + port=port, ) except Exception, e: msg = str(e) @@ -178,7 +179,7 @@ def _connect_uncached(self): raise AnsibleError("paramiko 
version issue, please upgrade paramiko on the machine running ansible") elif "Private key file is encrypted" in msg: msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u .' % ( - self._connection_info.remote_user, self._connection_info.remote_addr, self._connection_info.port, msg) + self._connection_info.remote_user, self._connection_info.remote_addr, port, msg) raise AnsibleConnectionFailure(msg) else: raise AnsibleConnectionFailure(msg) diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py index de7e923da70cd4..cc5b321d143699 100644 --- a/v2/ansible/plugins/connections/ssh.py +++ b/v2/ansible/plugins/connections/ssh.py @@ -39,7 +39,7 @@ class Connection(ConnectionBase): ''' ssh based connections ''' - def __init__(self, connection_info, *args, **kwargs): + def __init__(self, *args, **kwargs): # SSH connection specific init stuff self.HASHED_KEY_MAGIC = "|1|" self._has_pipelining = True @@ -50,7 +50,7 @@ def __init__(self, connection_info, *args, **kwargs): self._cp_dir = '/tmp' #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN) - super(Connection, self).__init__(connection_info, *args, **kwargs) + super(Connection, self).__init__(*args, **kwargs) @property def transport(self): diff --git a/v2/ansible/plugins/connections/winrm.py b/v2/ansible/plugins/connections/winrm.py index b41a74c8e1f994..833358d58c1013 100644 --- a/v2/ansible/plugins/connections/winrm.py +++ b/v2/ansible/plugins/connections/winrm.py @@ -23,17 +23,13 @@ import shlex import traceback import urlparse -from ansible import errors -from ansible import utils -from ansible.callbacks import vvv, vvvv, verbose -from ansible.runner.shell_plugins import powershell try: from winrm import Response from winrm.exceptions import WinRMTransportError from winrm.protocol import Protocol except ImportError: - raise errors.AnsibleError("winrm is not installed") + raise AnsibleError("winrm is not installed") HAVE_KERBEROS = False try: @@ -42,10 +38,12 @@ 
except ImportError: pass -def vvvvv(msg, host=None): - verbose(msg, host=host, caplevel=4) +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.plugins.connections import ConnectionBase +from ansible.plugins import shell_loader -class Connection(object): +class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' transport_schemes = { @@ -53,69 +51,79 @@ class Connection(object): 'https': [('kerberos', 'https'), ('plaintext', 'https')], } - def __init__(self, runner, host, port, user, password, *args, **kwargs): - self.runner = runner - self.host = host - self.port = port - self.user = user - self.password = password - self.has_pipelining = False - self.default_shell = 'powershell' + def __init__(self, *args, **kwargs): + + self.has_pipelining = False self.default_suffixes = ['.ps1', ''] - self.protocol = None - self.shell_id = None - self.delegate = None + self.protocol = None + self.shell_id = None + self.delegate = None + + self._shell = shell_loader.get('powershell') - # Add runas support - #self.become_methods_supported=['runas'] + # TODO: Add runas support self.become_methods_supported=[] + super(Connection, self).__init__(*args, **kwargs) + + @property + def transport(self): + ''' used to identify this connection object from other classes ''' + return 'winrm' + def _winrm_connect(self): ''' Establish a WinRM connection over HTTP/HTTPS. 
''' - port = self.port or 5986 - vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \ - (self.user, port, self.host), host=self.host) - netloc = '%s:%d' % (self.host, port) + port = self._connection_info.port or 5986 + self._display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \ + (self._connection_info.remote_user, port, self._connection_info.remote_addr), host=self._connection_info.remote_addr) + netloc = '%s:%d' % (self._connection_info.remote_addr, port) exc = None for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']: - if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self.user): + if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self._connection_info.remote_user): continue + if transport == 'kerberos': - realm = self.user.split('@', 1)[1].strip() or None + realm = self._connection_info.remote_user.split('@', 1)[1].strip() or None else: realm = None + endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', '')) - vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), - host=self.host) - protocol = Protocol(endpoint, transport=transport, - username=self.user, password=self.password, - realm=realm) + + self._display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._connection_info.remote_addr) + protocol = Protocol( + endpoint, + transport=transport, + username=self._connection_info.remote_user, + password=self._connection_info.password, + realm=realm + ) + try: protocol.send_message('') return protocol except WinRMTransportError, exc: err_msg = str(exc) if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I): - raise errors.AnsibleError("the connection attempt timed out") + raise AnsibleError("the connection attempt timed out") m = re.search(r'Code\s+?(\d{3})', err_msg) if m: code = int(m.groups()[0]) if code == 401: - raise errors.AnsibleError("the username/password specified for this server was incorrect") + 
raise AnsibleError("the username/password specified for this server was incorrect") elif code == 411: return protocol - vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host) + self._display.vvvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self._connection_info.remote_addr) continue if exc: - raise errors.AnsibleError(str(exc)) + raise AnsibleError(str(exc)) def _winrm_exec(self, command, args=(), from_exec=False): if from_exec: - vvvv("WINRM EXEC %r %r" % (command, args), host=self.host) + self._display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._connection_info.remote_addr) else: - vvvvv("WINRM EXEC %r %r" % (command, args), host=self.host) + self._display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._connection_info.remote_addr) if not self.protocol: self.protocol = self._winrm_connect() if not self.shell_id: @@ -125,49 +133,46 @@ def _winrm_exec(self, command, args=(), from_exec=False): command_id = self.protocol.run_command(self.shell_id, command, args) response = Response(self.protocol.get_command_output(self.shell_id, command_id)) if from_exec: - vvvv('WINRM RESULT %r' % response, host=self.host) + self._display.vvvvv('WINRM RESULT %r' % response, host=self._connection_info.remote_addr) else: - vvvvv('WINRM RESULT %r' % response, host=self.host) - vvvvv('WINRM STDOUT %s' % response.std_out, host=self.host) - vvvvv('WINRM STDERR %s' % response.std_err, host=self.host) + self._display.vvvvvv('WINRM RESULT %r' % response, host=self._connection_info.remote_addr) + self._display.vvvvvv('WINRM STDOUT %s' % response.std_out, host=self._connection_info.remote_addr) + self._display.vvvvvv('WINRM STDERR %s' % response.std_err, host=self._connection_info.remote_addr) return response finally: if command_id: self.protocol.cleanup_command(self.shell_id, command_id) - def connect(self): + def _connect(self): if not self.protocol: self.protocol = self._winrm_connect() return self - def exec_command(self, cmd, tmp_path, become_user=None, 
sudoable=False, executable=None, in_data=None): - - if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: - raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) + def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): cmd = cmd.encode('utf-8') cmd_parts = shlex.split(cmd, posix=False) if '-EncodedCommand' in cmd_parts: encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1] decoded_cmd = base64.b64decode(encoded_cmd) - vvv("EXEC %s" % decoded_cmd, host=self.host) + self._display.vvv("EXEC %s" % decoded_cmd, host=self._connection_info.remote_addr) else: - vvv("EXEC %s" % cmd, host=self.host) + self._display.vvv("EXEC %s" % cmd, host=self._connection_info.remote_addr) # For script/raw support. if cmd_parts and cmd_parts[0].lower().endswith('.ps1'): - script = powershell._build_file_cmd(cmd_parts, quote_args=False) - cmd_parts = powershell._encode_script(script, as_list=True) + script = self._shell._build_file_cmd(cmd_parts, quote_args=False) + cmd_parts = self._shell._encode_script(script, as_list=True) try: result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True) except Exception, e: traceback.print_exc() - raise errors.AnsibleError("failed to exec cmd %s" % cmd) + raise AnsibleError("failed to exec cmd %s" % cmd) return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8')) def put_file(self, in_path, out_path): - vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) if not os.path.exists(in_path): - raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) + raise AnsibleFileNotFound("file or module does not exist: %s" % in_path) with open(in_path) as in_file: in_size = os.path.getsize(in_path) script_template = ''' @@ -179,8 +184,8 @@ def 
put_file(self, in_path, out_path): [void]$s.Close(); ''' # Determine max size of data we can pass per command. - script = script_template % (powershell._escape(out_path), in_size, '', in_size) - cmd = powershell._encode_script(script) + script = script_template % (self._shell._escape(out_path), in_size, '', in_size) + cmd = self._shell._encode_script(script) # Encode script with no data, subtract its length from 8190 (max # windows command length), divide by 2.67 (UTF16LE base64 command # encoding), then by 1.35 again (data base64 encoding). @@ -192,19 +197,19 @@ def put_file(self, in_path, out_path): if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'): out_path = out_path + '.ps1' b64_data = base64.b64encode(out_data) - script = script_template % (powershell._escape(out_path), offset, b64_data, in_size) - vvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self.host) - cmd_parts = powershell._encode_script(script, as_list=True) + script = script_template % (self._shell._escape(out_path), offset, b64_data, in_size) + self._display.vvvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self._connection_info.remote_addr) + cmd_parts = self._shell._encode_script(script, as_list=True) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:]) if result.status_code != 0: raise IOError(result.std_err.encode('utf-8')) except Exception: traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" % out_path) + raise AnsibleError("failed to transfer file to %s" % out_path) def fetch_file(self, in_path, out_path): out_path = out_path.replace('\\', '/') - vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) buffer_size = 2**19 # 0.5MB chunks if not os.path.exists(os.path.dirname(out_path)): os.makedirs(os.path.dirname(out_path)) @@ 
-233,9 +238,9 @@ def fetch_file(self, in_path, out_path): Write-Error "%(path)s does not exist"; Exit 1; } - ''' % dict(buffer_size=buffer_size, path=powershell._escape(in_path), offset=offset) - vvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self.host) - cmd_parts = powershell._encode_script(script, as_list=True) + ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset) + self._display.vvvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self._connection_info.remote_addr) + cmd_parts = self._shell._encode_script(script, as_list=True) result = self._winrm_exec(cmd_parts[0], cmd_parts[1:]) if result.status_code != 0: raise IOError(result.std_err.encode('utf-8')) @@ -259,7 +264,7 @@ def fetch_file(self, in_path, out_path): offset += len(data) except Exception: traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" % out_path) + raise AnsibleError("failed to transfer file to %s" % out_path) finally: if out_file: out_file.close() diff --git a/v2/ansible/plugins/shell/powershell.py b/v2/ansible/plugins/shell/powershell.py index 9f3825c3b0f9a1..e4331e46c6559d 100644 --- a/v2/ansible/plugins/shell/powershell.py +++ b/v2/ansible/plugins/shell/powershell.py @@ -32,33 +32,6 @@ if _powershell_version: _common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:] -def _escape(value, include_vars=False): - '''Return value escaped for use in PowerShell command.''' - # http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences - # http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python - subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'), - ('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'), - ('\'', '`\''), ('`', '``'), ('\x00', '`0')] - if include_vars: - subs.append(('$', '`$')) - pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs) - substs = [s for p, s in subs] - 
replace = lambda m: substs[m.lastindex - 1] - return re.sub(pattern, replace, value) - -def _encode_script(script, as_list=False): - '''Convert a PowerShell script to a single base64-encoded command.''' - script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()]) - encoded_script = base64.b64encode(script.encode('utf-16-le')) - cmd_parts = _common_args + ['-EncodedCommand', encoded_script] - if as_list: - return cmd_parts - return ' '.join(cmd_parts) - -def _build_file_cmd(cmd_parts): - '''Build command line to run a file, given list of file name plus args.''' - return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + ['"%s"' % x for x in cmd_parts]) - class ShellModule(object): def env_prefix(self, **kwargs): @@ -75,19 +48,19 @@ def chmod(self, mode, path): return '' def remove(self, path, recurse=False): - path = _escape(path) + path = self._escape(path) if recurse: - return _encode_script('''Remove-Item "%s" -Force -Recurse;''' % path) + return self._encode_script('''Remove-Item "%s" -Force -Recurse;''' % path) else: - return _encode_script('''Remove-Item "%s" -Force;''' % path) + return self._encode_script('''Remove-Item "%s" -Force;''' % path) def mkdtemp(self, basefile, system=False, mode=None): - basefile = _escape(basefile) + basefile = self._escape(basefile) # FIXME: Support system temp path! 
- return _encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile) + return self._encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile) def md5(self, path): - path = _escape(path) + path = self._escape(path) script = ''' If (Test-Path -PathType Leaf "%(path)s") { @@ -105,15 +78,43 @@ def md5(self, path): Write-Host "1"; } ''' % dict(path=path) - return _encode_script(script) + return self._encode_script(script) def build_module_command(self, env_string, shebang, cmd, rm_tmp=None): cmd = cmd.encode('utf-8') cmd_parts = shlex.split(cmd, posix=False) if not cmd_parts[0].lower().endswith('.ps1'): cmd_parts[0] = '%s.ps1' % cmd_parts[0] - script = _build_file_cmd(cmd_parts) + script = self._build_file_cmd(cmd_parts) if rm_tmp: - rm_tmp = _escape(rm_tmp) + rm_tmp = self._escape(rm_tmp) script = '%s; Remove-Item "%s" -Force -Recurse;' % (script, rm_tmp) - return _encode_script(script) + return self._encode_script(script) + + def _escape(self, value, include_vars=False): + '''Return value escaped for use in PowerShell command.''' + # http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences + # http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python + subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'), + ('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'), + ('\'', '`\''), ('`', '``'), ('\x00', '`0')] + if include_vars: + subs.append(('$', '`$')) + pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs) + substs = [s for p, s in subs] + replace = lambda m: substs[m.lastindex - 1] + return re.sub(pattern, replace, value) + + def _encode_script(self, script, as_list=False): + '''Convert a PowerShell script to a single base64-encoded command.''' + script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()]) + encoded_script = 
base64.b64encode(script.encode('utf-16-le')) + cmd_parts = _common_args + ['-EncodedCommand', encoded_script] + if as_list: + return cmd_parts + return ' '.join(cmd_parts) + + def _build_file_cmd(self, cmd_parts): + '''Build command line to run a file, given list of file name plus args.''' + return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + ['"%s"' % x for x in cmd_parts]) + diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index 63cc9e4c6dab9f..0881627c4bf445 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -73,6 +73,9 @@ def vvvv(self, msg, host=None): def vvvvv(self, msg, host=None): return self.verbose(msg, host=host, caplevel=4) + def vvvvvv(self, msg, host=None): + return self.verbose(msg, host=host, caplevel=5) + def verbose(self, msg, host=None, caplevel=2): # FIXME: this needs to be implemented #msg = utils.sanitize_output(msg) From ee3240346774cbfdc671b6ac114061673fb1b6b7 Mon Sep 17 00:00:00 2001 From: Tyler Harper Date: Mon, 27 Apr 2015 11:16:56 -0400 Subject: [PATCH 0417/3617] change --ansible-private-keyfile to --private-key An old command line option was left in the documentation. 
--- docsite/rst/intro_configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index a13f6c6ecd990d..91be8a98da242c 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -451,7 +451,7 @@ private_key_file ================ If you are using a pem file to authenticate with machines rather than SSH agent or passwords, you can set the default -value here to avoid re-specifying ``--ansible-private-keyfile`` with every invocation:: +value here to avoid re-specifying ``--private-key`` with every invocation:: private_key_file=/path/to/file.pem From af2dff9cfb01f2d5848c74aed7e995808943576b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 27 Apr 2015 08:33:29 -0700 Subject: [PATCH 0418/3617] Restore the python3-compat import __future__ and Exception as update. --- v2/ansible/plugins/connections/paramiko_ssh.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/v2/ansible/plugins/connections/paramiko_ssh.py index a2b961bd686609..01e95451b80a55 100644 --- a/v2/ansible/plugins/connections/paramiko_ssh.py +++ b/v2/ansible/plugins/connections/paramiko_ssh.py @@ -14,7 +14,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
- +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type # --- # The paramiko transport is provided because many distributions, in particular EL6 and before @@ -173,7 +174,7 @@ def _connect_uncached(self): timeout=self._connection_info.timeout, port=port, ) - except Exception, e: + except Exception as e: msg = str(e) if "PID check failed" in msg: raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible") @@ -197,7 +198,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): try: self.ssh.get_transport().set_keepalive(5) chan = self.ssh.get_transport().open_session() - except Exception, e: + except Exception as e: msg = "Failed to open session" if len(str(e)) > 0: msg += ": %s" % str(e) @@ -256,7 +257,7 @@ def put_file(self, in_path, out_path): try: self.sftp = self.ssh.open_sftp() - except Exception, e: + except Exception as e: raise AnsibleError("failed to open a SFTP connection (%s)" % e) try: @@ -280,7 +281,7 @@ def fetch_file(self, in_path, out_path): try: self.sftp = self._connect_sftp() - except Exception, e: + except Exception as e: raise AnsibleError("failed to open a SFTP connection (%s)", e) try: From 800782922874c1a5357f05f7456fa5ce76a8da10 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 27 Apr 2015 08:34:16 -0700 Subject: [PATCH 0419/3617] Enable warnings for python3 on scripts shebangs. 
--- v2/bin/ansible | 13 ++++++++++++- v2/bin/ansible-playbook | 32 +++++++++++++++++++++++++++++++- v2/bin/ansible-vault | 2 +- 3 files changed, 44 insertions(+), 3 deletions(-) diff --git a/v2/bin/ansible b/v2/bin/ansible index 9b3ccd38be673b..48f956baa1feb6 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python -tt -3 -Wd # (c) 2012, Michael DeHaan # @@ -19,6 +19,17 @@ ######################################################## +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass + import os import sys diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index d663e2e0a3fd4a..a3f20cc28e1680 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -1,7 +1,37 @@ -#!/usr/bin/env python +#!/usr/bin/env python -tt -3 -Wd + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +######################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass + import os import stat import sys diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault index 506402ee15f935..2771116b0b78ad 100755 --- a/v2/bin/ansible-vault +++ b/v2/bin/ansible-vault @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python -tt -3 -Wd # (c) 2014, James Tanner # From ed2e6fc8fa9963a518c5b31dc00bcfc3e09ff969 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 27 Apr 2015 08:46:26 -0700 Subject: [PATCH 0420/3617] Restore python3 fixups --- v2/ansible/plugins/connections/accelerate.py | 5 ++++- v2/ansible/plugins/connections/chroot.py | 2 ++ v2/ansible/plugins/connections/funcd.py | 2 ++ v2/ansible/plugins/connections/jail.py | 2 ++ v2/ansible/plugins/connections/libvirt_lxc.py | 2 ++ v2/ansible/plugins/connections/ssh.py | 2 +- v2/ansible/plugins/connections/winrm.py | 7 ++++--- v2/ansible/plugins/connections/zone.py | 2 ++ 8 files changed, 19 insertions(+), 5 deletions(-) diff --git a/v2/ansible/plugins/connections/accelerate.py b/v2/ansible/plugins/connections/accelerate.py index 0627267c16b215..1095ed049c8858 100644 --- a/v2/ansible/plugins/connections/accelerate.py +++ b/v2/ansible/plugins/connections/accelerate.py @@ -15,6 +15,9 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import json import os import base64 @@ -141,7 +144,7 @@ def connect(self, allow_ssh=True): # shutdown, so we'll reconnect. wrong_user = True - except AnsibleError, e: + except AnsibleError as e: if allow_ssh: if "WRONG_USER" in e: vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host) diff --git a/v2/ansible/plugins/connections/chroot.py b/v2/ansible/plugins/connections/chroot.py index 3e960472879603..3ecc0f70301aa8 100644 --- a/v2/ansible/plugins/connections/chroot.py +++ b/v2/ansible/plugins/connections/chroot.py @@ -15,6 +15,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import distutils.spawn import traceback diff --git a/v2/ansible/plugins/connections/funcd.py b/v2/ansible/plugins/connections/funcd.py index 92b7f53605baab..92bda4bb347758 100644 --- a/v2/ansible/plugins/connections/funcd.py +++ b/v2/ansible/plugins/connections/funcd.py @@ -21,6 +21,8 @@ # The func transport permit to use ansible over func. For people who have already setup # func and that wish to play with ansible, this permit to move gradually to ansible # without having to redo completely the setup of the network. +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type HAVE_FUNC=False try: diff --git a/v2/ansible/plugins/connections/jail.py b/v2/ansible/plugins/connections/jail.py index c7b61bc638cd4f..f7623b3938265b 100644 --- a/v2/ansible/plugins/connections/jail.py +++ b/v2/ansible/plugins/connections/jail.py @@ -16,6 +16,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import distutils.spawn import traceback diff --git a/v2/ansible/plugins/connections/libvirt_lxc.py b/v2/ansible/plugins/connections/libvirt_lxc.py index 34cdb592b246b7..392436073b78ee 100644 --- a/v2/ansible/plugins/connections/libvirt_lxc.py +++ b/v2/ansible/plugins/connections/libvirt_lxc.py @@ -16,6 +16,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import distutils.spawn import os diff --git a/v2/ansible/plugins/connections/ssh.py b/v2/ansible/plugins/connections/ssh.py index cc5b321d143699..49e1e3b966098b 100644 --- a/v2/ansible/plugins/connections/ssh.py +++ b/v2/ansible/plugins/connections/ssh.py @@ -236,7 +236,7 @@ def not_in_host_file(self, host): continue try: host_fh = open(hf) - except IOError, e: + except IOError as e: hfiles_not_found += 1 continue else: diff --git a/v2/ansible/plugins/connections/winrm.py b/v2/ansible/plugins/connections/winrm.py index 833358d58c1013..0b480f3796b239 100644 --- a/v2/ansible/plugins/connections/winrm.py +++ b/v2/ansible/plugins/connections/winrm.py @@ -15,7 +15,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from __future__ import absolute_import +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import base64 import os @@ -103,7 +104,7 @@ def _winrm_connect(self): try: protocol.send_message('') return protocol - except WinRMTransportError, exc: + except WinRMTransportError as exc: err_msg = str(exc) if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I): raise AnsibleError("the connection attempt timed out") @@ -164,7 +165,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): cmd_parts = self._shell._encode_script(script, as_list=True) try: result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True) - except Exception, e: + except Exception as e: traceback.print_exc() raise AnsibleError("failed to exec cmd %s" % cmd) return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8')) diff --git a/v2/ansible/plugins/connections/zone.py b/v2/ansible/plugins/connections/zone.py index 211bd0fbcc63f8..a4f8c1a027c231 100644 --- a/v2/ansible/plugins/connections/zone.py +++ b/v2/ansible/plugins/connections/zone.py @@ -17,6 +17,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import distutils.spawn import traceback From 5034a2c702c1dc8aee1d0ab25912f19cf065bc0e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 27 Apr 2015 08:50:04 -0700 Subject: [PATCH 0421/3617] Use six to import urlparse --- v2/ansible/plugins/connections/winrm.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/v2/ansible/plugins/connections/winrm.py b/v2/ansible/plugins/connections/winrm.py index 0b480f3796b239..8a42da2534b248 100644 --- a/v2/ansible/plugins/connections/winrm.py +++ b/v2/ansible/plugins/connections/winrm.py @@ -23,7 +23,8 @@ import re import shlex import traceback -import urlparse + +from six.moves.urllib import parse try: from winrm import Response @@ -90,7 +91,7 @@ def _winrm_connect(self): else: realm = None - endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', '')) + endpoint = parse.urlunsplit((scheme, netloc, '/wsman', '', '')) self._display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._connection_info.remote_addr) protocol = Protocol( From 0303d9ce491f3cda897450d803bf02b26cc8020c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 27 Apr 2015 12:20:17 -0400 Subject: [PATCH 0422/3617] added new consul modules to changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 58638e96a79cd0..202174c23a15be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,10 @@ Deprecated Modules: New Modules: * find * ec2_ami_find + * consul + * consul_acl + * consul_kv + * consul_session * cloudtrail * cloudstack: cs_affinitygroup * cloudstack: cs_firewall From 582259f98ba750d9eda833acdc1a2a490c516792 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 27 Apr 2015 11:16:19 -0500 Subject: [PATCH 0423/3617] Applying c9fb97cc8 (permissions on symlink fix) to v2 --- v2/ansible/module_utils/basic.py | 19 +++++++++++++++---- 1 file 
changed, 15 insertions(+), 4 deletions(-) diff --git a/v2/ansible/module_utils/basic.py b/v2/ansible/module_utils/basic.py index b3cebf0ba5a0fc..b875160bb20b37 100644 --- a/v2/ansible/module_utils/basic.py +++ b/v2/ansible/module_utils/basic.py @@ -31,7 +31,7 @@ ANSIBLE_VERSION = "<>" -MODULE_ARGS = "" +MODULE_ARGS = "<>" MODULE_COMPLEX_ARGS = "<>" BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1] @@ -657,14 +657,25 @@ def set_mode_if_different(self, path, mode, changed): # FIXME: comparison against string above will cause this to be executed # every time try: - if 'lchmod' in dir(os): + if hasattr(os, 'lchmod'): os.lchmod(path, mode) else: - os.chmod(path, mode) + if not os.path.islink(path): + os.chmod(path, mode) + else: + # Attempt to set the perms of the symlink but be + # careful not to change the perms of the underlying + # file while trying + underlying_stat = os.stat(path) + os.chmod(path, mode) + new_underlying_stat = os.stat(path) + if underlying_stat.st_mode != new_underlying_stat.st_mode: + os.chmod(path, stat.S_IMODE(underlying_stat.st_mode)) + q_stat = os.stat(path) except OSError, e: if os.path.islink(path) and e.errno == errno.EPERM: # Can't set mode on symbolic links pass - elif e.errno == errno.ENOENT: # Can't set mode on broken symbolic links + elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links pass else: raise e From 55cf641b4b4925f24660e9d8a255c60ec9d74af3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 27 Apr 2015 11:28:20 -0500 Subject: [PATCH 0424/3617] Applying backup_local fixes to v2 --- v2/ansible/module_utils/basic.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/v2/ansible/module_utils/basic.py b/v2/ansible/module_utils/basic.py index b875160bb20b37..8f9b03f882d1a2 100644 --- a/v2/ansible/module_utils/basic.py +++ b/v2/ansible/module_utils/basic.py @@ -1293,14 +1293,18 @@ def sha256(self, filename): def backup_local(self, fn): '''make a date-marked backup of 
the specified file, return True or False on success or failure''' - # backups named basename-YYYY-MM-DD@HH:MM~ - ext = time.strftime("%Y-%m-%d@%H:%M~", time.localtime(time.time())) - backupdest = '%s.%s' % (fn, ext) - try: - shutil.copy2(fn, backupdest) - except shutil.Error, e: - self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e)) + backupdest = '' + if os.path.exists(fn): + # backups named basename-YYYY-MM-DD@HH:MM:SS~ + ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time())) + backupdest = '%s.%s' % (fn, ext) + + try: + shutil.copy2(fn, backupdest) + except (shutil.Error, IOError), e: + self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e)) + return backupdest def cleanup(self, tmpfile): From 49bf70ed9404fdc362710511e50a66942a30fc8a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 27 Apr 2015 11:30:58 -0500 Subject: [PATCH 0425/3617] Moving new module_utils/cloudstack.py to v2 --- v2/ansible/module_utils/cloudstack.py | 195 ++++++++++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 v2/ansible/module_utils/cloudstack.py diff --git a/v2/ansible/module_utils/cloudstack.py b/v2/ansible/module_utils/cloudstack.py new file mode 100644 index 00000000000000..2c891434bdebea --- /dev/null +++ b/v2/ansible/module_utils/cloudstack.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + + +class AnsibleCloudStack: + + def __init__(self, module): + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + self.module = module + self._connect() + + self.project_id = None + self.ip_address_id = None + self.zone_id = None + self.vm_id = None + self.os_type_id = None + self.hypervisor = None + + + def _connect(self): + api_key = self.module.params.get('api_key') + api_secret = self.module.params.get('secret_key') + api_url = self.module.params.get('api_url') + api_http_method = self.module.params.get('api_http_method') + + if api_key and api_secret and api_url: + self.cs = CloudStack( + endpoint=api_url, + key=api_key, + secret=api_secret, + method=api_http_method + ) + else: + self.cs = CloudStack(**read_config()) + + + def get_project_id(self): + if self.project_id: + return self.project_id + + project = self.module.params.get('project') + if not project: + return None + + projects = self.cs.listProjects() + if projects: + for p in projects['project']: + if project in [ p['name'], p['displaytext'], p['id'] ]: + self.project_id = p['id'] + return self.project_id + self.module.fail_json(msg="project '%s' not found" % project) + + + def get_ip_address_id(self): + if self.ip_address_id: + return self.ip_address_id + + ip_address = self.module.params.get('ip_address') + if not ip_address: + self.module.fail_json(msg="IP address param 'ip_address' is required") + + args = {} + args['ipaddress'] = ip_address + args['projectid'] = self.get_project_id() + ip_addresses = self.cs.listPublicIpAddresses(**args) + + if not ip_addresses: + self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress']) + + self.ip_address_id = ip_addresses['publicipaddress'][0]['id'] + return self.ip_address_id + + + def get_vm_id(self): + if self.vm_id: + return self.vm_id + + vm = 
self.module.params.get('vm') + if not vm: + self.module.fail_json(msg="Virtual machine param 'vm' is required") + + args = {} + args['projectid'] = self.get_project_id() + vms = self.cs.listVirtualMachines(**args) + if vms: + for v in vms['virtualmachine']: + if vm in [ v['displayname'], v['name'], v['id'] ]: + self.vm_id = v['id'] + return self.vm_id + self.module.fail_json(msg="Virtual machine '%s' not found" % vm) + + + def get_zone_id(self): + if self.zone_id: + return self.zone_id + + zone = self.module.params.get('zone') + zones = self.cs.listZones() + + # use the first zone if no zone param given + if not zone: + self.zone_id = zones['zone'][0]['id'] + return self.zone_id + + if zones: + for z in zones['zone']: + if zone in [ z['name'], z['id'] ]: + self.zone_id = z['id'] + return self.zone_id + self.module.fail_json(msg="zone '%s' not found" % zone) + + + def get_os_type_id(self): + if self.os_type_id: + return self.os_type_id + + os_type = self.module.params.get('os_type') + if not os_type: + return None + + os_types = self.cs.listOsTypes() + if os_types: + for o in os_types['ostype']: + if os_type in [ o['description'], o['id'] ]: + self.os_type_id = o['id'] + return self.os_type_id + self.module.fail_json(msg="OS type '%s' not found" % os_type) + + + def get_hypervisor(self): + if self.hypervisor: + return self.hypervisor + + hypervisor = self.module.params.get('hypervisor') + hypervisors = self.cs.listHypervisors() + + # use the first hypervisor if no hypervisor param given + if not hypervisor: + self.hypervisor = hypervisors['hypervisor'][0]['name'] + return self.hypervisor + + for h in hypervisors['hypervisor']: + if hypervisor.lower() == h['name'].lower(): + self.hypervisor = h['name'] + return self.hypervisor + self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor) + + + def _poll_job(self, job=None, key=None): + if 'jobid' in job: + while True: + res = self.cs.queryAsyncJobResult(jobid=job['jobid']) + if res['jobstatus'] != 0 and 
'jobresult' in res: + if 'errortext' in res['jobresult']: + self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext']) + if key and key in res['jobresult']: + job = res['jobresult'][key] + break + time.sleep(2) + return job From 8d174e704490a01badf73efbc7e4bfd3e169b8aa Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 27 Apr 2015 11:37:49 -0500 Subject: [PATCH 0426/3617] Updating module_utils/ec2.py in v2 with version from v1 --- v2/ansible/module_utils/ec2.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/v2/ansible/module_utils/ec2.py b/v2/ansible/module_utils/ec2.py index 8d2a369e90040f..d02c3476f2e975 100644 --- a/v2/ansible/module_utils/ec2.py +++ b/v2/ansible/module_utils/ec2.py @@ -33,13 +33,14 @@ HAS_LOOSE_VERSION = False + def aws_common_argument_spec(): return dict( ec2_url=dict(), aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True), aws_access_key=dict(aliases=['ec2_access_key', 'access_key']), validate_certs=dict(default=True, type='bool'), - security_token=dict(no_log=True), + security_token=dict(aliases=['access_token'], no_log=True), profile=dict(), ) @@ -72,38 +73,38 @@ def get_aws_connection_info(module): validate_certs = module.params.get('validate_certs') if not ec2_url: - if 'EC2_URL' in os.environ: - ec2_url = os.environ['EC2_URL'] - elif 'AWS_URL' in os.environ: + if 'AWS_URL' in os.environ: ec2_url = os.environ['AWS_URL'] + elif 'EC2_URL' in os.environ: + ec2_url = os.environ['EC2_URL'] if not access_key: - if 'EC2_ACCESS_KEY' in os.environ: - access_key = os.environ['EC2_ACCESS_KEY'] - elif 'AWS_ACCESS_KEY_ID' in os.environ: + if 'AWS_ACCESS_KEY_ID' in os.environ: access_key = os.environ['AWS_ACCESS_KEY_ID'] elif 'AWS_ACCESS_KEY' in os.environ: access_key = os.environ['AWS_ACCESS_KEY'] + elif 'EC2_ACCESS_KEY' in os.environ: + access_key = os.environ['EC2_ACCESS_KEY'] else: # in case access_key came in as empty string access_key = None if not 
secret_key: - if 'EC2_SECRET_KEY' in os.environ: - secret_key = os.environ['EC2_SECRET_KEY'] - elif 'AWS_SECRET_ACCESS_KEY' in os.environ: + if 'AWS_SECRET_ACCESS_KEY' in os.environ: secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] elif 'AWS_SECRET_KEY' in os.environ: secret_key = os.environ['AWS_SECRET_KEY'] + elif 'EC2_SECRET_KEY' in os.environ: + secret_key = os.environ['EC2_SECRET_KEY'] else: # in case secret_key came in as empty string secret_key = None if not region: - if 'EC2_REGION' in os.environ: - region = os.environ['EC2_REGION'] - elif 'AWS_REGION' in os.environ: + if 'AWS_REGION' in os.environ: region = os.environ['AWS_REGION'] + elif 'EC2_REGION' in os.environ: + region = os.environ['EC2_REGION'] else: # boto.config.get returns None if config not found region = boto.config.get('Boto', 'aws_region') @@ -113,6 +114,8 @@ def get_aws_connection_info(module): if not security_token: if 'AWS_SECURITY_TOKEN' in os.environ: security_token = os.environ['AWS_SECURITY_TOKEN'] + elif 'EC2_SECURITY_TOKEN' in os.environ: + security_token = os.environ['EC2_SECURITY_TOKEN'] else: # in case security_token came in as empty string security_token = None From 805e83d2091b312d8608be93e36260536ba9cb87 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 27 Apr 2015 11:50:51 -0500 Subject: [PATCH 0427/3617] Updating module_utils/facts.py in v2 with v1 version --- v2/ansible/module_utils/facts.py | 45 +++++++++++++++++--------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/v2/ansible/module_utils/facts.py b/v2/ansible/module_utils/facts.py index 66ca86c3969e1f..4689dd2da9e907 100644 --- a/v2/ansible/module_utils/facts.py +++ b/v2/ansible/module_utils/facts.py @@ -123,20 +123,23 @@ class Facts(object): { 'path' : '/usr/bin/pkg', 'name' : 'pkg' }, ] - def __init__(self): + def __init__(self, load_on_init=True): + self.facts = {} - self.get_platform_facts() - self.get_distribution_facts() - self.get_cmdline() - self.get_public_ssh_host_keys() - 
self.get_selinux_facts() - self.get_fips_facts() - self.get_pkg_mgr_facts() - self.get_lsb_facts() - self.get_date_time_facts() - self.get_user_facts() - self.get_local_facts() - self.get_env_facts() + + if load_on_init: + self.get_platform_facts() + self.get_distribution_facts() + self.get_cmdline() + self.get_public_ssh_host_keys() + self.get_selinux_facts() + self.get_fips_facts() + self.get_pkg_mgr_facts() + self.get_lsb_facts() + self.get_date_time_facts() + self.get_user_facts() + self.get_local_facts() + self.get_env_facts() def populate(self): return self.facts @@ -198,7 +201,7 @@ def get_local_facts(self): # if that fails, skip it rc, out, err = module.run_command(fn) else: - out = get_file_content(fn) + out = get_file_content(fn, default='') # load raw json fact = 'loading %s' % fact_base @@ -1668,6 +1671,7 @@ def get_memory_facts(self): if rc == 0: self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024 + class Network(Facts): """ This is a generic Network subclass of Facts. 
This should be further @@ -1775,7 +1779,7 @@ def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6): device = os.path.basename(path) interfaces[device] = { 'device': device } if os.path.exists(os.path.join(path, 'address')): - macaddress = get_file_content(os.path.join(path, 'address')) + macaddress = get_file_content(os.path.join(path, 'address'), default='') if macaddress and macaddress != '00:00:00:00:00:00': interfaces[device]['macaddress'] = macaddress if os.path.exists(os.path.join(path, 'mtu')): @@ -1798,15 +1802,15 @@ def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6): interfaces[device]['type'] = 'bridge' interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ] if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')): - interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id')) + interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='') if os.path.exists(os.path.join(path, 'bridge', 'stp_state')): interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1' if os.path.exists(os.path.join(path, 'bonding')): interfaces[device]['type'] = 'bonding' - interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves')).split() - interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode')).split()[0] - interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon')).split()[0] - interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate')).split()[0] + interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split() + interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0] + interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), 
default='').split()[0] + interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0] primary = get_file_content(os.path.join(path, 'bonding', 'primary')) if primary: interfaces[device]['primary'] = primary @@ -2740,4 +2744,3 @@ def get_all_facts(module): setup_result['verbose_override'] = True return setup_result - From 93cc08e613fa667e60d7c5bfeff101100bba06a6 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 27 Apr 2015 11:55:14 -0500 Subject: [PATCH 0428/3617] Applying bf916fb5 fix to v2 --- v2/ansible/module_utils/powershell.ps1 | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/v2/ansible/module_utils/powershell.ps1 b/v2/ansible/module_utils/powershell.ps1 index c097c69768b09f..57d2c1b101caa7 100644 --- a/v2/ansible/module_utils/powershell.ps1 +++ b/v2/ansible/module_utils/powershell.ps1 @@ -142,3 +142,25 @@ Function ConvertTo-Bool return } +# Helper function to calculate md5 of a file in a way which powershell 3 +# and above can handle: +Function Get-FileMd5($path) +{ + $hash = "" + If (Test-Path -PathType Leaf $path) + { + $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); + [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); + $fp.Dispose(); + } + ElseIf (Test-Path -PathType Container $path) + { + $hash= "3"; + } + Else + { + $hash = "1"; + } + return $hash +} From 313d01736a4061c9ab92a638d9d0375ae50756de Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 27 Apr 2015 11:57:36 -0500 Subject: [PATCH 0429/3617] Applying cfd05ceaf fix for rax.py to v2 --- v2/ansible/module_utils/rax.py | 53 ++++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 3 deletions(-) diff --git a/v2/ansible/module_utils/rax.py b/v2/ansible/module_utils/rax.py index 75363b1aacbbdc..e92754a947f301 100644 --- a/v2/ansible/module_utils/rax.py 
+++ b/v2/ansible/module_utils/rax.py @@ -84,6 +84,11 @@ def rax_to_dict(obj, obj_type='standard'): instance[key].append(rax_to_dict(item)) elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')): if obj_type == 'server': + if key == 'image': + if not value: + instance['rax_boot_source'] = 'volume' + else: + instance['rax_boot_source'] = 'local' key = rax_slugify(key) instance[key] = value @@ -94,7 +99,35 @@ def rax_to_dict(obj, obj_type='standard'): return instance -def rax_find_image(module, rax_module, image): +def rax_find_bootable_volume(module, rax_module, server, exit=True): + """Find a servers bootable volume""" + cs = rax_module.cloudservers + cbs = rax_module.cloud_blockstorage + server_id = rax_module.utils.get_id(server) + volumes = cs.volumes.get_server_volumes(server_id) + bootable_volumes = [] + for volume in volumes: + vol = cbs.get(volume) + if module.boolean(vol.bootable): + bootable_volumes.append(vol) + if not bootable_volumes: + if exit: + module.fail_json(msg='No bootable volumes could be found for ' + 'server %s' % server_id) + else: + return False + elif len(bootable_volumes) > 1: + if exit: + module.fail_json(msg='Multiple bootable volumes found for server ' + '%s' % server_id) + else: + return False + + return bootable_volumes[0] + + +def rax_find_image(module, rax_module, image, exit=True): + """Find a server image by ID or Name""" cs = rax_module.cloudservers try: UUID(image) @@ -107,13 +140,17 @@ def rax_find_image(module, rax_module, image): image = cs.images.find(name=image) except (cs.exceptions.NotFound, cs.exceptions.NoUniqueMatch): - module.fail_json(msg='No matching image found (%s)' % - image) + if exit: + module.fail_json(msg='No matching image found (%s)' % + image) + else: + return False return rax_module.utils.get_id(image) def rax_find_volume(module, rax_module, name): + """Find a Block storage volume by ID or name""" cbs = rax_module.cloud_blockstorage try: UUID(name) @@ -129,6 +166,7 @@ def 
rax_find_volume(module, rax_module, name): def rax_find_network(module, rax_module, network): + """Find a cloud network by ID or name""" cnw = rax_module.cloud_networks try: UUID(network) @@ -151,6 +189,7 @@ def rax_find_network(module, rax_module, network): def rax_find_server(module, rax_module, server): + """Find a Cloud Server by ID or name""" cs = rax_module.cloudservers try: UUID(server) @@ -171,6 +210,7 @@ def rax_find_server(module, rax_module, server): def rax_find_loadbalancer(module, rax_module, loadbalancer): + """Find a Cloud Load Balancer by ID or name""" clb = rax_module.cloud_loadbalancers try: found = clb.get(loadbalancer) @@ -194,6 +234,10 @@ def rax_find_loadbalancer(module, rax_module, loadbalancer): def rax_argument_spec(): + """Return standard base dictionary used for the argument_spec + argument in AnsibleModule + + """ return dict( api_key=dict(type='str', aliases=['password'], no_log=True), auth_endpoint=dict(type='str'), @@ -209,10 +253,13 @@ def rax_argument_spec(): def rax_required_together(): + """Return the default list used for the required_together argument to + AnsibleModule""" return [['api_key', 'username']] def setup_rax_module(module, rax_module, region_required=True): + """Set up pyrax in a standard way for all modules""" rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION, rax_module.USER_AGENT) From 29201c0dea4d6be05cf17650f2aeb06c2e534711 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 27 Apr 2015 11:59:11 -0500 Subject: [PATCH 0430/3617] Applying 499081490 fix for rax.py to v2 --- v2/ansible/module_utils/rax.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/v2/ansible/module_utils/rax.py b/v2/ansible/module_utils/rax.py index e92754a947f301..73b48cc780d1b7 100644 --- a/v2/ansible/module_utils/rax.py +++ b/v2/ansible/module_utils/rax.py @@ -315,7 +315,11 @@ def setup_rax_module(module, rax_module, region_required=True): else: raise Exception('No credentials supplied!') except Exception, e: 
- module.fail_json(msg='%s' % e.message) + if e.message: + msg = str(e.message) + else: + msg = repr(e) + module.fail_json(msg=msg) if region_required and region not in rax_module.regions: module.fail_json(msg='%s is not a valid region, must be one of: %s' % From 6eba0d173daff5544b70a3227f7e2ffe793e36ef Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 27 Apr 2015 13:57:26 -0500 Subject: [PATCH 0431/3617] Revert shebang change added in v2 bin/ scripts --- v2/bin/ansible | 2 +- v2/bin/ansible-playbook | 2 +- v2/bin/ansible-vault | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/v2/bin/ansible b/v2/bin/ansible index 48f956baa1feb6..d269790983ea22 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -1,4 +1,4 @@ -#!/usr/bin/env python -tt -3 -Wd +#!/usr/bin/env python # (c) 2012, Michael DeHaan # diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index a3f20cc28e1680..a182f629aaa507 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -1,4 +1,4 @@ -#!/usr/bin/env python -tt -3 -Wd +#!/usr/bin/env python # (c) 2012, Michael DeHaan # diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault index 2771116b0b78ad..506402ee15f935 100755 --- a/v2/bin/ansible-vault +++ b/v2/bin/ansible-vault @@ -1,4 +1,4 @@ -#!/usr/bin/env python -tt -3 -Wd +#!/usr/bin/env python # (c) 2014, James Tanner # From 3879550e748c5f0401f4be74f12206690be950dc Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 27 Apr 2015 14:43:25 -0500 Subject: [PATCH 0432/3617] Finish backporting of smart transport selection from v1 into v2 --- v2/ansible/executor/task_executor.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index e011792cbec105..5dd3250ea0ec44 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -19,6 +19,12 @@ from __future__ import (absolute_import, division, 
print_function) __metaclass__ = type +import json +import pipes +import subprocess +import sys +import time + from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError from ansible.executor.connection_info import ConnectionInformation @@ -32,10 +38,6 @@ __all__ = ['TaskExecutor'] -import json -import time -import pipes - class TaskExecutor: ''' @@ -365,11 +367,20 @@ def _get_connection(self, variables): if self._task.delegate_to is not None: self._compute_delegate(variables) - # FIXME: add all port/connection type munging here (accelerated mode, - # fixing up options for ssh, etc.)? and 'smart' conversion conn_type = self._connection_info.connection if conn_type == 'smart': conn_type = 'ssh' + if sys.platform.startswith('darwin') and self._connection_info.remote_pass: + # due to a current bug in sshpass on OSX, which can trigger + # a kernel panic even for non-privileged users, we revert to + # paramiko on that OS when a SSH password is specified + conn_type = "paramiko" + else: + # see if SSH can support ControlPersist if not use paramiko + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err: + conn_type = "paramiko" connection = connection_loader.get(conn_type, self._connection_info, self._new_stdin) if not connection: From 92a25b340bbd2e1db0c282576bfd26450f92e761 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Wed, 17 Dec 2014 14:22:27 +0100 Subject: [PATCH 0433/3617] cloudstack: add dynamic inventory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: René Moser --- plugins/inventory/cloudstack.ini | 5 + plugins/inventory/cloudstack.py | 226 +++++++++++++++++++++++++++++++ 2 files changed, 231 insertions(+) create mode 100644 plugins/inventory/cloudstack.ini create mode 100755 plugins/inventory/cloudstack.py diff --git 
a/plugins/inventory/cloudstack.ini b/plugins/inventory/cloudstack.ini new file mode 100644 index 00000000000000..43777b593fb4a6 --- /dev/null +++ b/plugins/inventory/cloudstack.ini @@ -0,0 +1,5 @@ +[cloudstack] +#endpoint = https://api.exoscale.ch/compute +endpoint = https://cloud.example.com/client/api +key = cloudstack api key +secret = cloudstack api secret diff --git a/plugins/inventory/cloudstack.py b/plugins/inventory/cloudstack.py new file mode 100755 index 00000000000000..fdd166ec4971e9 --- /dev/null +++ b/plugins/inventory/cloudstack.py @@ -0,0 +1,226 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# (c) 2014, René Moser +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +""" +Ansible CloudStack external inventory script. +============================================= + +Generates Ansible inventory from CloudStack. Configuration is read from +'cloudstack.ini'. If you need to pass the project, write a simple wrapper +script, e.g. 
project_cloudstack.sh: + + #!/bin/bash + cloudstack.py --project $@ + + +When run against a specific host, this script returns the following attributes +based on the data obtained from CloudStack API: + + "web01": { + "cpu_number": 2, + "nic": [ + { + "ip": "10.102.76.98", + "mac": "02:00:50:99:00:01", + "type": "Isolated", + "netmask": "255.255.255.0", + "gateway": "10.102.76.1" + }, + { + "ip": "10.102.138.63", + "mac": "06:b7:5a:00:14:84", + "type": "Shared", + "netmask": "255.255.255.0", + "gateway": "10.102.138.1" + } + ], + "default_ip": "10.102.76.98", + "zone": "ZUERICH", + "created": "2014-07-02T07:53:50+0200", + "hypervisor": "VMware", + "memory": 2048, + "state": "Running", + "tags": [], + "cpu_speed": 1800, + "affinity_group": [], + "service_offering": "Small", + "cpu_used": "62%" + } + + +usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] +""" + +import os, sys +import argparse + +try: + import json +except: + import simplejson as json + + +try: + from cs import CloudStack, CloudStackException, read_config +except ImportError: + print >> sys.stderr, "Error: CloudStack library must be installed: pip install cs." 
+ sys.exit(1) + + +class CloudStackInventory(object): + def __init__(self): + + parser = argparse.ArgumentParser() + parser.add_argument('--host') + parser.add_argument('--list', action='store_true') + parser.add_argument('--project') + + options = parser.parse_args() + try: + self.cs = CloudStack(**read_config()) + except CloudStackException, e: + print >> sys.stderr, "Error: Could not connect to CloudStack API" + + project_id = '' + if options.project: + project_id = self.get_project_id(options.project) + + if options.host: + data = self.get_host(options.host) + print json.dumps(data, indent=2) + + elif options.list: + data = self.get_list() + print json.dumps(data, indent=2) + else: + print >> sys.stderr, "usage: --list | --host [--project ]" + sys.exit(1) + + + def get_project_id(self, project): + projects = self.cs.listProjects() + if projects: + for p in projects['project']: + if p['name'] == project or p['id'] == project: + return p['id'] + print >> sys.stderr, "Error: Project %s not found." 
% project + sys.exit(1) + + + def get_host(self, name, project_id=''): + hosts = self.cs.listVirtualMachines(projectid=project_id) + data = {} + for host in hosts['virtualmachine']: + host_name = host['displayname'] + if name == host_name: + data['zone'] = host['zonename'] + if 'group' in host: + data['group'] = host['group'] + data['state'] = host['state'] + data['service_offering'] = host['serviceofferingname'] + data['affinity_group'] = host['affinitygroup'] + data['security_group'] = host['securitygroup'] + data['cpu_number'] = host['cpunumber'] + data['cpu_speed'] = host['cpuspeed'] + if 'cpuused' in host: + data['cpu_used'] = host['cpuused'] + data['memory'] = host['memory'] + data['tags'] = host['tags'] + data['hypervisor'] = host['hypervisor'] + data['created'] = host['created'] + data['nic'] = [] + for nic in host['nic']: + data['nic'].append({ + 'ip': nic['ipaddress'], + 'mac': nic['macaddress'], + 'netmask': nic['netmask'], + 'gateway': nic['gateway'], + 'type': nic['type'], + }) + if nic['isdefault']: + data['default_ip'] = nic['ipaddress'] + break; + return data + + + def get_list(self, project_id=''): + data = { + 'all': { + 'hosts': [], + }, + '_meta': { + 'hostvars': {}, + }, + } + + groups = self.cs.listInstanceGroups(projectid=project_id) + for group in groups['instancegroup']: + group_name = group['name'] + if group_name and not group_name in data: + data[group_name] = { + 'hosts': [] + } + + hosts = self.cs.listVirtualMachines(projectid=project_id) + for host in hosts['virtualmachine']: + host_name = host['displayname'] + data['all']['hosts'].append(host_name) + data['_meta']['hostvars'][host_name] = {} + data['_meta']['hostvars'][host_name]['zone'] = host['zonename'] + if 'group' in host: + data['_meta']['hostvars'][host_name]['group'] = host['group'] + data['_meta']['hostvars'][host_name]['state'] = host['state'] + data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname'] + 
data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup'] + data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup'] + data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber'] + data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed'] + if 'cpuused' in host: + data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused'] + data['_meta']['hostvars'][host_name]['created'] = host['created'] + data['_meta']['hostvars'][host_name]['memory'] = host['memory'] + data['_meta']['hostvars'][host_name]['tags'] = host['tags'] + data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor'] + data['_meta']['hostvars'][host_name]['created'] = host['created'] + data['_meta']['hostvars'][host_name]['nic'] = [] + for nic in host['nic']: + data['_meta']['hostvars'][host_name]['nic'].append({ + 'ip': nic['ipaddress'], + 'mac': nic['macaddress'], + 'netmask': nic['netmask'], + 'gateway': nic['gateway'], + 'type': nic['type'], + }) + if nic['isdefault']: + data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress'] + + group_name = '' + if 'group' in host: + group_name = host['group'] + + if group_name and group_name in data: + data[group_name]['hosts'].append(host_name) + return data + + +if __name__ == '__main__': + CloudStackInventory() From bfa71054f55865297a03ec9d66ce89e57b2824d8 Mon Sep 17 00:00:00 2001 From: Milamber Date: Sat, 3 Jan 2015 18:57:55 +0000 Subject: [PATCH 0434/3617] Fix an issue when the cloudstack installation don't have any instance group --- plugins/inventory/cloudstack.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/plugins/inventory/cloudstack.py b/plugins/inventory/cloudstack.py index fdd166ec4971e9..4969b613fe5a7a 100755 --- a/plugins/inventory/cloudstack.py +++ b/plugins/inventory/cloudstack.py @@ -173,12 +173,13 @@ def get_list(self, project_id=''): } groups = self.cs.listInstanceGroups(projectid=project_id) - for group in 
groups['instancegroup']: - group_name = group['name'] - if group_name and not group_name in data: - data[group_name] = { - 'hosts': [] - } + if groups: + for group in groups['instancegroup']: + group_name = group['name'] + if group_name and not group_name in data: + data[group_name] = { + 'hosts': [] + } hosts = self.cs.listVirtualMachines(projectid=project_id) for host in hosts['virtualmachine']: From 9e5a16703b81953f9ee0334ee52413533480f460 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 10 Feb 2015 09:50:41 +0100 Subject: [PATCH 0435/3617] cloudstack: add check for empty inventory --- plugins/inventory/cloudstack.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/inventory/cloudstack.py b/plugins/inventory/cloudstack.py index 4969b613fe5a7a..d0b2f042d33f44 100755 --- a/plugins/inventory/cloudstack.py +++ b/plugins/inventory/cloudstack.py @@ -129,6 +129,8 @@ def get_project_id(self, project): def get_host(self, name, project_id=''): hosts = self.cs.listVirtualMachines(projectid=project_id) data = {} + if not hosts: + return data for host in hosts['virtualmachine']: host_name = host['displayname'] if name == host_name: @@ -182,6 +184,8 @@ def get_list(self, project_id=''): } hosts = self.cs.listVirtualMachines(projectid=project_id) + if not hosts: + return data for host in hosts['virtualmachine']: host_name = host['displayname'] data['all']['hosts'].append(host_name) From d9633037d5ccd597e8e9ff76404edf6f4b1fb4dc Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 28 Apr 2015 09:20:48 +0200 Subject: [PATCH 0436/3617] cloudstack: update copyright in dynamic inventory --- plugins/inventory/cloudstack.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/cloudstack.py b/plugins/inventory/cloudstack.py index d0b2f042d33f44..426cf163fd73c1 100755 --- a/plugins/inventory/cloudstack.py +++ b/plugins/inventory/cloudstack.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# (c) 2014, René Moser +# 
(c) 2015, René Moser # # This file is part of Ansible, # @@ -70,7 +70,8 @@ usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] """ -import os, sys +import os +import sys import argparse try: From cfbfd38723b8c6223203be6c73004475b3404dfa Mon Sep 17 00:00:00 2001 From: "Carlos E. Garcia" Date: Tue, 28 Apr 2015 09:36:42 -0400 Subject: [PATCH 0437/3617] just a few spelling error changes --- CHANGELOG.md | 2 +- docsite/rst/community.rst | 2 +- docsite/rst/playbooks_filters.rst | 2 +- lib/ansible/module_utils/database.py | 2 +- lib/ansible/module_utils/facts.py | 6 +++--- lib/ansible/playbook/__init__.py | 2 +- lib/ansible/playbook/play.py | 2 +- lib/ansible/runner/__init__.py | 2 +- lib/ansible/runner/connection_plugins/libvirt_lxc.py | 2 +- lib/ansible/runner/connection_plugins/zone.py | 2 +- lib/ansible/runner/lookup_plugins/url.py | 2 +- lib/ansible/utils/template.py | 2 +- plugins/inventory/collins.py | 2 +- plugins/inventory/consul_io.py | 2 +- plugins/inventory/openstack.py | 2 +- test/integration/roles/test_ec2_elb_lb/tasks/main.yml | 2 +- test/integration/roles/test_rax_clb/tasks/main.yml | 2 +- test/integration/roles/test_var_blending/files/foo.txt | 2 +- test/integration/roles/test_var_blending/templates/foo.j2 | 2 +- v2/ansible/executor/task_queue_manager.py | 2 +- v2/ansible/module_utils/database.py | 2 +- v2/ansible/module_utils/facts.py | 6 +++--- v2/ansible/parsing/__init__.py | 4 ++-- v2/ansible/parsing/mod_args.py | 2 +- v2/ansible/playbook/become.py | 2 +- v2/ansible/plugins/action/__init__.py | 4 ++-- v2/ansible/plugins/action/patch.py | 2 +- v2/ansible/plugins/connections/libvirt_lxc.py | 2 +- v2/ansible/plugins/connections/zone.py | 2 +- v2/ansible/plugins/lookup/url.py | 2 +- v2/ansible/template/__init__.py | 2 +- v2/hacking/module_formatter.py | 2 +- 32 files changed, 38 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 202174c23a15be..2a3d2b0167a3be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -138,7 
+138,7 @@ Other Notable Changes: Operations that depend on a clean working tree may fail unless force=yes is added. * git: When local modifications exist in a checkout, the git module will now - fail unless force is explictly specified. Specifying force=yes will allow + fail unless force is explicitly specified. Specifying force=yes will allow the module to revert and overwrite local modifications to make git actions succeed. * hg: When local modifications exist in a checkout, the hg module used to diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index b056c3dacc2085..561e214bd9db19 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -183,7 +183,7 @@ to the `module development documentation . -# The OpenStack Inventory module uses os-client-config for configuation. +# The OpenStack Inventory module uses os-client-config for configuration. # https://github.com/stackforge/os-client-config # This means it will either: # - Respect normal OS_* environment variables like other OpenStack tools diff --git a/test/integration/roles/test_ec2_elb_lb/tasks/main.yml b/test/integration/roles/test_ec2_elb_lb/tasks/main.yml index ba3968a9c2e22a..8d73e854cf0530 100644 --- a/test/integration/roles/test_ec2_elb_lb/tasks/main.yml +++ b/test/integration/roles/test_ec2_elb_lb/tasks/main.yml @@ -7,7 +7,7 @@ # __Test Outline__ # # __ec2_elb_lb__ -# create test elb with listeners and certificat +# create test elb with listeners and certificate # change AZ's # change listeners # remove listeners diff --git a/test/integration/roles/test_rax_clb/tasks/main.yml b/test/integration/roles/test_rax_clb/tasks/main.yml index 8f6a990ceb2af1..2426fa3ae5944a 100644 --- a/test/integration/roles/test_rax_clb/tasks/main.yml +++ b/test/integration/roles/test_rax_clb/tasks/main.yml @@ -601,7 +601,7 @@ - rax_clb_a1.balancer.algorithm == 'LEAST_CONNECTIONS' - rax_clb_a1.balancer.status == 'ACTIVE' -- name: Test rax_clb with updated algoritm 2 +- name: Test rax_clb with 
updated algorithm 2 rax_clb: username: "{{ rackspace_username }}" api_key: "{{ rackspace_api_key }}" diff --git a/test/integration/roles/test_var_blending/files/foo.txt b/test/integration/roles/test_var_blending/files/foo.txt index a90999cbd8900d..d51be39b1b3ba8 100644 --- a/test/integration/roles/test_var_blending/files/foo.txt +++ b/test/integration/roles/test_var_blending/files/foo.txt @@ -4,7 +4,7 @@ This comes from host, not the parents or grandparents. The value of the grandparent variable grandparent_var is not overridden and is = 2000 -The value of the parent variable is not overriden and +The value of the parent variable is not overridden and is = 6000 The variable 'overridden_in_parent' is set in the parent diff --git a/test/integration/roles/test_var_blending/templates/foo.j2 b/test/integration/roles/test_var_blending/templates/foo.j2 index d3361db3433d26..10709b1adbece3 100644 --- a/test/integration/roles/test_var_blending/templates/foo.j2 +++ b/test/integration/roles/test_var_blending/templates/foo.j2 @@ -4,7 +4,7 @@ This comes from host, not the parents or grandparents. 
The value of the grandparent variable grandparent_var is not overridden and is = {{ grandparent_var }} -The value of the parent variable is not overriden and +The value of the parent variable is not overridden and is = {{ parent_var }} The variable 'overridden_in_parent' is set in the parent diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index e13930c6df8414..0785ed3f5e10b8 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -116,7 +116,7 @@ def _initialize_notified_handlers(self, handlers): for handler in handler_block.block: handler_list.append(handler) - # then initalize it with the handler names from the handler list + # then initialize it with the handler names from the handler list for handler in handler_list: self._notified_handlers[handler.get_name()] = [] diff --git a/v2/ansible/module_utils/database.py b/v2/ansible/module_utils/database.py index 0dd1990d3e7b6c..6170614e9073d9 100644 --- a/v2/ansible/module_utils/database.py +++ b/v2/ansible/module_utils/database.py @@ -33,7 +33,7 @@ class UnclosedQuoteError(SQLParseError): pass # maps a type of identifier to the maximum number of dot levels that are -# allowed to specifiy that identifier. For example, a database column can be +# allowed to specify that identifier. For example, a database column can be # specified by up to 4 levels: database.schema.table.column _PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1) _MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1) diff --git a/v2/ansible/module_utils/facts.py b/v2/ansible/module_utils/facts.py index 4689dd2da9e907..7ded70242170a4 100644 --- a/v2/ansible/module_utils/facts.py +++ b/v2/ansible/module_utils/facts.py @@ -1494,7 +1494,7 @@ def get_dmi_facts(self): class HPUX(Hardware): """ - HP-UX-specifig subclass of Hardware. 
Defines memory and CPU facts: + HP-UX-specific subclass of Hardware. Defines memory and CPU facts: - memfree_mb - memtotal_mb - swapfree_mb @@ -2062,7 +2062,7 @@ def parse_options_line(self, words, current_if, ips): current_if['options'] = self.get_options(words[0]) def parse_nd6_line(self, words, current_if, ips): - # FreBSD has options like this... + # FreeBSD has options like this... current_if['options'] = self.get_options(words[1]) def parse_ether_line(self, words, current_if, ips): @@ -2642,7 +2642,7 @@ def get_virtual_facts(self): rc, out, err = module.run_command("/usr/sbin/virtinfo -p") # The output contains multiple lines with different keys like this: # DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false - # The output may also be not formated and the returncode is set to 0 regardless of the error condition: + # The output may also be not formatted and the returncode is set to 0 regardless of the error condition: # virtinfo can only be run from the global zone try: for line in out.split('\n'): diff --git a/v2/ansible/parsing/__init__.py b/v2/ansible/parsing/__init__.py index bf96fba84202c0..9551343fbf4399 100644 --- a/v2/ansible/parsing/__init__.py +++ b/v2/ansible/parsing/__init__.py @@ -149,12 +149,12 @@ def _get_file_contents(self, file_name): show_content = False return (data, show_content) except (IOError, OSError) as e: - raise AnsibleParserError("an error occured while trying to read the file '%s': %s" % (file_name, str(e))) + raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e))) def _handle_error(self, yaml_exc, file_name, show_content): ''' Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the - file name/position where a YAML exception occured, and raises an AnsibleParserError + file name/position where a YAML exception occurred, and raises an AnsibleParserError to display the syntax exception information. 
''' diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index e3fdba093d43a1..f46b525c663c32 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -55,7 +55,7 @@ class ModuleArgsParser: dest: b # extra gross, but also legal. in this case, the args specified - # will act as 'defaults' and will be overriden by any args specified + # will act as 'defaults' and will be overridden by any args specified # in one of the other formats (complex args under the action, or # parsed from the k=v string - command: 'pwd' diff --git a/v2/ansible/playbook/become.py b/v2/ansible/playbook/become.py index 272976929a7493..daa8c80ba943ac 100644 --- a/v2/ansible/playbook/become.py +++ b/v2/ansible/playbook/become.py @@ -37,7 +37,7 @@ def __init__(self): def _detect_privilege_escalation_conflict(self, ds): - # Fail out if user specifies conflicting privelege escalations + # Fail out if user specifies conflicting privilege escalations has_become = 'become' in ds or 'become_user'in ds has_sudo = 'sudo' in ds or 'sudo_user' in ds has_su = 'su' in ds or 'su_user' in ds diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index aead23503719b3..4265a8a5b2a3b9 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -354,9 +354,9 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_ # FIXME: async stuff here? 
#if (module_style != 'new' or async_jid is not None or not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES): if remote_module_path: - debug("transfering module to remote") + debug("transferring module to remote") self._transfer_data(remote_module_path, module_data) - debug("done transfering module to remote") + debug("done transferring module to remote") environment_string = self._compute_environment_string() diff --git a/v2/ansible/plugins/action/patch.py b/v2/ansible/plugins/action/patch.py index 717cc359f4e806..bf2af1be1ecbd6 100644 --- a/v2/ansible/plugins/action/patch.py +++ b/v2/ansible/plugins/action/patch.py @@ -34,7 +34,7 @@ def run(self, tmp=None, task_vars=dict()): if src is None: return dict(failed=True, msg="src is required") elif remote_src: - # everyting is remote, so we just execute the module + # everything is remote, so we just execute the module # without changing any of the module arguments return self._execute_module() diff --git a/v2/ansible/plugins/connections/libvirt_lxc.py b/v2/ansible/plugins/connections/libvirt_lxc.py index 392436073b78ee..1905eb6a665e31 100644 --- a/v2/ansible/plugins/connections/libvirt_lxc.py +++ b/v2/ansible/plugins/connections/libvirt_lxc.py @@ -78,7 +78,7 @@ def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/ if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We ignore privelege escalation! + # We ignore privilege escalation! 
local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.lxc) diff --git a/v2/ansible/plugins/connections/zone.py b/v2/ansible/plugins/connections/zone.py index a4f8c1a027c231..f7e19c3bb4471f 100644 --- a/v2/ansible/plugins/connections/zone.py +++ b/v2/ansible/plugins/connections/zone.py @@ -111,7 +111,7 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We happily ignore privelege escalation + # We happily ignore privilege escalation if executable == '/bin/sh': executable = None local_cmd = self._generate_cmd(executable, cmd) diff --git a/v2/ansible/plugins/lookup/url.py b/v2/ansible/plugins/lookup/url.py index 4361b1192d20f3..9f1a89f772ce54 100644 --- a/v2/ansible/plugins/lookup/url.py +++ b/v2/ansible/plugins/lookup/url.py @@ -37,7 +37,7 @@ def run(self, terms, inject=None, **kwargs): utils.warnings("Failed lookup url for %s : %s" % (term, str(e))) continue except HTTPError as e: - utils.warnings("Recieved HTTP error for %s : %s" % (term, str(e))) + utils.warnings("Received HTTP error for %s : %s" % (term, str(e))) continue for line in response.read().splitlines(): diff --git a/v2/ansible/template/__init__.py b/v2/ansible/template/__init__.py index 6c41ad3cf40697..3e61028d8d0b5c 100644 --- a/v2/ansible/template/__init__.py +++ b/v2/ansible/template/__init__.py @@ -138,7 +138,7 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=Fals if self._contains_vars(variable): # Check to see if the string we are trying to render is just referencing a single - # var. In this case we don't wont to accidentally change the type of the variable + # var. In this case we don't want to accidentally change the type of the variable # to a string by using the jinja template renderer. We just want to pass it. 
only_one = SINGLE_VAR.match(variable) if only_one: diff --git a/v2/hacking/module_formatter.py b/v2/hacking/module_formatter.py index 7ff081c31341e9..e70eb982de041f 100755 --- a/v2/hacking/module_formatter.py +++ b/v2/hacking/module_formatter.py @@ -384,7 +384,7 @@ def process_category(category, categories, options, env, template, outputname): category_file.write("""\n\n .. note:: - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. - - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not neccessarily) less activity maintained than 'core' modules. + - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less activity maintained than 'core' modules. - Tickets filed on modules are filed to different repos than those on the main open source project. 
Core module tickets should be filed at `ansible/ansible-modules-core on GitHub `_, extras tickets to `ansible/ansible-modules-extras on GitHub `_ """ % (DEPRECATED, NOTCORE)) category_file.close() From 6a8062baad3d62613d054d6159b3bd2e2b3aad56 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 28 Apr 2015 10:16:14 -0400 Subject: [PATCH 0438/3617] accidentally 'fixes' ubuntu distribution parsing, this order should not matter, need followup to figure out why this is the case --- lib/ansible/module_utils/facts.py | 4 ++-- v2/ansible/module_utils/facts.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 4689dd2da9e907..125dbee1411cb6 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -99,8 +99,8 @@ class Facts(object): ('/etc/os-release', 'SuSE'), ('/etc/gentoo-release', 'Gentoo'), ('/etc/os-release', 'Debian'), - ('/etc/lsb-release', 'Mandriva'), - ('/etc/os-release', 'NA') ) + ('/etc/os-release', 'NA'), + ('/etc/lsb-release', 'Mandriva')) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. If there is a platform with more than one diff --git a/v2/ansible/module_utils/facts.py b/v2/ansible/module_utils/facts.py index 4689dd2da9e907..125dbee1411cb6 100644 --- a/v2/ansible/module_utils/facts.py +++ b/v2/ansible/module_utils/facts.py @@ -99,8 +99,8 @@ class Facts(object): ('/etc/os-release', 'SuSE'), ('/etc/gentoo-release', 'Gentoo'), ('/etc/os-release', 'Debian'), - ('/etc/lsb-release', 'Mandriva'), - ('/etc/os-release', 'NA') ) + ('/etc/os-release', 'NA'), + ('/etc/lsb-release', 'Mandriva')) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. 
If there is a platform with more than one From 1bf5224f8210141b24f98c8e432ae28b6a9a6eb5 Mon Sep 17 00:00:00 2001 From: Devin Christensen Date: Wed, 26 Nov 2014 17:58:45 -0700 Subject: [PATCH 0439/3617] Enable writing plugins for jinja2 tests --- lib/ansible/constants.py | 1 + lib/ansible/runner/filter_plugins/core.py | 86 ------------- .../runner/filter_plugins/mathstuff.py | 8 -- lib/ansible/runner/test_plugins/__init__.py | 0 lib/ansible/runner/test_plugins/core.py | 113 ++++++++++++++++++ lib/ansible/runner/test_plugins/math.py | 36 ++++++ lib/ansible/utils/__init__.py | 6 +- lib/ansible/utils/plugins.py | 7 ++ lib/ansible/utils/template.py | 19 +++ v2/ansible/constants.py | 1 + v2/ansible/plugins/__init__.py | 7 ++ v2/ansible/template/__init__.py | 23 +++- v2/ansible/template/safe_eval.py | 8 +- 13 files changed, 216 insertions(+), 99 deletions(-) create mode 100644 lib/ansible/runner/test_plugins/__init__.py create mode 100644 lib/ansible/runner/test_plugins/core.py create mode 100644 lib/ansible/runner/test_plugins/math.py diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 089de5b7c5bf15..5dbb9e2383f0b9 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -156,6 +156,7 @@ def shell_expand_path(path): DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') +DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test_plugins:/usr/share/ansible_plugins/test_plugins') CACHE_PLUGIN = get_config(p, DEFAULTS, 
'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py index bdf45509c3a610..c527bc529f3b71 100644 --- a/lib/ansible/runner/filter_plugins/core.py +++ b/lib/ansible/runner/filter_plugins/core.py @@ -74,55 +74,6 @@ def to_nice_json(a, *args, **kw): return to_json(a, *args, **kw) return json.dumps(a, indent=4, sort_keys=True, *args, **kw) -def failed(*a, **kw): - ''' Test if task result yields failed ''' - item = a[0] - if type(item) != dict: - raise errors.AnsibleFilterError("|failed expects a dictionary") - rc = item.get('rc',0) - failed = item.get('failed',False) - if rc != 0 or failed: - return True - else: - return False - -def success(*a, **kw): - ''' Test if task result yields success ''' - return not failed(*a, **kw) - -def changed(*a, **kw): - ''' Test if task result yields changed ''' - item = a[0] - if type(item) != dict: - raise errors.AnsibleFilterError("|changed expects a dictionary") - if not 'changed' in item: - changed = False - if ('results' in item # some modules return a 'results' key - and type(item['results']) == list - and type(item['results'][0]) == dict): - for result in item['results']: - changed = changed or result.get('changed', False) - else: - changed = item.get('changed', False) - return changed - -def skipped(*a, **kw): - ''' Test if task result yields skipped ''' - item = a[0] - if type(item) != dict: - raise errors.AnsibleFilterError("|skipped expects a dictionary") - skipped = item.get('skipped', False) - return skipped - -def mandatory(a): - ''' Make a variable mandatory ''' - try: - a - except NameError: - raise errors.AnsibleFilterError('Mandatory variable not defined.') - else: - return a - def bool(a): ''' return a bool for the arg ''' if a is None or type(a) == bool: @@ -142,27 +93,6 @@ def fileglob(pathname): ''' 
return list of matched files for glob ''' return glob.glob(pathname) -def regex(value='', pattern='', ignorecase=False, match_type='search'): - ''' Expose `re` as a boolean filter using the `search` method by default. - This is likely only useful for `search` and `match` which already - have their own filters. - ''' - if ignorecase: - flags = re.I - else: - flags = 0 - _re = re.compile(pattern, flags=flags) - _bool = __builtins__.get('bool') - return _bool(getattr(_re, match_type, 'search')(value)) - -def match(value, pattern='', ignorecase=False): - ''' Perform a `re.match` returning a boolean ''' - return regex(value, pattern, ignorecase, 'match') - -def search(value, pattern='', ignorecase=False): - ''' Perform a `re.search` returning a boolean ''' - return regex(value, pattern, ignorecase, 'search') - def regex_replace(value='', pattern='', replacement='', ignorecase=False): ''' Perform a `re.sub` returning a string ''' @@ -299,19 +229,6 @@ def filters(self): 'realpath': partial(unicode_wrap, os.path.realpath), 'relpath': partial(unicode_wrap, os.path.relpath), - # failure testing - 'failed' : failed, - 'success' : success, - - # changed testing - 'changed' : changed, - - # skip testing - 'skipped' : skipped, - - # variable existence - 'mandatory': mandatory, - # value as boolean 'bool': bool, @@ -333,9 +250,6 @@ def filters(self): 'fileglob': fileglob, # regex - 'match': match, - 'search': search, - 'regex': regex, 'regex_replace': regex_replace, # ? 
: ; diff --git a/lib/ansible/runner/filter_plugins/mathstuff.py b/lib/ansible/runner/filter_plugins/mathstuff.py index c6a49485a40bfd..a841c6e457c296 100644 --- a/lib/ansible/runner/filter_plugins/mathstuff.py +++ b/lib/ansible/runner/filter_plugins/mathstuff.py @@ -67,13 +67,6 @@ def max(a): _max = __builtins__.get('max') return _max(a); -def isnotanumber(x): - try: - return math.isnan(x) - except TypeError: - return False - - def logarithm(x, base=math.e): try: if base == 10: @@ -107,7 +100,6 @@ class FilterModule(object): def filters(self): return { # general math - 'isnan': isnotanumber, 'min' : min, 'max' : max, diff --git a/lib/ansible/runner/test_plugins/__init__.py b/lib/ansible/runner/test_plugins/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/lib/ansible/runner/test_plugins/core.py b/lib/ansible/runner/test_plugins/core.py new file mode 100644 index 00000000000000..cc8c702d7547c1 --- /dev/null +++ b/lib/ansible/runner/test_plugins/core.py @@ -0,0 +1,113 @@ +# (c) 2012, Jeroen Hoekx +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import re +from ansible import errors + +def failed(*a, **kw): + ''' Test if task result yields failed ''' + item = a[0] + if type(item) != dict: + raise errors.AnsibleFilterError("|failed expects a dictionary") + rc = item.get('rc',0) + failed = item.get('failed',False) + if rc != 0 or failed: + return True + else: + return False + +def success(*a, **kw): + ''' Test if task result yields success ''' + return not failed(*a, **kw) + +def changed(*a, **kw): + ''' Test if task result yields changed ''' + item = a[0] + if type(item) != dict: + raise errors.AnsibleFilterError("|changed expects a dictionary") + if not 'changed' in item: + changed = False + if ('results' in item # some modules return a 'results' key + and type(item['results']) == list + and type(item['results'][0]) == dict): + for result in item['results']: + changed = changed or result.get('changed', False) + else: + changed = item.get('changed', False) + return changed + +def skipped(*a, **kw): + ''' Test if task result yields skipped ''' + item = a[0] + if type(item) != dict: + raise errors.AnsibleFilterError("|skipped expects a dictionary") + skipped = item.get('skipped', False) + return skipped + +def mandatory(a): + ''' Make a variable mandatory ''' + try: + a + except NameError: + raise errors.AnsibleFilterError('Mandatory variable not defined.') + else: + return a + +def regex(value='', pattern='', ignorecase=False, match_type='search'): + ''' Expose `re` as a boolean filter using the `search` method by default. + This is likely only useful for `search` and `match` which already + have their own filters. 
+ ''' + if ignorecase: + flags = re.I + else: + flags = 0 + _re = re.compile(pattern, flags=flags) + _bool = __builtins__.get('bool') + return _bool(getattr(_re, match_type, 'search')(value)) + +def match(value, pattern='', ignorecase=False): + ''' Perform a `re.match` returning a boolean ''' + return regex(value, pattern, ignorecase, 'match') + +def search(value, pattern='', ignorecase=False): + ''' Perform a `re.search` returning a boolean ''' + return regex(value, pattern, ignorecase, 'search') + +class TestModule(object): + ''' Ansible core jinja2 tests ''' + + def tests(self): + return { + # failure testing + 'failed' : failed, + 'success' : success, + + # changed testing + 'changed' : changed, + + # skip testing + 'skipped' : skipped, + + # variable existence + 'mandatory': mandatory, + + # regex + 'match': match, + 'search': search, + 'regex': regex, + } diff --git a/lib/ansible/runner/test_plugins/math.py b/lib/ansible/runner/test_plugins/math.py new file mode 100644 index 00000000000000..3ac871c43575d4 --- /dev/null +++ b/lib/ansible/runner/test_plugins/math.py @@ -0,0 +1,36 @@ +# (c) 2014, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from __future__ import absolute_import + +import math +from ansible import errors + +def isnotanumber(x): + try: + return math.isnan(x) + except TypeError: + return False + +class TestModule(object): + ''' Ansible math jinja2 tests ''' + + def tests(self): + return { + # general math + 'isnan': isnotanumber, + } diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 7ed07a54c840d3..17790d63c59b7a 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1403,7 +1403,11 @@ def safe_eval(expr, locals={}, include_exceptions=False): for filter in filter_loader.all(): filter_list.extend(filter.filters().keys()) - CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list = [] + for test in test_loader.all(): + test_list.extend(test.tests().keys()) + + CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list class CleansingNodeVisitor(ast.NodeVisitor): def generic_visit(self, node, inside_call=False): diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/utils/plugins.py index 14953d8f44a128..c50ebcb9ce7102 100644 --- a/lib/ansible/utils/plugins.py +++ b/lib/ansible/utils/plugins.py @@ -296,6 +296,13 @@ def all(self, *args, **kwargs): 'filter_plugins' ) +test_loader = PluginLoader( + 'TestModule', + 'ansible.runner.test_plugins', + C.DEFAULT_TEST_PLUGIN_PATH, + 'test_plugins' +) + fragment_loader = PluginLoader( 'ModuleDocFragment', 'ansible.utils.module_docs_fragments', diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py index 5f712b2675ea9e..043ad0c4192a0e 100644 --- a/lib/ansible/utils/template.py +++ b/lib/ansible/utils/template.py @@ -39,6 +39,7 @@ class Globals(object): FILTERS = None + TESTS = None def __init__(self): pass @@ -54,10 +55,26 @@ def _get_filters(): filters = {} for fp in plugins: filters.update(fp.filters()) + filters.update(_get_tests()) Globals.FILTERS = filters return Globals.FILTERS +def _get_tests(): + ''' return test plugin instances ''' + 
+ if Globals.TESTS is not None: + return Globals.TESTS + + from ansible import utils + plugins = [ x for x in utils.plugins.test_loader.all()] + tests = {} + for tp in plugins: + tests.update(tp.tests()) + Globals.TESTS = tests + + return Globals.TESTS + def _get_extensions(): ''' return jinja2 extensions to load ''' @@ -237,6 +254,7 @@ def my_finalize(thing): environment = jinja2.Environment(loader=loader, trim_blocks=True, extensions=_get_extensions()) environment.filters.update(_get_filters()) + environment.tests.update(_get_tests()) environment.globals['lookup'] = my_lookup environment.globals['finalize'] = my_finalize if fail_on_undefined: @@ -351,6 +369,7 @@ def my_finalize(thing): environment = jinja2.Environment(trim_blocks=True, undefined=StrictUndefined, extensions=_get_extensions(), finalize=my_finalize) environment.filters.update(_get_filters()) + environment.tests.update(_get_tests()) environment.template_class = J2Template if '_original_file' in vars: diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 913df310c15446..2fbb4d39c531f7 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -162,6 +162,7 @@ def shell_expand_path(path): DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') +DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test_plugins:/usr/share/ansible_plugins/test_plugins') CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = 
get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index d16eecd3c39921..1c445c3f5a1674 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -311,6 +311,13 @@ def all(self, *args, **kwargs): 'filter_plugins' ) +test_loader = PluginLoader( + 'TestModule', + 'ansible.plugins.test', + C.DEFAULT_TEST_PLUGIN_PATH, + 'test_plugins' +) + fragment_loader = PluginLoader( 'ModuleDocFragment', 'ansible.utils.module_docs_fragments', diff --git a/v2/ansible/template/__init__.py b/v2/ansible/template/__init__.py index 6c41ad3cf40697..9e15bb3bd82d11 100644 --- a/v2/ansible/template/__init__.py +++ b/v2/ansible/template/__init__.py @@ -28,7 +28,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable -from ansible.plugins import filter_loader, lookup_loader +from ansible.plugins import filter_loader, lookup_loader, test_loader from ansible.template.safe_eval import safe_eval from ansible.template.template import AnsibleJ2Template from ansible.template.vars import AnsibleJ2Vars @@ -57,6 +57,7 @@ def __init__(self, loader, variables=dict(), fail_on_undefined=C.DEFAULT_UNDEFIN self._loader = loader self._basedir = loader.get_basedir() self._filters = None + self._tests = None self._available_variables = variables # flags to determine whether certain failures during templating @@ -93,11 +94,28 @@ def _get_filters(self): self._filters = dict() for fp in plugins: self._filters.update(fp.filters()) + self._filters.update(self._get_tests()) return self._filters.copy() + def _get_tests(self): + ''' + Returns tests plugins, after loading and caching them if need be + ''' + + if self._tests is not None: + return self._tests.copy() + + plugins = [x for x in test_loader.all()] + + self._tests = dict() + for fp in plugins: + self._tests.update(fp.tests()) + + return 
self._tests.copy() + def _get_extensions(self): - ''' + ''' Return jinja2 extensions to load. If some extensions are set via jinja_extensions in ansible.cfg, we try @@ -229,6 +247,7 @@ def _do_template(self, data, preserve_trailing_newlines=False): environment = Environment(trim_blocks=True, undefined=StrictUndefined, extensions=self._get_extensions(), finalize=self._finalize) environment.filters.update(self._get_filters()) + environment.tests.update(self._get_tests()) environment.template_class = AnsibleJ2Template # FIXME: may not be required anymore, as the basedir stuff will diff --git a/v2/ansible/template/safe_eval.py b/v2/ansible/template/safe_eval.py index 268994950443d4..5e2d1e1fe38cfc 100644 --- a/v2/ansible/template/safe_eval.py +++ b/v2/ansible/template/safe_eval.py @@ -23,7 +23,7 @@ from six.moves import builtins from ansible import constants as C -from ansible.plugins import filter_loader +from ansible.plugins import filter_loader, test_loader def safe_eval(expr, locals={}, include_exceptions=False): ''' @@ -77,7 +77,11 @@ def safe_eval(expr, locals={}, include_exceptions=False): for filter in filter_loader.all(): filter_list.extend(filter.filters().keys()) - CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list = [] + for test in test_loader.all(): + test_list.extend(test.tests().keys()) + + CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list class CleansingNodeVisitor(ast.NodeVisitor): def generic_visit(self, node, inside_call=False): From bf003d31e0298023e9e46096c081dcd6ed54eb03 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 27 Apr 2015 08:57:09 -0700 Subject: [PATCH 0440/3617] Not a full port to v2's api, just a few fixups --- v2/ansible/plugins/connections/accelerate.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/v2/ansible/plugins/connections/accelerate.py b/v2/ansible/plugins/connections/accelerate.py index 1095ed049c8858..d0bd5ad3d1efa3 100644 --- 
a/v2/ansible/plugins/connections/accelerate.py +++ b/v2/ansible/plugins/connections/accelerate.py @@ -26,8 +26,9 @@ import time from ansible.callbacks import vvv, vvvv from ansible.errors import AnsibleError, AnsibleFileNotFound -from ansible.runner.connection_plugins.ssh import Connection as SSHConnection -from ansible.runner.connection_plugins.paramiko_ssh import Connection as ParamikoConnection +from . import ConnectionBase +from .ssh import Connection as SSHConnection +from .paramiko_ssh import Connection as ParamikoConnection from ansible import utils from ansible import constants @@ -38,7 +39,7 @@ # multiple of the value to speed up file reads. CHUNK_SIZE=1044*20 -class Connection(object): +class Connection(ConnectionBase): ''' raw socket accelerated connection ''' def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs): @@ -91,6 +92,11 @@ def __init__(self, runner, host, port, user, password, private_key_file, *args, if getattr(self.runner, 'aes_keys', None): utils.AES_KEYS = self.runner.aes_keys + @property + def transport(self): + """String used to identify this Connection class from other classes""" + return 'accelerate' + def _execute_accelerate_module(self): args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % ( base64.b64encode(self.key.__str__()), From 50da8812d6431a16c213587b9c39787b3d6357fd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 27 Apr 2015 10:50:08 -0700 Subject: [PATCH 0441/3617] Fix up connection plugin test for new_stdin parameter --- v2/test/plugins/test_connection.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/v2/test/plugins/test_connection.py b/v2/test/plugins/test_connection.py index bf78a08c89d534..0ed888ac95d7c4 100644 --- a/v2/test/plugins/test_connection.py +++ b/v2/test/plugins/test_connection.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from six import StringIO + from 
ansible.compat.tests import unittest from ansible.executor.connection_info import ConnectionInformation @@ -29,7 +31,7 @@ #from ansible.plugins.connections.jail import Connection as JailConnection #from ansible.plugins.connections.libvirt_lxc import Connection as LibvirtLXCConnection from ansible.plugins.connections.local import Connection as LocalConnection -#from ansible.plugins.connections.paramiko_ssh import Connection as ParamikoConnection +from ansible.plugins.connections.paramiko_ssh import Connection as ParamikoConnection from ansible.plugins.connections.ssh import Connection as SSHConnection #from ansible.plugins.connections.winrm import Connection as WinRmConnection @@ -37,6 +39,7 @@ class TestConnectionBaseClass(unittest.TestCase): def setUp(self): self.conn_info = ConnectionInformation() + self.in_stream = StringIO() def tearDown(self): pass @@ -69,7 +72,7 @@ def fetch_file(self): pass def close(self): pass - self.assertIsInstance(ConnectionModule3(self.conn_info), ConnectionModule3) + self.assertIsInstance(ConnectionModule3(self.conn_info, self.in_stream), ConnectionModule3) # def test_accelerate_connection_module(self): # self.assertIsInstance(AccelerateConnection(), AccelerateConnection) @@ -87,13 +90,13 @@ def close(self): # self.assertIsInstance(LibvirtLXCConnection(), LibvirtLXCConnection) def test_local_connection_module(self): - self.assertIsInstance(LocalConnection(self.conn_info), LocalConnection) + self.assertIsInstance(LocalConnection(self.conn_info, self.in_stream), LocalConnection) -# def test_paramiko_connection_module(self): -# self.assertIsInstance(ParamikoConnection(self.conn_info), ParamikoConnection) + def test_paramiko_connection_module(self): + self.assertIsInstance(ParamikoConnection(self.conn_info, self.in_stream), ParamikoConnection) def test_ssh_connection_module(self): - self.assertIsInstance(SSHConnection(self.conn_info), SSHConnection) + self.assertIsInstance(SSHConnection(self.conn_info, self.in_stream), SSHConnection) # 
def test_winrm_connection_module(self): # self.assertIsInstance(WinRmConnection(), WinRmConnection) From 223c2a27216e414a707a60d722f8be6171a9dae1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Apr 2015 10:44:43 -0700 Subject: [PATCH 0442/3617] Update submodules --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- v2/ansible/modules/extras | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index a19fa6ba48bf09..e95c0b2df33cf8 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit a19fa6ba48bf092b574eb6ee40f38f06500d767d +Subproject commit e95c0b2df33cf84c517366b9a674454447ce6c3a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index df7fcc90d9a179..bef4eee0aa33d5 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit df7fcc90d9a17956ec156066e8fc31e5ed8106e6 +Subproject commit bef4eee0aa33d555381bb14946ce9b5c9faefb7b diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras index df7fcc90d9a179..bef4eee0aa33d5 160000 --- a/v2/ansible/modules/extras +++ b/v2/ansible/modules/extras @@ -1 +1 @@ -Subproject commit df7fcc90d9a17956ec156066e8fc31e5ed8106e6 +Subproject commit bef4eee0aa33d555381bb14946ce9b5c9faefb7b From 39650efc38558a4819c04f8ce3e99536386e092a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Apr 2015 11:11:49 -0700 Subject: [PATCH 0443/3617] Fix title underline for rst --- docsite/rst/become.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 83f8ce1bb8a759..42484d9816afd8 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -44,7 +44,7 @@ ansible_become_pass New command line options ------------------------ +------------------------ --ask-become-pass ask for privilege escalation password From 8b620640b04049226f8a36664c821437d3039bc1 Mon 
Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Apr 2015 11:13:29 -0700 Subject: [PATCH 0444/3617] Update extras submodule refs to pick up docs fixes --- lib/ansible/modules/extras | 2 +- v2/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index bef4eee0aa33d5..764a0e26b6df02 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit bef4eee0aa33d555381bb14946ce9b5c9faefb7b +Subproject commit 764a0e26b6df02cf2924254589a065918b6ca5d6 diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras index bef4eee0aa33d5..764a0e26b6df02 160000 --- a/v2/ansible/modules/extras +++ b/v2/ansible/modules/extras @@ -1 +1 @@ -Subproject commit bef4eee0aa33d555381bb14946ce9b5c9faefb7b +Subproject commit 764a0e26b6df02cf2924254589a065918b6ca5d6 From 47c3d75c3cac67875e6711e992a3d95c4351cad3 Mon Sep 17 00:00:00 2001 From: Jeff Bachtel Date: Tue, 28 Apr 2015 14:17:53 -0400 Subject: [PATCH 0445/3617] Add test for https://github.com/ansible/ansible/issues/9851 --- .../roles/test_filters/files/9851.txt | 3 +++ .../roles/test_filters/tasks/main.yml | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 test/integration/roles/test_filters/files/9851.txt diff --git a/test/integration/roles/test_filters/files/9851.txt b/test/integration/roles/test_filters/files/9851.txt new file mode 100644 index 00000000000000..70b12793e1394d --- /dev/null +++ b/test/integration/roles/test_filters/files/9851.txt @@ -0,0 +1,3 @@ + [{ + "k": "Quotes \"'\n" +}] diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml index 3d1ee322e30e96..c4872b50375981 100644 --- a/test/integration/roles/test_filters/tasks/main.yml +++ b/test/integration/roles/test_filters/tasks/main.yml @@ -25,6 +25,25 @@ - name: Verify that we workaround a py26 json bug template: src=py26json.j2 
dest={{output_dir}}/py26json.templated mode=0644 +- name: 9851 - Verify that we don't trigger https://github.com/ansible/ansible/issues/9851 + copy: + content: " [{{item|to_nice_json}}]" + dest: "{{output_dir}}/9851.out" + with_items: + - {"k": "Quotes \"'\n"} + +- name: 9851 - copy known good output into place + copy: src=9851.txt dest={{output_dir}}/9851.txt + +- name: 9851 - Compare generated json to known good + shell: diff {{output_dir}}/9851.out {{output_dir}}/9851.txt + register: 9851_diff_result + +- name: 9851 - verify generated file matches known good + assert: + that: + - '9851_diff_result.stdout == ""' + - name: fill in a basic template template: src=foo.j2 dest={{output_dir}}/foo.templated mode=0644 register: template_result From 2bf95aaa2d33ee9a1d95bc5c84dd39ccfc62a956 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 28 Apr 2015 14:18:57 -0400 Subject: [PATCH 0446/3617] fixed default become user to be 'root' --- v2/ansible/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 913df310c15446..eaca382a98e5d2 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -147,7 +147,7 @@ def shell_expand_path(path): BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() -DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', None) +DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root') DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) # need to rethink 
impementing these 2 DEFAULT_BECOME_EXE = None From 84fe6655d109396c629b1219c58b7bbc681c8155 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 28 Apr 2015 13:26:05 -0500 Subject: [PATCH 0447/3617] Fixing option order in connection info (v2) --- v2/ansible/executor/connection_info.py | 7 ++----- v2/samples/test_sudo.yml | 7 +++++++ 2 files changed, 9 insertions(+), 5 deletions(-) create mode 100644 v2/samples/test_sudo.yml diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index 05fd5e8784cffe..7c9c9892ba5dcd 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -65,14 +65,13 @@ def __init__(self, play=None, options=None, passwords=None): self.no_log = False self.check_mode = False - if play: - self.set_play(play) - #TODO: just pull options setup to above? # set options before play to allow play to override them if options: self.set_options(options) + if play: + self.set_play(play) def __repr__(self): value = "CONNECTION INFO:\n" @@ -136,8 +135,6 @@ def set_options(self, options): if options.check: self.check_mode = boolean(options.check) - - # get the tag info from options, converting a comma-separated list # of values into a proper list if need be. 
We check to see if the # options have the attribute, as it is not always added via the CLI diff --git a/v2/samples/test_sudo.yml b/v2/samples/test_sudo.yml new file mode 100644 index 00000000000000..b8f7e168d07352 --- /dev/null +++ b/v2/samples/test_sudo.yml @@ -0,0 +1,7 @@ +- hosts: ubuntu1404 + gather_facts: no + remote_user: testing + tasks: + - command: whoami + - apt: update_cache=yes + sudo: yes From dc12669c405e91e5545b3d6d2b7e044d6440425f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Apr 2015 11:41:58 -0700 Subject: [PATCH 0448/3617] Another test case for testing splitter parsing --- v2/test/parsing/test_splitter.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/v2/test/parsing/test_splitter.py b/v2/test/parsing/test_splitter.py index fc2c05d36fb1f0..1f648c8f6a8a8f 100644 --- a/v2/test/parsing/test_splitter.py +++ b/v2/test/parsing/test_splitter.py @@ -84,6 +84,9 @@ class TestSplitter_Gen: (u'a={{jinja}} b={{jinja2}}', [u'a={{jinja}}', u'b={{jinja2}}'], {u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}), + (u'a="{{jinja}}\n" b="{{jinja2}}\n"', + [u'a="{{jinja}}\n"', u'b="{{jinja2}}\n"'], + {u'a': u'{{jinja}}\n', u'b': u'{{jinja2}}\n'}), (u'a="café eñyei"', [u'a="café eñyei"'], {u'a': u'café eñyei'}), From 38465283669829a4b9255976a889d9d7aef093bc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 28 Apr 2015 16:38:53 -0400 Subject: [PATCH 0449/3617] clarify role spec, dependencies and galaxy involvment. 
fixes #10832 --- docsite/rst/playbooks_roles.rst | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 3ffabe835d3828..b46474a89a26ba 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -301,12 +301,8 @@ Role dependencies can also be specified as a full path, just like top level role dependencies: - { role: '/path/to/common/roles/foo', x: 1 } -Role dependencies can also be installed from source control repos or tar files, using a comma separated format of path, an optional version (tag, commit, branch etc) and optional friendly role name (an attempt is made to derive a role name from the repo name or archive filename):: +Role dependencies can also be installed from source control repos or tar files (via `galaxy`) using comma separated format of path, an optional version (tag, commit, branch etc) and optional friendly role name (an attempt is made to derive a role name from the repo name or archive filename). Both through the command line or via a requirements.yml passed to ansible-galaxy. - --- - dependencies: - - { role: 'git+http://git.example.com/repos/role-foo,v1.1,foo' } - - { role: '/path/to/tar/file.tgz,,friendly-name' } Roles dependencies are always executed before the role that includes them, and are recursive. 
By default, roles can also only be added as a dependency once - if another role also lists it as a dependency it will From cf3f7b0043bed07415b6fab9578894a91cdf75b4 Mon Sep 17 00:00:00 2001 From: Daniel Farrell Date: Tue, 28 Apr 2015 18:24:01 -0400 Subject: [PATCH 0450/3617] Correct minor grammar error in Playbook intro docs Signed-off-by: Daniel Farrell --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index a27285b4a9ff18..3899502ed475cf 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -148,7 +148,7 @@ Remote users can also be defined per task:: The `remote_user` parameter for tasks was added in 1.4. -Support for running things from as another user is also available (see :doc:`become`):: +Support for running things as another user is also available (see :doc:`become`):: --- - hosts: webservers From 4bb37b82c4f97a586ed0932d423d622bae1515c0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 29 Apr 2015 01:06:33 -0500 Subject: [PATCH 0451/3617] Fix duplicate callback issue in v2 All v2+ callbacks can now optionally define a CALLBACK_TYPE, which when set to 'stdout' will limit those callbacks which are used for primary output to a single callback plugin (specified to the TaskQueueManager object and configurable in ansible.cfg/environment) --- v2/ansible/constants.py | 1 + v2/ansible/executor/playbook_executor.py | 2 +- v2/ansible/executor/task_queue_manager.py | 47 ++++++++++++++++++----- v2/ansible/plugins/__init__.py | 7 +++- v2/ansible/plugins/callback/default.py | 1 + v2/ansible/plugins/callback/minimal.py | 1 + v2/bin/ansible | 2 +- 7 files changed, 48 insertions(+), 13 deletions(-) diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index eaca382a98e5d2..09935693ace2f8 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -162,6 +162,7 @@ def shell_expand_path(path): 
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') +DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default') CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 6f0bf31f3374a8..777587f7536f3a 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -48,7 +48,7 @@ def __init__(self, playbooks, inventory, variable_manager, loader, display, opti if options.listhosts or options.listtasks or options.listtags: self._tqm = None else: - self._tqm = TaskQueueManager(inventory=inventory, callback='default', variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords) + self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords) def run(self): diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index e13930c6df8414..5f09e7ff8a89ab 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -24,6 +24,7 @@ import socket import sys +from ansible import constants as C from ansible.errors import AnsibleError from 
ansible.executor.connection_info import ConnectionInformation from ansible.executor.play_iterator import PlayIterator @@ -48,7 +49,7 @@ class TaskQueueManager: which dispatches the Play's tasks to hosts. ''' - def __init__(self, inventory, callback, variable_manager, loader, display, options, passwords): + def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None): self._inventory = inventory self._variable_manager = variable_manager @@ -70,14 +71,8 @@ def __init__(self, inventory, callback, variable_manager, loader, display, optio self._final_q = multiprocessing.Queue() - # load all available callback plugins - # FIXME: we need an option to white-list callback plugins - self._callback_plugins = [] - for callback_plugin in callback_loader.all(class_only=True): - if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0: - self._callback_plugins.append(callback_plugin(self._display)) - else: - self._callback_plugins.append(callback_plugin()) + # load callback plugins + self._callback_plugins = self._load_callbacks(stdout_callback) # create the pool of worker threads, based on the number of forks specified try: @@ -120,6 +115,40 @@ def _initialize_notified_handlers(self, handlers): for handler in handler_list: self._notified_handlers[handler.get_name()] = [] + def _load_callbacks(self, stdout_callback): + ''' + Loads all available callbacks, with the exception of those which + utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout', + only one such callback plugin will be loaded. 
+ ''' + + loaded_plugins = [] + + stdout_callback_loaded = False + if stdout_callback is None: + stdout_callback = C.DEFAULT_STDOUT_CALLBACK + + if stdout_callback not in callback_loader: + raise AnsibleError("Invalid callback for stdout specified: %s" % stdout_callback) + + for callback_plugin in callback_loader.all(class_only=True): + if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0: + # we only allow one callback of type 'stdout' to be loaded, so check + # the name of the current plugin and type to see if we need to skip + # loading this callback plugin + callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None) + (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path)) + if callback_type == 'stdout': + if callback_name != stdout_callback or stdout_callback_loaded: + continue + stdout_callback_loaded = True + + loaded_plugins.append(callback_plugin(self._display)) + else: + loaded_plugins.append(callback_plugin()) + + return loaded_plugins + def run(self, play): ''' Iterates over the roles/tasks in a play, using the given (or default) diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index d16eecd3c39921..f81f8c9d387b16 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -243,9 +243,12 @@ def all(self, *args, **kwargs): if path not in self._module_cache: self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) if kwargs.get('class_only', False): - yield getattr(self._module_cache[path], self.class_name) + obj = getattr(self._module_cache[path], self.class_name) else: - yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + # set extra info on the module, in case we want it later + setattr(obj, '_original_path', path) + yield obj action_loader = PluginLoader( 'ActionModule', diff --git 
a/v2/ansible/plugins/callback/default.py b/v2/ansible/plugins/callback/default.py index bb87dc4a942c41..262303dc57049e 100644 --- a/v2/ansible/plugins/callback/default.py +++ b/v2/ansible/plugins/callback/default.py @@ -31,6 +31,7 @@ class CallbackModule(CallbackBase): ''' CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' def v2_on_any(self, *args, **kwargs): pass diff --git a/v2/ansible/plugins/callback/minimal.py b/v2/ansible/plugins/callback/minimal.py index 95dfaee87850c2..4e9c8fffd2d35d 100644 --- a/v2/ansible/plugins/callback/minimal.py +++ b/v2/ansible/plugins/callback/minimal.py @@ -32,6 +32,7 @@ class CallbackModule(CallbackBase): ''' CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' def v2_on_any(self, *args, **kwargs): pass diff --git a/v2/bin/ansible b/v2/bin/ansible index d269790983ea22..8966b4bc65fa72 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -150,7 +150,7 @@ class Cli(object): # now create a task queue manager to execute the play try: display = Display() - tqm = TaskQueueManager(inventory=inventory, callback='minimal', variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords) + tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords, stdout_callback='minimal') result = tqm.run(play) tqm.cleanup() except AnsibleError: From 288fe1179a827e0457f36d3b465d5e12bd48162d Mon Sep 17 00:00:00 2001 From: jaypei Date: Wed, 29 Apr 2015 19:39:39 +0800 Subject: [PATCH 0452/3617] Add lineinfile integration tests for quoted string Reference #10864 --- .../roles/test_lineinfile/tasks/main.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/integration/roles/test_lineinfile/tasks/main.yml b/test/integration/roles/test_lineinfile/tasks/main.yml index d809bf1983ea61..0c018ccaa59419 100644 --- a/test/integration/roles/test_lineinfile/tasks/main.yml +++ 
b/test/integration/roles/test_lineinfile/tasks/main.yml @@ -355,4 +355,22 @@ that: - "result.stat.checksum == '73b271c2cc1cef5663713bc0f00444b4bf9f4543'" +- name: insert a line into the quoted file with many double quotation strings + lineinfile: dest={{output_dir}}/test_quoting.txt line="\"quote\" and \"unquote\"" + register: result + +- name: assert that the quoted file was changed + assert: + that: + - result.changed + +- name: stat the quote test file + stat: path={{output_dir}}/test_quoting.txt + register: result + +- name: assert test checksum matches after backref line was replaced + assert: + that: + - "result.stat.checksum == 'b10ab2a3c3b6492680c8d0b1d6f35aa6b8f9e731'" + ################################################################### From b08e35bb8a4729993d22e97ba967f2bac21513e4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 29 Apr 2015 09:47:11 -0500 Subject: [PATCH 0453/3617] Fixing tag logic in v2 --- v2/ansible/playbook/block.py | 11 ----------- v2/ansible/playbook/taggable.py | 9 +++++++++ v2/ansible/playbook/task.py | 6 ------ 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index f8fc683694074f..b80deec6ed1e22 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -235,17 +235,6 @@ def evaluate_conditional(self, all_vars): return False return super(Block, self).evaluate_conditional(all_vars) - def evaluate_tags(self, only_tags, skip_tags, all_vars): - result = False - if len(self._dep_chain): - for dep in self._dep_chain: - result |= dep.evaluate_tags(only_tags=only_tags, skip_tags=skip_tags, all_vars=all_vars) - if self._parent_block is not None: - result |= self._parent_block.evaluate_tags(only_tags=only_tags, skip_tags=skip_tags, all_vars=all_vars) - elif self._role is not None: - result |= self._role.evaluate_tags(only_tags=only_tags, skip_tags=skip_tags, all_vars=all_vars) - return result | super(Block, 
self).evaluate_tags(only_tags=only_tags, skip_tags=skip_tags, all_vars=all_vars) - def set_loader(self, loader): self._loader = loader if self._parent_block: diff --git a/v2/ansible/playbook/taggable.py b/v2/ansible/playbook/taggable.py index ce1bdfcf8a7ff3..f721cd195f4cd5 100644 --- a/v2/ansible/playbook/taggable.py +++ b/v2/ansible/playbook/taggable.py @@ -39,6 +39,15 @@ def _load_tags(self, attr, ds): else: raise AnsibleError('tags must be specified as a list', obj=ds) + def _get_attr_tags(self): + ''' + Override for the 'tags' getattr fetcher, used from Base. + ''' + tags = self._attributes['tags'] + if hasattr(self, '_get_parent_attribute'): + tags.extend(self._get_parent_attribute('tags')) + return list(set(tags)) + def evaluate_tags(self, only_tags, skip_tags, all_vars): ''' this checks if the current item should be executed depending on tag options ''' diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 2c92dd4674ae42..bdffc13eb8097d 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -285,12 +285,6 @@ def evaluate_conditional(self, all_vars): return False return super(Task, self).evaluate_conditional(all_vars) - def evaluate_tags(self, only_tags, skip_tags, all_vars): - result = False - if self._block is not None: - result |= self._block.evaluate_tags(only_tags=only_tags, skip_tags=skip_tags, all_vars=all_vars) - return result | super(Task, self).evaluate_tags(only_tags=only_tags, skip_tags=skip_tags, all_vars=all_vars) - def set_loader(self, loader): ''' Sets the loader on this object and recursively on parent, child objects. 
From a346507b26c608d019b345874019b9982a282176 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 16:20:29 -0400 Subject: [PATCH 0454/3617] added os_server_facts to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 202174c23a15be..6d50354a18f3a1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ New Modules: * cloudstack: cs_securitygroup_rule * cloudstack: cs_vmsnapshot * maven_artifact + * openstack: os_server_facts * pushover * zabbix_host * zabbix_hostmacro From 1ff83b43ae321dcc08a6296c5a0dea4f64cdd7af Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 22:58:24 -0400 Subject: [PATCH 0455/3617] added error --- v2/ansible/utils/display.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index 0881627c4bf445..221c8bba699ccf 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -35,6 +35,7 @@ def __init__(self, verbosity=0): # list of all deprecation messages to prevent duplicate display self._deprecations = {} self._warns = {} + self._errors = {} def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False): msg2 = msg @@ -130,3 +131,12 @@ def banner(self, msg, color=None): star_len = 3 stars = "*" * star_len self.display("\n%s %s" % (msg, stars), color=color) + + def error(self, msg): + new_msg = "\n[ERROR]: %s" % msg + wrapped = textwrap.wrap(new_msg, 79) + new_msg = "\n".join(wrapped) + "\n" + if new_msg not in self._errors: + self.display(new_msg, color='bright red', stderr=True) + self._errors[new_msg] = 1 + From 532aefc2c87dcbfd601f7785c8e35ecee3c09fd4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 23:02:15 -0400 Subject: [PATCH 0456/3617] verbose is only to screen --- v2/ansible/utils/display.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index 
221c8bba699ccf..4a41974d939da5 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -23,7 +23,7 @@ import sys from ansible import constants as C -from ansible.errors import * +from ansible.errors import AnsibleError from ansible.utils.color import stringc class Display: @@ -84,7 +84,7 @@ def verbose(self, msg, host=None, caplevel=2): if host is None: self.display(msg, color='blue') else: - self.display("<%s> %s" % (host, msg), color='blue') + self.display("<%s> %s" % (host, msg), color='blue', screen_only=True) def deprecated(self, msg, version, removed=False): ''' used to print out a deprecation message.''' From 522c3feab977a1a4d42d2ddc667a0a993d70edea Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 23:10:46 -0400 Subject: [PATCH 0457/3617] made error color red from bright red --- v2/ansible/utils/display.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/utils/display.py b/v2/ansible/utils/display.py index 4a41974d939da5..d5b6ad71a93fd1 100644 --- a/v2/ansible/utils/display.py +++ b/v2/ansible/utils/display.py @@ -137,6 +137,6 @@ def error(self, msg): wrapped = textwrap.wrap(new_msg, 79) new_msg = "\n".join(wrapped) + "\n" if new_msg not in self._errors: - self.display(new_msg, color='bright red', stderr=True) + self.display(new_msg, color='red', stderr=True) self._errors[new_msg] = 1 From 14fb4383f3679f7bfb885de1169a32d794430144 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 23:11:02 -0400 Subject: [PATCH 0458/3617] now uses display.error --- v2/bin/ansible | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/bin/ansible b/v2/bin/ansible index 8966b4bc65fa72..b4f651ffdaa1b9 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -183,8 +183,8 @@ if __name__ == '__main__': (options, args) = cli.parse() sys.exit(cli.run(options, args)) except AnsibleError as e: - display.display("[ERROR]: %s" % e, color='red', stderr=True) + display.error(str(e)) sys.exit(1) 
except KeyboardInterrupt: - display.display("[ERROR]: interrupted", color='red', stderr=True) + display.error("interrupted") sys.exit(1) From 9898522a00c9d436545183b443e8c2abae0d421e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 23:12:37 -0400 Subject: [PATCH 0459/3617] now all cli use display.error --- v2/bin/ansible-playbook | 4 ++-- v2/bin/ansible-vault | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index a182f629aaa507..d9247fef1c745f 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -191,8 +191,8 @@ if __name__ == "__main__": try: sys.exit(main(display, sys.argv[1:])) except AnsibleError as e: - display.display("[ERROR]: %s" % e, color='red', stderr=True) + display.error(str(e)) sys.exit(1) except KeyboardInterrupt: - display.display("[ERROR]: interrupted", color='red', stderr=True) + display.error("interrupted") sys.exit(1) diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault index 506402ee15f935..638d80ba9ed8ec 100755 --- a/v2/bin/ansible-vault +++ b/v2/bin/ansible-vault @@ -186,8 +186,8 @@ if __name__ == "__main__": (options, args) = cli.parse() sys.exit(cli.run(options, args)) except AnsibleError as e: - display.display("[ERROR]: %s" % e, color='red', stderr=True) + display.error(str(e)) sys.exit(1) except KeyboardInterrupt: - display.display("[ERROR]: interrupted", color='red', stderr=True) + display.error("interrupted") sys.exit(1) From 75b969e2d7d03834551bbfef04e3643284dc5ef7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 22 Apr 2015 23:41:05 -0400 Subject: [PATCH 0460/3617] initial galaxy port to v2 --- v2/ansible/constants.py | 5 + v2/ansible/galaxy/__init__.py | 48 ++ v2/ansible/galaxy/api.py | 139 +++++ v2/ansible/galaxy/data/metadata_template.j2 | 45 ++ v2/ansible/galaxy/data/readme | 38 ++ v2/ansible/galaxy/role.py | 290 ++++++++++ v2/bin/ansible-galaxy | 560 ++++++++++++++++++++ 7 files changed, 1125 insertions(+) create mode 
100644 v2/ansible/galaxy/__init__.py create mode 100755 v2/ansible/galaxy/api.py create mode 100644 v2/ansible/galaxy/data/metadata_template.j2 create mode 100644 v2/ansible/galaxy/data/readme create mode 100644 v2/ansible/galaxy/role.py create mode 100755 v2/bin/ansible-galaxy diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 09935693ace2f8..12eb8db413b741 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -203,6 +203,11 @@ def shell_expand_path(path): ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True) PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) +# galaxy related +DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') +# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated +GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True) + # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" diff --git a/v2/ansible/galaxy/__init__.py b/v2/ansible/galaxy/__init__.py new file mode 100644 index 00000000000000..c3d37fe22e91e6 --- /dev/null +++ b/v2/ansible/galaxy/__init__.py @@ -0,0 +1,48 @@ +######################################################################## +# +# (C) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +######################################################################## +''' This manages remote shared Ansible objects, mainly roles''' + +import os + +from ansible.errors import AnsibleError +from ansible.utils.display import Display + +class Galaxy(object): + ''' Keeps global galaxy info ''' + + def __init__(self, options, display=None): + + if display is None: + self.display = Display() + else: + self.display = display + + self.options = options + self.roles_path = os.path.expanduser(self.options.roles_path) + + self.roles = {} + + def add_role(self, role): + self.roles[role.name] = role + + def remove_role(self, role_name): + del self.roles[role_name] + diff --git a/v2/ansible/galaxy/api.py b/v2/ansible/galaxy/api.py new file mode 100755 index 00000000000000..a9d1566e049bad --- /dev/null +++ b/v2/ansible/galaxy/api.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python + +######################################################################## +# +# (C) 2013, James Cammarata +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +######################################################################## +import json +from urllib2 import urlopen, quote as urlquote +from urlparse import urlparse + +from ansible.errors import AnsibleError + +class GalaxyAPI(object): + ''' This class is meant to be used as a API client for an Ansible Galaxy server ''' + + SUPPORTED_VERSIONS = ['v1'] + + def __init__(self, galaxy, api_server): + + self.galaxy = galaxy + + try: + urlparse(api_server, scheme='https') + except: + raise AnsibleError("Invalid server API url passed: %s" % self.galaxy.api_server) + + server_version = self.get_server_api_version(api_server) + self.galaxy.display.vvvvv("Server version: %s" % server_version) + if server_version in self.SUPPORTED_VERSIONS: + self.baseurl = '%s/api/%s' % (api_server, server_version) + self.version = server_version # for future use + self.galaxy.display.vvvvv("Base API: %s" % self.baseurl) + else: + raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version) + + def get_server_api_version(self, api_server): + """ + Fetches the Galaxy API current version to ensure + the API server is up and reachable. + """ + + try: + self.galaxy.display.vvvvv("Querying server version: %s" % api_server) + data = json.load(urlopen(api_server)) + if not data.get("current_version", None): + return None + else: + return data + except: + return None + + def lookup_role_by_name(self, role_name, notify=True): + """ + Find a role by name + """ + + role_name = urlquote(role_name) + + try: + parts = role_name.split(".") + user_name = ".".join(parts[0:-1]) + role_name = parts[-1] + if notify: + self.galaxy.display.display("- downloading role '%s', owned by %s" % (role_name, user_name)) + except: + raise AnsibleError("- invalid role name (%s). 
Specify role as format: username.rolename" % role_name) + + url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name) + try: + data = json.load(urlopen(url)) + if len(data["results"]) != 0: + return data["results"][0] + except: + # TODO: report on connection/availability errors + pass + + return None + + def fetch_role_related(self, related, role_id): + """ + Fetch the list of related items for the given role. + The url comes from the 'related' field of the role. + """ + + try: + url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related) + data = json.load(urlopen(url)) + results = data['results'] + done = (data.get('next', None) == None) + while not done: + url = '%s%s' % (self.baseurl, data['next']) + self.galaxy.display.display(url) + data = json.load(urlopen(url)) + results += data['results'] + done = (data.get('next', None) == None) + return results + except: + return None + + def get_list(self, what): + """ + Fetch the list of items specified. 
+ """ + + try: + url = '%s/%s/?page_size' % (self.baseurl, what) + data = json.load(urlopen(url)) + if "results" in data: + results = data['results'] + else: + results = data + done = True + if "next" in data: + done = (data.get('next', None) == None) + while not done: + url = '%s%s' % (self.baseurl, data['next']) + self.galaxy.display.display(url) + data = json.load(urlopen(url)) + results += data['results'] + done = (data.get('next', None) == None) + return results + except Exception as error: + raise AnsibleError("Failed to download the %s list: %s" % (what, str(error))) diff --git a/v2/ansible/galaxy/data/metadata_template.j2 b/v2/ansible/galaxy/data/metadata_template.j2 new file mode 100644 index 00000000000000..328e13a814c480 --- /dev/null +++ b/v2/ansible/galaxy/data/metadata_template.j2 @@ -0,0 +1,45 @@ +galaxy_info: + author: {{ author }} + description: {{description}} + company: {{ company }} + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: {{ issue_tracker_url }} + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: {{ license }} + min_ansible_version: {{ min_ansible_version }} + # + # Below are all platforms currently available. Just uncomment + # the ones that apply to your role. If you don't see your + # platform on this list, let us know and we'll get it added! + # + #platforms: + {%- for platform,versions in platforms.iteritems() %} + #- name: {{ platform }} + # versions: + # - all + {%- for version in versions %} + # - {{ version }} + {%- endfor %} + {%- endfor %} + # + # Below are all categories currently available. Just as with + # the platforms above, uncomment those that apply to your role. + # + #categories: + {%- for category in categories %} + #- {{ category.name }} + {%- endfor %} +dependencies: [] + # List your role dependencies here, one per line. 
+ # Be sure to remove the '[]' above if you add dependencies + # to this list. + {% for dependency in dependencies %} + #- {{ dependency }} + {% endfor %} diff --git a/v2/ansible/galaxy/data/readme b/v2/ansible/galaxy/data/readme new file mode 100644 index 00000000000000..225dd44b9fc5b3 --- /dev/null +++ b/v2/ansible/galaxy/data/readme @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
diff --git a/v2/ansible/galaxy/role.py b/v2/ansible/galaxy/role.py new file mode 100644 index 00000000000000..89d8399b2da02b --- /dev/null +++ b/v2/ansible/galaxy/role.py @@ -0,0 +1,290 @@ +######################################################################## +# +# (C) 2015, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +######################################################################## + +import datetime +import os +import subprocess +import tarfile +import tempfile +import yaml +from shutil import rmtree +from urllib2 import urlopen + +from ansible import constants as C +from ansible.errors import AnsibleError + +class GalaxyRole(object): + + SUPPORTED_SCMS = set(['git', 'hg']) + META_MAIN = os.path.join('meta', 'main.yml') + META_INSTALL = os.path.join('meta', '.galaxy_install_info') + + def __init__(self, galaxy, role_name, role_version=None, role_url=None): + + self.options = galaxy.options + self.display = galaxy.display + + self.name = role_name + self.meta_data = None + self.install_info = None + self.role_path = (os.path.join(self.roles_path, self.name)) + + # TODO: possibly parse version and url from role_name + self.version = role_version + self.url = role_url + if self.url is None and '://' in self.name: + self.url = self.name + + if C.GALAXY_SCMS: + self.scms = self.SUPPORTED_SCMS.intersection(set(C.GALAXY_SCMS)) + else: + self.scms = 
self.SUPPORTED_SCMS + + if not self.scms: + self.display.warning("No valid SCMs configured for Galaxy.") + + + def fetch_from_scm_archive(self, scm, role_url, role_version): + + # this can be configured to prevent unwanted SCMS but cannot add new ones unless the code is also updated + if scm not in self.scms: + self.display.display("The %s scm is not currently supported" % scm) + return False + + tempdir = tempfile.mkdtemp() + clone_cmd = [scm, 'clone', role_url, self.name] + with open('/dev/null', 'w') as devnull: + try: + self.display.display("- executing: %s" % " ".join(clone_cmd)) + popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull) + except: + raise AnsibleError("error executing: %s" % " ".join(clone_cmd)) + rc = popen.wait() + if rc != 0: + self.display.display("- command %s failed" % ' '.join(clone_cmd)) + self.display.display(" in directory %s" % tempdir) + return False + + temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar') + if scm == 'hg': + archive_cmd = ['hg', 'archive', '--prefix', "%s/" % self.name] + if role_version: + archive_cmd.extend(['-r', role_version]) + archive_cmd.append(temp_file.name) + if scm == 'git': + archive_cmd = ['git', 'archive', '--prefix=%s/' % self.name, '--output=%s' % temp_file.name] + if role_version: + archive_cmd.append(role_version) + else: + archive_cmd.append('HEAD') + + with open('/dev/null', 'w') as devnull: + self.display.display("- executing: %s" % " ".join(archive_cmd)) + popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, self.name), + stderr=devnull, stdout=devnull) + rc = popen.wait() + if rc != 0: + self.display.display("- command %s failed" % ' '.join(archive_cmd)) + self.display.display(" in directory %s" % tempdir) + return False + + rmtree(tempdir, ignore_errors=True) + + return temp_file.name + + + + def read_metadata(self): + """ + Reads the metadata as YAML, if the file 'meta/main.yml' exists + """ + meta_path = os.path.join(self.role_path, 
self.META_MAIN) + if os.path.isfile(meta_path): + try: + f = open(meta_path, 'r') + self.meta_data = yaml.safe_load(f) + except: + self.display.vvvvv("Unable to load metadata for %s" % self.name) + return False + finally: + f.close() + + return True + + def read_galaxy_install_info(self): + """ + Returns the YAML data contained in 'meta/.galaxy_install_info', + if it exists. + """ + + info_path = os.path.join(self.role_path, self.META_INSTALL) + if os.path.isfile(info_path): + try: + f = open(info_path, 'r') + self.install_info = yaml.safe_load(f) + except: + self.display.vvvvv("Unable to load Galaxy install info for %s" % self.name) + return False + finally: + f.close() + + return True + + def write_galaxy_install_info(self): + """ + Writes a YAML-formatted file to the role's meta/ directory + (named .galaxy_install_info) which contains some information + we can use later for commands like 'list' and 'info'. + """ + + info = dict( + version=self.version, + install_date=datetime.datetime.utcnow().strftime("%c"), + ) + info_path = os.path.join(self.role_path, self.META_INSTALL) + try: + f = open(info_path, 'w+') + self.install_info = yaml.safe_dump(info, f) + except: + return False + finally: + f.close() + + return True + + def remove(self): + """ + Removes the specified role from the roles path. There is a + sanity check to make sure there's a meta/main.yml file at this + path so the user doesn't blow away random directories + """ + if self.read_metadata(): + try: + rmtree(self.role_path) + return True + except: + pass + + return False + + def fetch(self, target, role_data): + """ + Downloads the archived role from github to a temp location, extracts + it, and then copies the extracted role to the role library path. 
+ """ + + # first grab the file and save it to a temp location + if self.url: + archive_url = self.url + else: + archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target) + self.display.display("- downloading role from %s" % archive_url) + + try: + url_file = urlopen(archive_url) + temp_file = tempfile.NamedTemporaryFile(delete=False) + data = url_file.read() + while data: + temp_file.write(data) + data = url_file.read() + temp_file.close() + return temp_file.name + except: + # TODO: better urllib2 error handling for error + # messages that are more exact + self.display.error("failed to download the file.") + return False + + def install(self, role_version, role_filename): + # the file is a tar, so open it that way and extract it + # to the specified (or default) roles directory + + if not tarfile.is_tarfile(role_filename): + self.display.error("the file downloaded was not a tar.gz") + return False + else: + if role_filename.endswith('.gz'): + role_tar_file = tarfile.open(role_filename, "r:gz") + else: + role_tar_file = tarfile.open(role_filename, "r") + # verify the role's meta file + meta_file = None + members = role_tar_file.getmembers() + # next find the metadata file + for member in members: + if self.META_MAIN in member.name: + meta_file = member + break + if not meta_file: + self.display.error("this role does not appear to have a meta/main.yml file.") + return False + else: + try: + self.meta_data = yaml.safe_load(role_tar_file.extractfile(meta_file)) + except: + self.display.error("this role does not appear to have a valid meta/main.yml file.") + return False + + # we strip off the top-level directory for all of the files contained within + # the tar file here, since the default is 'github_repo-target', and change it + # to the specified role's name + self.display.display("- extracting %s to %s" % (self.name, self.role_path)) + try: + if os.path.exists(self.role_path): + if not 
os.path.isdir(self.role_path): + self.display.error("the specified roles path exists and is not a directory.") + return False + elif not getattr(self.options, "force", False): + self.display.error("the specified role %s appears to already exist. Use --force to replace it." % self.name) + return False + else: + # using --force, remove the old path + if not self.remove(): + self.display.error("%s doesn't appear to contain a role." % self.role_path) + self.display.error(" please remove this directory manually if you really want to put the role here.") + return False + else: + os.makedirs(self.role_path) + + # now we do the actual extraction to the role_path + for member in members: + # we only extract files, and remove any relative path + # bits that might be in the file for security purposes + # and drop the leading directory, as mentioned above + if member.isreg() or member.issym(): + parts = member.name.split(os.sep)[1:] + final_parts = [] + for part in parts: + if part != '..' and '~' not in part and '$' not in part: + final_parts.append(part) + member.name = os.path.join(*final_parts) + role_tar_file.extract(member, self.role_path) + + # write out the install info file for later use + self.version = role_version + self.write_galaxy_install_info() + except OSError as e: + self.display.error("Could not update files in %s: %s" % (self.role_path, str(e))) + return False + + # return the parsed yaml metadata + self.display.display("- %s was installed successfully" % self.role_name) + return True diff --git a/v2/bin/ansible-galaxy b/v2/bin/ansible-galaxy new file mode 100755 index 00000000000000..1c8215b944fee1 --- /dev/null +++ b/v2/bin/ansible-galaxy @@ -0,0 +1,560 @@ +#!/usr/bin/env python + +######################################################################## +# +# (C) 2013, James Cammarata +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published 
by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +######################################################################## + +import datetime +import json +import os +import os.path +import shutil +import subprocess +import sys +import tarfile +import tempfile +import urllib +import urllib2 +import yaml + +from collections import defaultdict +from distutils.version import LooseVersion +from jinja2 import Environment +from optparse import OptionParser + +import ansible.constants as C +import ansible.utils +import ansible.galaxy +from ansible.errors import AnsibleError + +class Cli(object): + + VALID_ACTIONS = ("init", "info", "install", "list", "remove") + SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) + + def __init__(self): + + if display is None: + self.display = Display() + else: + self.display = display + self.action = None + + def set_action(args): + """ + Get the action the user wants to execute from the + sys argv list. + """ + for i in range(0,len(args)): + arg = args[i] + if arg in VALID_ACTIONS: + del args[i] + self.action = arg + + + def parse(self): + ''' create an options parser for bin/ansible ''' + usage = "usage: %%prog [%s] [--help] [options] ..." 
% "|".join(VALID_ACTIONS) + epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) + OptionParser.format_epilog = lambda self, formatter: self.epilog + parser = OptionParser(usage=usage, epilog=epilog) + + if not self.action: + parser.print_help() + sys.exit(1) + + # options specific to actions + if self.action == "info": + parser.set_usage("usage: %prog info [options] role_name[,version]") + elif self.action == "init": + parser.set_usage("usage: %prog init [options] role_name") + parser.add_option( + '-p', '--init-path', dest='init_path', default="./", + help='The path in which the skeleton role will be created. ' + 'The default is the current working directory.') + parser.add_option( + '--offline', dest='offline', default=False, action='store_true', + help="Don't query the galaxy API when creating roles") + elif self.action == "install": + parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") + parser.add_option( + '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, + help='Ignore errors and continue with the next specified role.') + parser.add_option( + '-n', '--no-deps', dest='no_deps', action='store_true', default=False, + help='Don\'t download roles listed as dependencies') + parser.add_option( + '-r', '--role-file', dest='role_file', + help='A file containing a list of roles to be imported') + elif self.action == "remove": + parser.set_usage("usage: %prog remove role1 role2 ...") + elif self.action == "list": + parser.set_usage("usage: %prog list [role_name]") + + # options that apply to more than one action + if self.action != "init": + parser.add_option( + '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, + help='The path to the directory containing your roles. 
' + 'The default is the roles_path configured in your ' + 'ansible.cfg file (/etc/ansible/roles if not configured)') + + if self.action in ("info","init","install"): + parser.add_option( + '-s', '--server', dest='api_server', default="galaxy.ansible.com", + help='The API server destination') + + if self.action in ("init","install"): + parser.add_option( + '-f', '--force', dest='force', action='store_true', default=False, + help='Force overwriting an existing role') + + # done, return the parser + options, args = parser.parse_args() + + if len(args) == 0 or len(args) > 1: + parser.print_help() + sys.exit(1) + + display.verbosity = options.verbosity + + return (options, args) + + def run(options, args): + + # execute the desired action + fn = getattr(self, "execute_%s" % self.action) + fn(args, options) + + def get_opt(options, k, defval=""): + """ + Returns an option from an Optparse values instance. + """ + try: + data = getattr(options, k) + except: + return defval + if k == "roles_path": + if os.pathsep in data: + data = data.split(os.pathsep)[0] + return data + + def exit_without_ignore(options, rc=1): + """ + Exits with the specified return code unless the + option --ignore-errors was specified + """ + + if not get_opt(options, "ignore_errors", False): + print '- you can use --ignore-errors to skip failed roles.' + sys.exit(rc) + + + + def execute_init(args, options, parser): + """ + Executes the init action, which creates the skeleton framework + of a role that complies with the galaxy metadata format. + """ + + init_path = get_opt(options, 'init_path', './') + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + force = get_opt(options, 'force', False) + offline = get_opt(options, 'offline', False) + + if not offline: + api_config = api_get_config(api_server) + if not api_config: + print "- the API server (%s) is not responding, please try again later." 
% api_server + sys.exit(1) + + try: + role_name = args.pop(0).strip() + if role_name == "": + raise Exception("") + role_path = os.path.join(init_path, role_name) + if os.path.exists(role_path): + if os.path.isfile(role_path): + print "- the path %s already exists, but is a file - aborting" % role_path + sys.exit(1) + elif not force: + print "- the directory %s already exists." % role_path + print " you can use --force to re-initialize this directory,\n" + \ + " however it will reset any main.yml files that may have\n" + \ + " been modified there already." + sys.exit(1) + except Exception, e: + parser.print_help() + print "- no role name specified for init" + sys.exit(1) + + ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') + + # create the default README.md + if not os.path.exists(role_path): + os.makedirs(role_path) + readme_path = os.path.join(role_path, "README.md") + f = open(readme_path, "wb") + f.write(default_readme_template) + f.close + + for dir in ROLE_DIRS: + dir_path = os.path.join(init_path, role_name, dir) + main_yml_path = os.path.join(dir_path, 'main.yml') + # create the directory if it doesn't exist already + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + # now create the main.yml file for that directory + if dir == "meta": + # create a skeleton meta/main.yml with a valid galaxy_info + # datastructure in place, plus with all of the available + # tags/platforms included (but commented out) and the + # dependencies section + platforms = [] + if not offline: + platforms = api_get_list(api_server, "platforms") or [] + categories = [] + if not offline: + categories = api_get_list(api_server, "categories") or [] + + # group the list of platforms from the api based + # on their names, with the release field being + # appended to a list of versions + platform_groups = defaultdict(list) + for platform in platforms: + platform_groups[platform['name']].append(platform['release']) + 
platform_groups[platform['name']].sort() + + inject = dict( + author = 'your name', + company = 'your company (optional)', + license = 'license (GPLv2, CC-BY, etc)', + issue_tracker_url = 'http://example.com/issue/tracker', + min_ansible_version = '1.2', + platforms = platform_groups, + categories = categories, + ) + rendered_meta = Environment().from_string(default_meta_template).render(inject) + f = open(main_yml_path, 'w') + f.write(rendered_meta) + f.close() + pass + elif dir not in ('files','templates'): + # just write a (mostly) empty YAML file for main.yml + f = open(main_yml_path, 'w') + f.write('---\n# %s file for %s\n' % (dir,role_name)) + f.close() + print "- %s was created successfully" % role_name + + def execute_info(args, options, parser): + """ + Executes the info action. This action prints out detailed + information about an installed role as well as info available + from the galaxy API. + """ + + if len(args) == 0: + # the user needs to specify a role + parser.print_help() + print "- you must specify a user/role name" + sys.exit(1) + + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + api_config = api_get_config(api_server) + roles_path = get_opt(options, "roles_path") + + for role in args: + + role_info = {} + + install_info = get_galaxy_install_info(role, options) + if install_info: + if 'version' in install_info: + install_info['intalled_version'] = install_info['version'] + del install_info['version'] + role_info.update(install_info) + + remote_data = api_lookup_role_by_name(api_server, role, False) + if remote_data: + role_info.update(remote_data) + + metadata = get_role_metadata(role, options) + if metadata: + role_info.update(metadata) + + role_spec = ansible.utils.role_spec_parse(role) + if role_spec: + role_info.update(role_spec) + + if role_info: + print "- %s:" % (role) + for k in sorted(role_info.keys()): + + if k in SKIP_INFO_KEYS: + continue + + if isinstance(role_info[k], dict): + print "\t%s: " % (k) + for key in 
sorted(role_info[k].keys()): + if key in SKIP_INFO_KEYS: + continue + print "\t\t%s: %s" % (key, role_info[k][key]) + else: + print "\t%s: %s" % (k, role_info[k]) + else: + print "- the role %s was not found" % role + + def execute_install(args, options, parser): + """ + Executes the installation action. The args list contains the + roles to be installed, unless -f was specified. The list of roles + can be a name (which will be downloaded via the galaxy API and github), + or it can be a local .tar.gz file. + """ + + role_file = get_opt(options, "role_file", None) + + if len(args) == 0 and role_file is None: + # the user needs to specify one of either --role-file + # or specify a single user/role name + parser.print_help() + print "- you must specify a user/role name or a roles file" + sys.exit() + elif len(args) == 1 and not role_file is None: + # using a role file is mutually exclusive of specifying + # the role name on the command line + parser.print_help() + print "- please specify a user/role name, or a roles file, but not both" + sys.exit(1) + + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + no_deps = get_opt(options, "no_deps", False) + roles_path = get_opt(options, "roles_path") + + roles_done = [] + if role_file: + f = open(role_file, 'r') + if role_file.endswith('.yaml') or role_file.endswith('.yml'): + roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f)) + else: + # roles listed in a file, one per line + roles_left = map(ansible.utils.role_spec_parse, f.readlines()) + f.close() + else: + # roles were specified directly, so we'll just go out grab them + # (and their dependencies, unless the user doesn't want us to). 
+ roles_left = map(ansible.utils.role_spec_parse, args) + + while len(roles_left) > 0: + # query the galaxy API for the role data + role_data = None + role = roles_left.pop(0) + role_src = role.get("src") + role_scm = role.get("scm") + role_path = role.get("path") + + if role_path: + options.roles_path = role_path + else: + options.roles_path = roles_path + + if os.path.isfile(role_src): + # installing a local tar.gz + tmp_file = role_src + else: + if role_scm: + # create tar file from scm url + tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name")) + elif '://' in role_src: + # just download a URL - version will probably be in the URL + tmp_file = fetch_role(role_src, None, None, options) + else: + # installing from galaxy + api_config = api_get_config(api_server) + if not api_config: + print "- the API server (%s) is not responding, please try again later." % api_server + sys.exit(1) + + role_data = api_lookup_role_by_name(api_server, role_src) + if not role_data: + print "- sorry, %s was not found on %s." % (role_src, api_server) + exit_without_ignore(options) + continue + + role_versions = api_fetch_role_related(api_server, 'versions', role_data['id']) + if "version" not in role or role['version'] == '': + # convert the version names to LooseVersion objects + # and sort them to get the latest version. If there + # are no versions in the list, we'll grab the head + # of the master branch + if len(role_versions) > 0: + loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] + loose_versions.sort() + role["version"] = str(loose_versions[-1]) + else: + role["version"] = 'master' + elif role['version'] != 'master': + if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]: + print 'role is %s' % role + print "- the specified version (%s) was not found in the list of available versions (%s)." 
% (role['version'], role_versions) + exit_without_ignore(options) + continue + + # download the role. if --no-deps was specified, we stop here, + # otherwise we recursively grab roles and all of their deps. + tmp_file = fetch_role(role_src, role["version"], role_data, options) + installed = False + if tmp_file: + installed = install_role(role.get("name"), role.get("version"), tmp_file, options) + # we're done with the temp file, clean it up + if tmp_file != role_src: + os.unlink(tmp_file) + # install dependencies, if we want them + if not no_deps and installed: + if not role_data: + role_data = get_role_metadata(role.get("name"), options) + role_dependencies = role_data['dependencies'] + else: + role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id']) + for dep in role_dependencies: + if isinstance(dep, basestring): + dep = ansible.utils.role_spec_parse(dep) + else: + dep = ansible.utils.role_yaml_parse(dep) + if not get_role_metadata(dep["name"], options): + if dep not in roles_left: + print '- adding dependency: %s' % dep["name"] + roles_left.append(dep) + else: + print '- dependency %s already pending installation.' % dep["name"] + else: + print '- dependency %s is already installed, skipping.' % dep["name"] + if not tmp_file or not installed: + print "- %s was NOT installed successfully." % role.get("name") + exit_without_ignore(options) + sys.exit(0) + + def execute_remove(args, options, parser): + """ + Executes the remove action. The args list contains the list + of roles to be removed. This list can contain more than one role. + """ + + if len(args) == 0: + parser.print_help() + print '- you must specify at least one role to remove.' 
+ sys.exit() + + for role in args: + if get_role_metadata(role, options): + if remove_role(role, options): + print '- successfully removed %s' % role + else: + print "- failed to remove role: %s" % role + else: + print '- %s is not installed, skipping.' % role + sys.exit(0) + + def execute_list(args, options, parser): + """ + Executes the list action. The args list can contain zero + or one role. If one is specified, only that role will be + shown, otherwise all roles in the specified directory will + be shown. + """ + + if len(args) > 1: + print "- please specify only one role to list, or specify no roles to see a full list" + sys.exit(1) + + if len(args) == 1: + # show only the request role, if it exists + role_name = args[0] + metadata = get_role_metadata(role_name, options) + if metadata: + install_info = get_galaxy_install_info(role_name, options) + version = None + if install_info: + version = install_info.get("version", None) + if not version: + version = "(unknown version)" + # show some more info about single roles here + print "- %s, %s" % (role_name, version) + else: + print "- the role %s was not found" % role_name + else: + # show all valid roles in the roles_path directory + roles_path = get_opt(options, 'roles_path') + roles_path = os.path.expanduser(roles_path) + if not os.path.exists(roles_path): + parser.print_help() + print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path + sys.exit(1) + elif not os.path.isdir(roles_path): + print "- %s exists, but it is not a directory. 
Please specify a valid path with --roles-path" % roles_path + parser.print_help() + sys.exit(1) + path_files = os.listdir(roles_path) + for path_file in path_files: + if get_role_metadata(path_file, options): + install_info = get_galaxy_install_info(path_file, options) + version = None + if install_info: + version = install_info.get("version", None) + if not version: + version = "(unknown version)" + print "- %s, %s" % (path_file, version) + sys.exit(0) + +#------------------------------------------------------------------------------------- +# The main entry point +#------------------------------------------------------------------------------------- + +#def main(): +# # parse the CLI options +# action = get_action(sys.argv) +# parser = build_option_parser(action) +# (options, args) = parser.parse_args() +# +# # execute the desired action +# if 1: #try: +# fn = globals()["execute_%s" % action] +# fn(args, options, parser) +# #except KeyError, e: +# # print "- error: %s is not a valid action. 
Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS)) +# # sys.exit(1) + + +if __name__ == '__main__': + + display = Display() + + try: + cli = Cli(display=display) + cli.set_action(sys.argv) + (options, args) = cli.parse() + sys.exit(cli.run(options, args)) + except AnsibleError as e: + display.error(str(e)) + sys.exit(1) + except KeyboardInterrupt: + display.error("interrupted") + sys.exit(1) From 950aa8511a1bbdbdfea3fd35179d7b93f1bdc5a5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 24 Apr 2015 21:49:28 -0400 Subject: [PATCH 0461/3617] no exceptions with less than 3 'v's --- v2/ansible/plugins/callback/default.py | 2 ++ v2/ansible/plugins/callback/minimal.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/v2/ansible/plugins/callback/default.py b/v2/ansible/plugins/callback/default.py index 262303dc57049e..de6548ef188cba 100644 --- a/v2/ansible/plugins/callback/default.py +++ b/v2/ansible/plugins/callback/default.py @@ -37,6 +37,8 @@ def v2_on_any(self, *args, **kwargs): pass def v2_runner_on_failed(self, result, ignore_errors=False): + if 'exception' in result._result and self._display.verbosity < 3: + del result._result['exception'] self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red') def v2_runner_on_ok(self, result): diff --git a/v2/ansible/plugins/callback/minimal.py b/v2/ansible/plugins/callback/minimal.py index 4e9c8fffd2d35d..c6b2282e62fd76 100644 --- a/v2/ansible/plugins/callback/minimal.py +++ b/v2/ansible/plugins/callback/minimal.py @@ -38,6 +38,8 @@ def v2_on_any(self, *args, **kwargs): pass def v2_runner_on_failed(self, result, ignore_errors=False): + if 'exception' in result._result and self._display.verbosity < 3: + del result._result['exception'] self._display.display("%s | FAILED! 
=> %s" % (result._host.get_name(), result._result), color='red') def v2_runner_on_ok(self, result): From 900b992ba9b0960a5416dc619df1e847d3044773 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 24 Apr 2015 22:31:06 -0400 Subject: [PATCH 0462/3617] fixed var name for ansible vault editing existing data --- v2/ansible/parsing/vault/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/parsing/vault/__init__.py b/v2/ansible/parsing/vault/__init__.py index 80c48a3b69c1a6..e45fddc197056c 100644 --- a/v2/ansible/parsing/vault/__init__.py +++ b/v2/ansible/parsing/vault/__init__.py @@ -203,7 +203,7 @@ def _edit_file_helper(self, existing_data=None, cipher=None): _, tmp_path = tempfile.mkstemp() if existing_data: - self.write_data(data, tmp_path) + self.write_data(existing_data, tmp_path) # drop the user into an editor on the tmp file call(self._editor_shell_command(tmp_path)) From cdefeb6d84499d86bf6fef8352b06d626c1bf4ae Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 27 Apr 2015 07:31:41 -0400 Subject: [PATCH 0463/3617] refactored most binaries added AnsibleOptionsError removed pulicate parser error class --- v2/ansible/constants.py | 19 +- v2/ansible/errors/__init__.py | 16 +- v2/ansible/galaxy/role.py | 117 +++++++-- v2/ansible/utils/cli.py | 478 ++++++++++++++++++++-------------- v2/bin/ansible | 116 ++++----- v2/bin/ansible-galaxy | 309 +++++++++------------- v2/bin/ansible-playbook | 277 ++++++++++---------- v2/bin/ansible-vault | 159 +++++------ 8 files changed, 779 insertions(+), 712 deletions(-) diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 12eb8db413b741..6f35751b506c47 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -40,13 +40,15 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False, ''' return a configuration variable with casting ''' value = _get_config(p, section, key, env_var, default) if boolean: - return mk_boolean(value) - if value and 
integer: - return int(value) - if value and floating: - return float(value) - if value and islist: - return [x.strip() for x in value.split(',')] + value = mk_boolean(value) + if value: + if integer: + value = int(value) + if floating: + value = float(value) + if islist: + if isinstance(value, basestring): + value = [x.strip() for x in value.split(',')] return value def _get_config(p, section, key, env_var, default): @@ -104,7 +106,7 @@ def shell_expand_path(path): # configurable things DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) -DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts'))) +DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts'))) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') @@ -212,6 +214,7 @@ def shell_expand_path(path): DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" # non-configurable things +MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] DEFAULT_BECOME_PASS = None DEFAULT_SUDO_PASS = None DEFAULT_REMOTE_PASS = None diff --git a/v2/ansible/errors/__init__.py b/v2/ansible/errors/__init__.py index 453e63de6e3c3b..63fb8ef023a596 100644 --- a/v2/ansible/errors/__init__.py +++ b/v2/ansible/errors/__init__.py @@ -140,6 +140,10 @@ def _get_extended_error(self): return error_message +class AnsibleOptionsError(AnsibleError): + ''' bad or incomplete options passed ''' + pass + class AnsibleParserError(AnsibleError): ''' something was detected early that is wrong about a playbook or data file ''' pass @@ -164,6 
+168,14 @@ class AnsibleFilterError(AnsibleRuntimeError): ''' a templating failure ''' pass +class AnsibleLookupError(AnsibleRuntimeError): + ''' a lookup failure ''' + pass + +class AnsibleCallbackError(AnsibleRuntimeError): + ''' a callback failure ''' + pass + class AnsibleUndefinedVariable(AnsibleRuntimeError): ''' a templating failure ''' pass @@ -171,7 +183,3 @@ class AnsibleUndefinedVariable(AnsibleRuntimeError): class AnsibleFileNotFound(AnsibleRuntimeError): ''' a file missing failure ''' pass - -class AnsibleParserError(AnsibleRuntimeError): - ''' a parser error ''' - pass diff --git a/v2/ansible/galaxy/role.py b/v2/ansible/galaxy/role.py index 89d8399b2da02b..0d13233e6a4483 100644 --- a/v2/ansible/galaxy/role.py +++ b/v2/ansible/galaxy/role.py @@ -36,6 +36,8 @@ class GalaxyRole(object): SUPPORTED_SCMS = set(['git', 'hg']) META_MAIN = os.path.join('meta', 'main.yml') META_INSTALL = os.path.join('meta', '.galaxy_install_info') + ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') + def __init__(self, galaxy, role_name, role_version=None, role_url=None): @@ -45,13 +47,13 @@ def __init__(self, galaxy, role_name, role_version=None, role_url=None): self.name = role_name self.meta_data = None self.install_info = None - self.role_path = (os.path.join(self.roles_path, self.name)) + self.path = (os.path.join(galaxy.roles_path, self.name)) # TODO: possibly parse version and url from role_name self.version = role_version self.url = role_url - if self.url is None and '://' in self.name: - self.url = self.name + if self.url is None: + self._spec_parse() if C.GALAXY_SCMS: self.scms = self.SUPPORTED_SCMS.intersection(set(C.GALAXY_SCMS)) @@ -62,7 +64,7 @@ def __init__(self, galaxy, role_name, role_version=None, role_url=None): self.display.warning("No valid SCMs configured for Galaxy.") - def fetch_from_scm_archive(self, scm, role_url, role_version): + def fetch_from_scm_archive(self): # this can be configured to prevent unwanted SCMS but cannot 
add new ones unless the code is also updated if scm not in self.scms: @@ -111,12 +113,21 @@ def fetch_from_scm_archive(self, scm, role_url, role_version): return temp_file.name + def get_metadata(self): + """ + Returns role metadata + """ + if self.meta_data is None: + self._read_metadata + + return self.meta_data - def read_metadata(self): + + def _read_metadata(self): """ Reads the metadata as YAML, if the file 'meta/main.yml' exists """ - meta_path = os.path.join(self.role_path, self.META_MAIN) + meta_path = os.path.join(self.path, self.META_MAIN) if os.path.isfile(meta_path): try: f = open(meta_path, 'r') @@ -127,15 +138,24 @@ def read_metadata(self): finally: f.close() - return True - def read_galaxy_install_info(self): + def get_galaxy_install_info(self): + """ + Returns role install info + """ + if self.install_info is None: + self._read_galaxy_isntall_info() + + return self.install_info + + + def _read_galaxy_install_info(self): """ Returns the YAML data contained in 'meta/.galaxy_install_info', if it exists. 
""" - info_path = os.path.join(self.role_path, self.META_INSTALL) + info_path = os.path.join(self.path, self.META_INSTALL) if os.path.isfile(info_path): try: f = open(info_path, 'r') @@ -146,9 +166,7 @@ def read_galaxy_install_info(self): finally: f.close() - return True - - def write_galaxy_install_info(self): + def _write_galaxy_install_info(self): """ Writes a YAML-formatted file to the role's meta/ directory (named .galaxy_install_info) which contains some information @@ -159,7 +177,7 @@ def write_galaxy_install_info(self): version=self.version, install_date=datetime.datetime.utcnow().strftime("%c"), ) - info_path = os.path.join(self.role_path, self.META_INSTALL) + info_path = os.path.join(self.path, self.META_INSTALL) try: f = open(info_path, 'w+') self.install_info = yaml.safe_dump(info, f) @@ -178,7 +196,7 @@ def remove(self): """ if self.read_metadata(): try: - rmtree(self.role_path) + rmtree(self.path) return True except: pass @@ -213,7 +231,7 @@ def fetch(self, target, role_data): self.display.error("failed to download the file.") return False - def install(self, role_version, role_filename): + def install(self, role_filename): # the file is a tar, so open it that way and extract it # to the specified (or default) roles directory @@ -246,10 +264,10 @@ def install(self, role_version, role_filename): # we strip off the top-level directory for all of the files contained within # the tar file here, since the default is 'github_repo-target', and change it # to the specified role's name - self.display.display("- extracting %s to %s" % (self.name, self.role_path)) + self.display.display("- extracting %s to %s" % (self.name, self.path)) try: - if os.path.exists(self.role_path): - if not os.path.isdir(self.role_path): + if os.path.exists(self.path): + if not os.path.isdir(self.path): self.display.error("the specified roles path exists and is not a directory.") return False elif not getattr(self.options, "force", False): @@ -258,13 +276,13 @@ def install(self, 
role_version, role_filename): else: # using --force, remove the old path if not self.remove(): - self.display.error("%s doesn't appear to contain a role." % self.role_path) + self.display.error("%s doesn't appear to contain a role." % self.path) self.display.error(" please remove this directory manually if you really want to put the role here.") return False else: - os.makedirs(self.role_path) + os.makedirs(self.path) - # now we do the actual extraction to the role_path + # now we do the actual extraction to the path for member in members: # we only extract files, and remove any relative path # bits that might be in the file for security purposes @@ -276,15 +294,62 @@ def install(self, role_version, role_filename): if part != '..' and '~' not in part and '$' not in part: final_parts.append(part) member.name = os.path.join(*final_parts) - role_tar_file.extract(member, self.role_path) + role_tar_file.extract(member, self.path) # write out the install info file for later use - self.version = role_version - self.write_galaxy_install_info() + self._write_galaxy_install_info() except OSError as e: - self.display.error("Could not update files in %s: %s" % (self.role_path, str(e))) + self.display.error("Could not update files in %s: %s" % (self.path, str(e))) return False # return the parsed yaml metadata - self.display.display("- %s was installed successfully" % self.role_name) + self.display.display("- %s was installed successfully" % self.name) return True + + def get_spec(self): + """ + Returns role spec info + { + 'scm': 'git', + 'src': 'http://git.example.com/repos/repo.git', + 'version': 'v1.0', + 'name': 'repo' + } + """ + if self.scm is None and self.url is None: + self._read_galaxy_isntall_info() + + return dict(scm=self.scm, src=self.url, version=self.version, role_name=self.name) + + def _spec_parse(self): + ''' creates separated parts of role spec ''' + default_role_versions = dict(git='master', hg='tip') + + if not self.url and '://' in self.name: + role_spec 
= self.name.strip() + + if role_spec == "" or role_spec.startswith("#"): + return + + tokens = [s.strip() for s in role_spec.split(',')] + + # assume https://github.com URLs are git+https:// URLs and not tarballs unless they end in '.zip' + if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'): + tokens[0] = 'git+' + tokens[0] + + if '+' in tokens[0]: + (self.scm, self.url) = tokens[0].split('+') + else: + self.scm = None + self.url = tokens[0] + + if len(tokens) >= 2: + self.version = tokens[1] + + if len(tokens) == 3: + self.name = tokens[2] + else: + self.name = self._repo_url_to_role_name(tokens[0]) + + if self.scm and not self.version: + self.version = default_role_versions.get(scm, '') diff --git a/v2/ansible/utils/cli.py b/v2/ansible/utils/cli.py index 6500234c74125e..0cceab01968174 100644 --- a/v2/ansible/utils/cli.py +++ b/v2/ansible/utils/cli.py @@ -28,6 +28,7 @@ from ansible import __version__ from ansible import constants as C +from ansible.errors import AnsibleError from ansible.utils.unicode import to_bytes # FIXME: documentation for methods here, which have mostly been @@ -40,141 +41,286 @@ def format_help(self, formatter=None): self.option_list.sort(key=operator.methodcaller('get_opt_string')) return optparse.OptionParser.format_help(self, formatter=None) -def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, - async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False): - ''' create an options parser for any ansible script ''' - - parser = SortedOptParser(usage, version=version("%prog")) - - parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', - help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) - parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count", - help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") - parser.add_option('-f','--forks', 
dest='forks', default=C.DEFAULT_FORKS, type='int', - help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) - parser.add_option('-i', '--inventory-file', dest='inventory', - help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST, - default=C.DEFAULT_HOST_LIST) - parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', - help='ask for connection password') - parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', - help='use this file to authenticate the connection') - parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', - help='ask for vault password') - parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, - dest='vault_password_file', help="vault password file") - parser.add_option('--list-hosts', dest='listhosts', action='store_true', - help='outputs a list of matching hosts; does not execute anything else') - parser.add_option('-M', '--module-path', dest='module_path', - help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, - default=None) - parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", - help="set additional variables as key=value or YAML/JSON", default=[]) - - if subset_opts: - parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', - help='further limit selected hosts to an additional pattern') - parser.add_option('-t', '--tags', dest='tags', default='all', - help="only run plays and tasks tagged with these values") - parser.add_option('--skip-tags', dest='skip_tags', - help="only run plays and tasks whose tags do not match these values") - - if output_opts: - parser.add_option('-o', '--one-line', dest='one_line', action='store_true', - help='condense output') - parser.add_option('-t', '--tree', dest='tree', default=None, - help='log output to this directory') - - if runas_opts: - # priv user 
defaults to root later on to enable detecting when this option was given here - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', - help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', - help='ask for su password (deprecated, use become)') - parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', - help="run operations with sudo (nopasswd) (deprecated, use become)") - parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, - help='desired sudo user (default=root) (deprecated, use become)') - parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true', - help='run operations with su (deprecated, use become)') - parser.add_option('-R', '--su-user', default=None, - help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER) - - # consolidated privilege escalation (become) - parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become', - help="run operations with become (nopasswd implied)") - parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string', - help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) - parser.add_option('--become-user', default=None, dest='become_user', type='string', - help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) - parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', - help='ask for privilege escalation password') - - - if connect_opts: - parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, - help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) - parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, 
type='int', dest='timeout', - help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) - - - if async_opts: - parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', - dest='poll_interval', - help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL) - parser.add_option('-B', '--background', dest='seconds', type='int', default=0, - help='run asynchronously, failing after X seconds (default=N/A)') - - if check_opts: - parser.add_option("-C", "--check", default=False, dest='check', action='store_true', - help="don't make any changes; instead, try to predict some of the changes that may occur") - parser.add_option('--syntax-check', dest='syntax', action='store_true', - help="perform a syntax check on the playbook, but do not execute it") - - if diff_opts: - parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true', - help="when changing (small) files and templates, show the differences in those files; works great with --check" - ) - - if meta_opts: - parser.add_option('--force-handlers', dest='force_handlers', action='store_true', - help="run handlers even if a task fails") - parser.add_option('--flush-cache', dest='flush_cache', action='store_true', - help="clear the fact cache") - - return parser - -def version(prog): - result = "{0} {1}".format(prog, __version__) - gitinfo = _gitinfo() - if gitinfo: - result = result + " {0}".format(gitinfo) - result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH - return result +#TODO: move many cli only functions in this file into the CLI class +class CLI(object): + ''' code behind bin/ansible* programs ''' -def version_info(gitinfo=False): - if gitinfo: - # expensive call, user with care - ansible_version_string = version('') - else: - ansible_version_string = __version__ - ansible_version = ansible_version_string.split()[0] - ansible_versions = ansible_version.split('.') - for counter in 
range(len(ansible_versions)): - if ansible_versions[counter] == "": - ansible_versions[counter] = 0 - try: - ansible_versions[counter] = int(ansible_versions[counter]) - except: + VALID_ACTIONS = ['No Actions'] + + def __init__(self, args, display=None): + """ + Base init method for all command line programs + """ + + self.args = args + self.options = None + self.parser = None + self.action = None + + if display is None: + self.display = Display() + else: + self.display = display + + def set_action(self): + """ + Get the action the user wants to execute from the sys argv list. + """ + for i in range(0,len(self.args)): + arg = self.args[i] + if arg in self.VALID_ACTIONS: + self.action = arg + del self.args[i] + break + + if not self.action: + self.parser.print_help() + raise AnsibleError("Missing required action") + + def execute(self): + """ + Actually runs a child defined method using the execute_ pattern + """ + fn = getattr(self, "execute_%s" % self.action) + fn() + + def parse(self): + raise Exception("Need to implement!") + + def run(self): + raise Exception("Need to implement!") + + @staticmethod + def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False): + + vault_pass = None + new_vault_pass = None + + if ask_vault_pass: + vault_pass = getpass.getpass(prompt="Vault password: ") + + if ask_vault_pass and confirm_vault: + vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ") + if vault_pass != vault_pass2: + raise errors.AnsibleError("Passwords do not match") + + if ask_new_vault_pass: + new_vault_pass = getpass.getpass(prompt="New Vault password: ") + + if ask_new_vault_pass and confirm_new: + new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ") + if new_vault_pass != new_vault_pass2: + raise errors.AnsibleError("Passwords do not match") + + # enforce no newline chars at the end of passwords + if vault_pass: + vault_pass = to_bytes(vault_pass, errors='strict', 
nonstring='simplerepr').strip() + if new_vault_pass: + new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip() + + return vault_pass, new_vault_pass + + + def ask_passwords(self): + + op = self.options + sshpass = None + becomepass = None + become_prompt = '' + + if op.ask_pass: + sshpass = getpass.getpass(prompt="SSH password: ") + become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper() + if sshpass: + sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') + else: + become_prompt = "%s password: " % op.become_method.upper() + + if op.become_ask_pass: + becomepass = getpass.getpass(prompt=become_prompt) + if op.ask_pass and becomepass == '': + becomepass = sshpass + if becomepass: + becomepass = to_bytes(becomepass) + + return (sshpass, becomepass) + + + def normalize_become_options(self): + ''' this keeps backwards compatibility with sudo/su self.options ''' + self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS + self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER + + if self.options.become: pass - if len(ansible_versions) < 3: - for counter in range(len(ansible_versions), 3): - ansible_versions.append(0) - return {'string': ansible_version_string.strip(), - 'full': ansible_version, - 'major': ansible_versions[0], - 'minor': ansible_versions[1], - 'revision': ansible_versions[2]} + elif self.options.sudo: + self.options.become = True + self.options.become_method = 'sudo' + elif self.options.su: + self.options.become = True + options.become_method = 'su' + + + def validate_conflicts(self): + + op = self.options + + # Check for vault related conflicts + if (op.ask_vault_pass and op.vault_password_file): + self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") + + + # Check for privilege 
escalation conflicts + if (op.su or op.su_user or op.ask_su_pass) and \ + (op.sudo or op.sudo_user or op.ask_sudo_pass) or \ + (op.su or op.su_user or op.ask_su_pass) and \ + (op.become or op.become_user or op.become_ask_pass) or \ + (op.sudo or op.sudo_user or op.ask_sudo_pass) and \ + (op.become or op.become_user or op.become_ask_pass): + + self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " + "and su arguments ('-su', '--su-user', and '--ask-su-pass') " + "and become arguments ('--become', '--become-user', and '--ask-become-pass')" + " are exclusive of each other") + + @staticmethod + def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, + async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False): + ''' create an options parser for any ansible script ''' + + parser = SortedOptParser(usage, version=CLI.version("%prog")) + + parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', + help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) + parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count", + help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") + parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', + help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) + parser.add_option('-i', '--inventory-file', dest='inventory', + help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST, + default=C.DEFAULT_HOST_LIST) + parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', + help='ask for connection password') + parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', + help='use this file to authenticate the connection') + parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', + help='ask for vault password') 
+ parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, + dest='vault_password_file', help="vault password file") + parser.add_option('--list-hosts', dest='listhosts', action='store_true', + help='outputs a list of matching hosts; does not execute anything else') + parser.add_option('-M', '--module-path', dest='module_path', + help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, + default=None) + parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", + help="set additional variables as key=value or YAML/JSON", default=[]) + + if subset_opts: + parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', + help='further limit selected hosts to an additional pattern') + parser.add_option('-t', '--tags', dest='tags', default='all', + help="only run plays and tasks tagged with these values") + parser.add_option('--skip-tags', dest='skip_tags', + help="only run plays and tasks whose tags do not match these values") + + if output_opts: + parser.add_option('-o', '--one-line', dest='one_line', action='store_true', + help='condense output') + parser.add_option('-t', '--tree', dest='tree', default=None, + help='log output to this directory') + + if runas_opts: + # priv user defaults to root later on to enable detecting when this option was given here + parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + help='ask for sudo password (deprecated, use become)') + parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', + help='ask for su password (deprecated, use become)') + parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', + help="run operations with sudo (nopasswd) (deprecated, use become)") + parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, + help='desired sudo user (default=root) (deprecated, use become)') + parser.add_option('-S', '--su', 
default=C.DEFAULT_SU, action='store_true', + help='run operations with su (deprecated, use become)') + parser.add_option('-R', '--su-user', default=None, + help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER) + + # consolidated privilege escalation (become) + parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become', + help="run operations with become (nopasswd implied)") + parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string', + help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) + parser.add_option('--become-user', default=None, dest='become_user', type='string', + help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) + parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', + help='ask for privilege escalation password') + + + if connect_opts: + parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, + help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) + parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout', + help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) + + + if async_opts: + parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', + dest='poll_interval', + help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL) + parser.add_option('-B', '--background', dest='seconds', type='int', default=0, + help='run asynchronously, failing after X seconds (default=N/A)') + + if check_opts: + parser.add_option("-C", "--check", default=False, dest='check', action='store_true', + help="don't make any changes; instead, try to predict some of the changes that may occur") + parser.add_option('--syntax-check', dest='syntax', 
action='store_true', + help="perform a syntax check on the playbook, but do not execute it") + + if diff_opts: + parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true', + help="when changing (small) files and templates, show the differences in those files; works great with --check" + ) + + if meta_opts: + parser.add_option('--force-handlers', dest='force_handlers', action='store_true', + help="run handlers even if a task fails") + parser.add_option('--flush-cache', dest='flush_cache', action='store_true', + help="clear the fact cache") + + return parser + + @staticmethod + def version(prog): + result = "{0} {1}".format(prog, __version__) + gitinfo = _gitinfo() + if gitinfo: + result = result + " {0}".format(gitinfo) + result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH + return result + + @staticmethod + def version_info(gitinfo=False): + if gitinfo: + # expensive call, user with care + ansible_version_string = version('') + else: + ansible_version_string = __version__ + ansible_version = ansible_version_string.split()[0] + ansible_versions = ansible_version.split('.') + for counter in range(len(ansible_versions)): + if ansible_versions[counter] == "": + ansible_versions[counter] = 0 + try: + ansible_versions[counter] = int(ansible_versions[counter]) + except: + pass + if len(ansible_versions) < 3: + for counter in range(len(ansible_versions), 3): + ansible_versions.append(0) + return {'string': ansible_version_string.strip(), + 'full': ansible_version, + 'major': ansible_versions[0], + 'minor': ansible_versions[1], + 'revision': ansible_versions[2]} def _git_repo_info(repo_path): ''' returns a string containing git branch, commit id and commit date ''' @@ -234,69 +380,3 @@ def _gitinfo(): result += "\n {0}: {1}".format(submodule_path, submodule_info) f.close() return result - - -def ask_passwords(options): - sshpass = None - becomepass = None - vaultpass = None - become_prompt = '' - - if options.ask_pass: - 
sshpass = getpass.getpass(prompt="SSH password: ") - become_prompt = "%s password[defaults to SSH password]: " % options.become_method.upper() - if sshpass: - sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') - else: - become_prompt = "%s password: " % options.become_method.upper() - - if options.become_ask_pass: - becomepass = getpass.getpass(prompt=become_prompt) - if options.ask_pass and becomepass == '': - becomepass = sshpass - if becomepass: - becomepass = to_bytes(becomepass) - - if options.ask_vault_pass: - vaultpass = getpass.getpass(prompt="Vault password: ") - if vaultpass: - vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip() - - return (sshpass, becomepass, vaultpass) - - -def normalize_become_options(options): - ''' this keeps backwards compatibility with sudo/su options ''' - options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS - options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER - - if options.become: - pass - elif options.sudo: - options.become = True - options.become_method = 'sudo' - elif options.su: - options.become = True - options.become_method = 'su' - - -def validate_conflicts(parser, options): - - # Check for vault related conflicts - if (options.ask_vault_pass and options.vault_password_file): - parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") - - - # Check for privilege escalation conflicts - if (options.su or options.su_user or options.ask_su_pass) and \ - (options.sudo or options.sudo_user or options.ask_sudo_pass) or \ - (options.su or options.su_user or options.ask_su_pass) and \ - (options.become or options.become_user or options.become_ask_pass) or \ - (options.sudo or options.sudo_user or options.ask_sudo_pass) and \ - (options.become or options.become_user or options.become_ask_pass): - - parser.error("Sudo arguments 
('--sudo', '--sudo-user', and '--ask-sudo-pass') " - "and su arguments ('-su', '--su-user', and '--ask-su-pass') " - "and become arguments ('--become', '--become-user', and '--ask-become-pass')" - " are exclusive of each other") - diff --git a/v2/bin/ansible b/v2/bin/ansible index b4f651ffdaa1b9..77446338da09d8 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -40,28 +40,20 @@ from ansible.inventory import Inventory from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play -from ansible.utils.display import Display -from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords +from ansible.utils.cli import CLI from ansible.utils.display import Display from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager ######################################################## -class Cli(object): - ''' code behind bin/ansible ''' - - def __init__(self, display=None): - - if display is None: - self.display = Display() - else: - self.display = display +class AdHocCli(CLI): + ''' code behind ansible ad-hoc cli''' def parse(self): ''' create an options parser for bin/ansible ''' - parser = base_parser( + self.parser = CLI.base_parser( usage='%prog [options]', runas_opts=True, async_opts=True, @@ -71,102 +63,110 @@ class Cli(object): ) # options unique to ansible ad-hoc - parser.add_option('-a', '--args', dest='module_args', + self.parser.add_option('-a', '--args', dest='module_args', help="module arguments", default=C.DEFAULT_MODULE_ARGS) - parser.add_option('-m', '--module-name', dest='module_name', + self.parser.add_option('-m', '--module-name', dest='module_name', help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, default=C.DEFAULT_MODULE_NAME) - options, args = parser.parse_args() + self.options, self.args = self.parser.parse_args() - if len(args) == 0 or len(args) > 1: - parser.print_help() + if len(self.args) != 1: + 
self.parser.print_help() sys.exit(1) - display.verbosity = options.verbosity - validate_conflicts(parser,options) + self.display.verbosity = self.options.verbosity + self.validate_conflicts() - return (options, args) + return True - # ---------------------------------------------- - def run(self, options, args): + def run(self): ''' use Runner lib to do SSH things ''' - pattern = args[0] + # only thing left should be host pattern + pattern = self.args[0] - if options.connection == "local": - options.ask_pass = False + # ignore connection password cause we are local + if self.options.connection == "local": + self.options.ask_pass = False sshpass = None becomepass = None vault_pass = None - normalize_become_options(options) - (sshpass, becomepass, vault_pass) = ask_passwords(options) + self.normalize_become_options() + (sshpass, becomepass) = self.ask_passwords() passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } - if options.vault_password_file: - # read vault_pass from a file - vault_pass = read_vault_file(options.vault_password_file) + if self.options.vault_password_file: + # read vault_pass from a file + vault_pass = read_vault_file(self.options.vault_password_file) + elif self.options.ask_vault_pass: + vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0] loader = DataLoader(vault_password=vault_pass) variable_manager = VariableManager() - inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory) + inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory) hosts = inventory.list_hosts(pattern) if len(hosts) == 0: - d = Display() - d.warning("provided hosts list is empty, only localhost is available") + self.display.warning("provided hosts list is empty, only localhost is available") - if options.listhosts: + if self.options.listhosts: for host in hosts: self.display.display(' %s' % host.name) - sys.exit(0) + 
return 0 - if ((options.module_name == 'command' or options.module_name == 'shell') and not options.module_args): - raise AnsibleError("No argument passed to %s module" % options.module_name) + if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args: + raise AnsibleError("No argument passed to %s module" % self.options.module_name) - # FIXME: async support needed - #if options.seconds: + #TODO: implement async support + #if self.options.seconds: # callbacks.display("background launch...\n\n", color='cyan') - # results, poller = runner.run_async(options.seconds) - # results = self.poll_while_needed(poller, options) + # results, poller = runner.run_async(self.options.seconds) + # results = self.poll_while_needed(poller) #else: # results = runner.run() # create a pseudo-play to execute the specified module via a single task play_ds = dict( + name = "Ansible Ad-Hoc", hosts = pattern, gather_facts = 'no', - tasks = [ - dict(action=dict(module=options.module_name, args=parse_kv(options.module_args))), - ] + tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args))), ] ) play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) # now create a task queue manager to execute the play try: - display = Display() - tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords, stdout_callback='minimal') + tqm = TaskQueueManager( + inventory=inventory, + callback='minimal', + variable_manager=variable_manager, + loader=loader, + display=self.display, + options=self.options, + passwords=passwords, + stdout_callback='minimal', + ) result = tqm.run(play) - tqm.cleanup() - except AnsibleError: - tqm.cleanup() - raise + finally: + if tqm: + tqm.cleanup() return result # ---------------------------------------------- - def poll_while_needed(self, poller, options): + def poll_while_needed(self, poller): ''' 
summarize results from Runner ''' # BACKGROUND POLL LOGIC when -B and -P are specified - if options.seconds and options.poll_interval > 0: - poller.wait(options.seconds, options.poll_interval) + if self.options.seconds and self.options.poll_interval > 0: + poller.wait(self.options.seconds, self.options.poll_interval) return poller.results @@ -176,14 +176,12 @@ class Cli(object): if __name__ == '__main__': display = Display() - #display.display(" ".join(sys.argv)) - try: - cli = Cli(display=display) - (options, args) = cli.parse() - sys.exit(cli.run(options, args)) + cli = AdHocCli(sys.argv, display=display) + cli.parse() + sys.exit(cli.run()) except AnsibleError as e: - display.error(str(e)) + display.display(str(e), stderr=True, color='red') sys.exit(1) except KeyboardInterrupt: display.error("interrupted") diff --git a/v2/bin/ansible-galaxy b/v2/bin/ansible-galaxy index 1c8215b944fee1..cca1dd9d8356df 100755 --- a/v2/bin/ansible-galaxy +++ b/v2/bin/ansible-galaxy @@ -42,113 +42,109 @@ from optparse import OptionParser import ansible.constants as C import ansible.utils import ansible.galaxy -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.galaxy import Galaxy +from ansible.galaxy.api import GalaxyAPI +from ansible.galaxy.role import GalaxyRole +from ansible.utils.display import Display +from ansible.utils.cli import CLI -class Cli(object): +class GalaxyCLI(CLI): VALID_ACTIONS = ("init", "info", "install", "list", "remove") SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - def __init__(self): - - if display is None: - self.display = Display() - else: - self.display = display - self.action = None - - def set_action(args): - """ - Get the action the user wants to execute from the - sys argv list. 
- """ - for i in range(0,len(args)): - arg = args[i] - if arg in VALID_ACTIONS: - del args[i] - self.action = arg + def __init__(self, args, display=None): + self.api = None + self.galaxy = None + super(GalaxyCLI, self).__init__(args, display) def parse(self): ''' create an options parser for bin/ansible ''' - usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS) + + usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS) epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) OptionParser.format_epilog = lambda self, formatter: self.epilog parser = OptionParser(usage=usage, epilog=epilog) - if not self.action: - parser.print_help() - sys.exit(1) + self.parser = parser + self.set_action() # options specific to actions if self.action == "info": - parser.set_usage("usage: %prog info [options] role_name[,version]") + self.parser.set_usage("usage: %prog info [options] role_name[,version]") elif self.action == "init": - parser.set_usage("usage: %prog init [options] role_name") - parser.add_option( + self.parser.set_usage("usage: %prog init [options] role_name") + self.parser.add_option( '-p', '--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. 
' 'The default is the current working directory.') - parser.add_option( + self.parser.add_option( '--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles") elif self.action == "install": - parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") - parser.add_option( + self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") + self.parser.add_option( '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.') - parser.add_option( + self.parser.add_option( '-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') - parser.add_option( + self.parser.add_option( '-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported') elif self.action == "remove": - parser.set_usage("usage: %prog remove role1 role2 ...") + self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": - parser.set_usage("usage: %prog list [role_name]") + self.parser.set_usage("usage: %prog list [role_name]") # options that apply to more than one action if self.action != "init": - parser.add_option( + self.parser.add_option( '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. 
' 'The default is the roles_path configured in your ' 'ansible.cfg file (/etc/ansible/roles if not configured)') if self.action in ("info","init","install"): - parser.add_option( + self.parser.add_option( '-s', '--server', dest='api_server', default="galaxy.ansible.com", help='The API server destination') if self.action in ("init","install"): - parser.add_option( + self.parser.add_option( '-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') - # done, return the parser - options, args = parser.parse_args() + # get options, args and galaxy object + self.options, self.args =self.parser.parse_args() + self.galaxy = Galaxy(self.options, self.display) + + if len(self.args) != 1: + raise AnsibleOptionsError("Missing arguments") - if len(args) == 0 or len(args) > 1: - parser.print_help() - sys.exit(1) + return True - display.verbosity = options.verbosity + def run(self): - return (options, args) + #self.display.verbosity = self.options.verbosity + api_server = self.get_opt("api_server", "galaxy.ansible.com") - def run(options, args): + # if not offline, get connect to galaxy api + if self.action == 'init' and not self.options.offline: + self.api = GalaxyAPI(self.galaxy, api_server) + if not self.api: + raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server) - # execute the desired action - fn = getattr(self, "execute_%s" % self.action) - fn(args, options) + self.execute() - def get_opt(options, k, defval=""): + def get_opt(self, k, defval=""): """ Returns an option from an Optparse values instance. 
""" try: - data = getattr(options, k) + data = getattr(self.options, k) except: return defval if k == "roles_path": @@ -156,56 +152,40 @@ class Cli(object): data = data.split(os.pathsep)[0] return data - def exit_without_ignore(options, rc=1): + def exit_without_ignore(self, rc=1): """ Exits with the specified return code unless the option --ignore-errors was specified """ - if not get_opt(options, "ignore_errors", False): - print '- you can use --ignore-errors to skip failed roles.' - sys.exit(rc) + if not self.get_opt("ignore_errors", False): + self.display.error('- you can use --ignore-errors to skip failed tasks/roles.') + return rc - def execute_init(args, options, parser): + def execute_init(self): """ Executes the init action, which creates the skeleton framework of a role that complies with the galaxy metadata format. """ - init_path = get_opt(options, 'init_path', './') - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - force = get_opt(options, 'force', False) - offline = get_opt(options, 'offline', False) - - if not offline: - api_config = api_get_config(api_server) - if not api_config: - print "- the API server (%s) is not responding, please try again later." % api_server - sys.exit(1) - - try: - role_name = args.pop(0).strip() - if role_name == "": - raise Exception("") - role_path = os.path.join(init_path, role_name) - if os.path.exists(role_path): - if os.path.isfile(role_path): - print "- the path %s already exists, but is a file - aborting" % role_path - sys.exit(1) - elif not force: - print "- the directory %s already exists." % role_path - print " you can use --force to re-initialize this directory,\n" + \ - " however it will reset any main.yml files that may have\n" + \ - " been modified there already." 
- sys.exit(1) - except Exception, e: - parser.print_help() - print "- no role name specified for init" - sys.exit(1) - - ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') + init_path = self.get_opt('init_path', './') + force = self.get_opt('force', False) + offline = self.get_opt('offline', False) + + role_name = self.args.pop(0).strip() + if role_name == "": + raise AnsibleOptionsError("- no role name specified for init") + role_path = os.path.join(init_path, role_name) + if os.path.exists(role_path): + if os.path.isfile(role_path): + raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path) + elif not force: + raise AnsibleError("- the directory %s already exists." % role_path + \ + "you can use --force to re-initialize this directory,\n" + \ + "however it will reset any main.yml files that may have\n" + \ + "been modified there already.") # create the default README.md if not os.path.exists(role_path): @@ -215,7 +195,7 @@ class Cli(object): f.write(default_readme_template) f.close - for dir in ROLE_DIRS: + for dir in self.ROLE_DIRS: dir_path = os.path.join(init_path, role_name, dir) main_yml_path = os.path.join(dir_path, 'main.yml') # create the directory if it doesn't exist already @@ -229,11 +209,11 @@ class Cli(object): # tags/platforms included (but commented out) and the # dependencies section platforms = [] - if not offline: - platforms = api_get_list(api_server, "platforms") or [] + if not offline and self.api: + platforms = self.api.get_list("platforms") or [] categories = [] - if not offline: - categories = api_get_list(api_server, "categories") or [] + if not offline and self.api: + categories = self.api.get_list("categories") or [] # group the list of platforms from the api based # on their names, with the release field being @@ -264,24 +244,20 @@ class Cli(object): f.close() print "- %s was created successfully" % role_name - def execute_info(args, options, parser): + def execute_info(self): 
""" Executes the info action. This action prints out detailed information about an installed role as well as info available from the galaxy API. """ - if len(args) == 0: + if len(self.args) == 0: # the user needs to specify a role - parser.print_help() - print "- you must specify a user/role name" - sys.exit(1) + raise AnsibleOptionsError("- you must specify a user/role name") - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - api_config = api_get_config(api_server) - roles_path = get_opt(options, "roles_path") + roles_path = self.get_opt("roles_path") - for role in args: + for role in self.args: role_info = {} @@ -292,11 +268,11 @@ class Cli(object): del install_info['version'] role_info.update(install_info) - remote_data = api_lookup_role_by_name(api_server, role, False) + remote_data = self.api.lookup_role_by_name(role, False) if remote_data: role_info.update(remote_data) - metadata = get_role_metadata(role, options) + metadata = get_metadata(role, options) if metadata: role_info.update(metadata) @@ -322,7 +298,7 @@ class Cli(object): else: print "- the role %s was not found" % role - def execute_install(args, options, parser): + def execute_install(self): """ Executes the installation action. The args list contains the roles to be installed, unless -f was specified. The list of roles @@ -330,24 +306,19 @@ class Cli(object): or it can be a local .tar.gz file. 
""" - role_file = get_opt(options, "role_file", None) + role_file = self.get_opt("role_file", None) - if len(args) == 0 and role_file is None: + if len(self.args) == 0 and role_file is None: # the user needs to specify one of either --role-file # or specify a single user/role name - parser.print_help() - print "- you must specify a user/role name or a roles file" - sys.exit() - elif len(args) == 1 and not role_file is None: + raise AnsibleOptionsError("- you must specify a user/role name or a roles file") + elif len(self.args) == 1 and not role_file is None: # using a role file is mutually exclusive of specifying # the role name on the command line - parser.print_help() - print "- please specify a user/role name, or a roles file, but not both" - sys.exit(1) + raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both") - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - no_deps = get_opt(options, "no_deps", False) - roles_path = get_opt(options, "roles_path") + no_deps = self.get_opt("no_deps", False) + roles_path = self.get_opt("roles_path") roles_done = [] if role_file: @@ -356,12 +327,12 @@ class Cli(object): roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f)) else: # roles listed in a file, one per line - roles_left = map(ansible.utils.role_spec_parse, f.readlines()) + roles_left = map(gr.get_spec, f.readlines()) f.close() else: # roles were specified directly, so we'll just go out grab them # (and their dependencies, unless the user doesn't want us to). 
- roles_left = map(ansible.utils.role_spec_parse, args) + roles_left = map(gr.get_spec, self.args) while len(roles_left) > 0: # query the galaxy API for the role data @@ -387,19 +358,13 @@ class Cli(object): # just download a URL - version will probably be in the URL tmp_file = fetch_role(role_src, None, None, options) else: - # installing from galaxy - api_config = api_get_config(api_server) - if not api_config: - print "- the API server (%s) is not responding, please try again later." % api_server - sys.exit(1) - - role_data = api_lookup_role_by_name(api_server, role_src) + role_data = self.api.lookup_role_by_name(role_src) if not role_data: - print "- sorry, %s was not found on %s." % (role_src, api_server) + print "- sorry, %s was not found on %s." % (role_src, self.options.api_server) exit_without_ignore(options) continue - role_versions = api_fetch_role_related(api_server, 'versions', role_data['id']) + role_versions = self.api.fetch_role_related('versions', role_data['id']) if "version" not in role or role['version'] == '': # convert the version names to LooseVersion objects # and sort them to get the latest version. If there @@ -430,7 +395,7 @@ class Cli(object): # install dependencies, if we want them if not no_deps and installed: if not role_data: - role_data = get_role_metadata(role.get("name"), options) + role_data = gr.get_metadata(role.get("name"), options) role_dependencies = role_data['dependencies'] else: role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id']) @@ -450,30 +415,28 @@ class Cli(object): if not tmp_file or not installed: print "- %s was NOT installed successfully." % role.get("name") exit_without_ignore(options) - sys.exit(0) + return 0 - def execute_remove(args, options, parser): + def execute_remove(self): """ Executes the remove action. The args list contains the list of roles to be removed. This list can contain more than one role. 
""" - if len(args) == 0: - parser.print_help() - print '- you must specify at least one role to remove.' - sys.exit() + if len(self.args) == 0: + raise AnsibleOptionsError('- you must specify at least one role to remove.') - for role in args: + for role in self.args: if get_role_metadata(role, options): if remove_role(role, options): - print '- successfully removed %s' % role + self.display.display('- successfully removed %s' % role) else: - print "- failed to remove role: %s" % role + self.display.display("- failed to remove role: %s" % role) else: - print '- %s is not installed, skipping.' % role - sys.exit(0) + self.display.display('- %s is not installed, skipping.' % role) + return 0 - def execute_list(args, options, parser): + def execute_list(self): """ Executes the list action. The args list can contain zero or one role. If one is specified, only that role will be @@ -481,37 +444,33 @@ class Cli(object): be shown. """ - if len(args) > 1: - print "- please specify only one role to list, or specify no roles to see a full list" - sys.exit(1) + if len(self.args) > 1: + raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list") - if len(args) == 1: + if len(self.args) == 1: # show only the request role, if it exists - role_name = args[0] - metadata = get_role_metadata(role_name, options) + role_name = self.args[0] + gr = GalaxyRole(self.galaxy, role_name) + metadata = gr.get_metadata() if metadata: - install_info = get_galaxy_install_info(role_name, options) + install_info = gr.get_galaxy_install_info() version = None if install_info: version = install_info.get("version", None) if not version: version = "(unknown version)" # show some more info about single roles here - print "- %s, %s" % (role_name, version) + self.display.display("- %s, %s" % (role_name, version)) else: - print "- the role %s was not found" % role_name + self.display.display("- the role %s was not found" % role_name) else: # show all valid roles 
in the roles_path directory - roles_path = get_opt(options, 'roles_path') + roles_path = self.get_opt('roles_path') roles_path = os.path.expanduser(roles_path) if not os.path.exists(roles_path): - parser.print_help() - print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path - sys.exit(1) + raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path) elif not os.path.isdir(roles_path): - print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path - parser.print_help() - sys.exit(1) + raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path) path_files = os.listdir(roles_path) for path_file in path_files: if get_role_metadata(path_file, options): @@ -521,40 +480,26 @@ class Cli(object): version = install_info.get("version", None) if not version: version = "(unknown version)" - print "- %s, %s" % (path_file, version) - sys.exit(0) + self.display.display("- %s, %s" % (path_file, version)) + return 0 #------------------------------------------------------------------------------------- # The main entry point #------------------------------------------------------------------------------------- - -#def main(): -# # parse the CLI options -# action = get_action(sys.argv) -# parser = build_option_parser(action) -# (options, args) = parser.parse_args() -# -# # execute the desired action -# if 1: #try: -# fn = globals()["execute_%s" % action] -# fn(args, options, parser) -# #except KeyError, e: -# # print "- error: %s is not a valid action. 
Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS)) -# # sys.exit(1) - - if __name__ == '__main__': display = Display() - try: - cli = Cli(display=display) - cli.set_action(sys.argv) - (options, args) = cli.parse() - sys.exit(cli.run(options, args)) - except AnsibleError as e: - display.error(str(e)) + cli = GalaxyCLI(sys.argv, display=display) + cli.parse() + sys.exit(cli.run()) + except AnsibleOptionsError as e: + cli.parser.print_help() + display.display(str(e), stderr=True, color='red') sys.exit(1) + except AnsibleError as e: + display.display(str(e), stderr=True, color='red') + sys.exit(2) except KeyboardInterrupt: display.error("interrupted") - sys.exit(1) + sys.exit(3) diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index d9247fef1c745f..700538cb56c20c 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -44,7 +44,7 @@ from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook import Playbook from ansible.playbook.task import Task -from ansible.utils.cli import base_parser, validate_conflicts, normalize_become_options, ask_passwords +from ansible.utils.cli import CLI from ansible.utils.display import Display from ansible.utils.unicode import to_unicode from ansible.utils.vars import combine_vars @@ -53,145 +53,156 @@ from ansible.vars import VariableManager #--------------------------------------------------------------------------------------------------- -def main(display, args): - ''' run ansible-playbook operations ''' - - # create parser for CLI options - parser = base_parser( - usage = "%prog playbook.yml", - connect_opts=True, - meta_opts=True, - runas_opts=True, - subset_opts=True, - check_opts=True, - diff_opts=True, - ) - - # ansible playbook specific opts - parser.add_option('--list-tasks', dest='listtasks', action='store_true', - help="list all tasks that would be executed") - parser.add_option('--step', dest='step', action='store_true', - 
help="one-step-at-a-time: confirm each task before running") - parser.add_option('--start-at-task', dest='start_at', - help="start the playbook at the task matching this name") - parser.add_option('--list-tags', dest='listtags', action='store_true', - help="list all available tags") - - options, args = parser.parse_args(args) - - if len(args) == 0: - parser.print_help(file=sys.stderr) - return 1 - - display.verbosity = options.verbosity - validate_conflicts(parser,options) - - # Note: slightly wrong, this is written so that implicit localhost - # Manage passwords - sshpass = None - becomepass = None - vault_pass = None - - # don't deal with privilege escalation when we don't need to - if not options.listhosts and not options.listtasks and not options.listtags: - normalize_become_options(options) - (sshpass, becomepass, vault_pass) = ask_passwords(options) - passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } - - if options.vault_password_file: - # read vault_pass from a file - vault_pass = read_vault_file(options.vault_password_file) - - loader = DataLoader(vault_password=vault_pass) - - extra_vars = {} - for extra_vars_opt in options.extra_vars: - extra_vars_opt = to_unicode(extra_vars_opt, errors='strict') - if extra_vars_opt.startswith(u"@"): - # Argument is a YAML file (JSON is a subset of YAML) - data = loader.load_from_file(extra_vars_opt[1:]) - elif extra_vars_opt and extra_vars_opt[0] in u'[{': - # Arguments as YAML - data = loader.load(extra_vars_opt) +class PlaybookCLI(CLI): + ''' code behind ansible playbook cli''' + + def parse(self): + + # create parser for CLI options + parser = CLI.base_parser( + usage = "%prog playbook.yml", + connect_opts=True, + meta_opts=True, + runas_opts=True, + subset_opts=True, + check_opts=True, + diff_opts=True, + ) + + # ansible playbook specific opts + parser.add_option('--list-tasks', dest='listtasks', action='store_true', + help="list all tasks that would be executed") + parser.add_option('--step', 
dest='step', action='store_true', + help="one-step-at-a-time: confirm each task before running") + parser.add_option('--start-at-task', dest='start_at', + help="start the playbook at the task matching this name") + parser.add_option('--list-tags', dest='listtags', action='store_true', + help="list all available tags") + + self.options, self.args = parser.parse_args() + + if len(self.args) == 0: + parser.print_help(file=sys.stderr) + raise AnsibleError("You must specify a playbook file to run") + + self.parser = parser + + self.display.verbosity = self.options.verbosity + self.validate_conflicts() + + def run(self): + + # Note: slightly wrong, this is written so that implicit localhost + # Manage passwords + sshpass = None + becomepass = None + vault_pass = None + passwords = {} + + # don't deal with privilege escalation or passwords when we don't need to + if not self.options.listhosts and not self.options.listtasks and not self.options.listtags: + self.normalize_become_options() + (sshpass, becomepass) = self.ask_passwords() + passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } + + if self.options.vault_password_file: + # read vault_pass from a file + vault_pass = read_vault_file(self.options.vault_password_file) + elif self.options.ask_vault_pass: + vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0] + + loader = DataLoader(vault_password=vault_pass) + + extra_vars = {} + for extra_vars_opt in self.options.extra_vars: + extra_vars_opt = to_unicode(extra_vars_opt, errors='strict') + if extra_vars_opt.startswith(u"@"): + # Argument is a YAML file (JSON is a subset of YAML) + data = loader.load_from_file(extra_vars_opt[1:]) + elif extra_vars_opt and extra_vars_opt[0] in u'[{': + # Arguments as YAML + data = loader.load(extra_vars_opt) + else: + # Arguments as Key-value + data = parse_kv(extra_vars_opt) + extra_vars = combine_vars(extra_vars, data) + + # FIXME: this should be moved inside the playbook 
executor code + only_tags = self.options.tags.split(",") + skip_tags = self.options.skip_tags + if self.options.skip_tags is not None: + skip_tags = self.options.skip_tags.split(",") + + # initial error check, to make sure all specified playbooks are accessible + # before we start running anything through the playbook executor + for playbook in self.args: + if not os.path.exists(playbook): + raise AnsibleError("the playbook: %s could not be found" % playbook) + if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)): + raise AnsibleError("the playbook: %s does not appear to be a file" % playbook) + + # create the variable manager, which will be shared throughout + # the code, ensuring a consistent view of global variables + variable_manager = VariableManager() + variable_manager.set_extra_vars(extra_vars) + + # create the inventory, and filter it based on the subset specified (if any) + inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory) + variable_manager.set_inventory(inventory) + + # (which is not returned in list_hosts()) is taken into account for + # warning if inventory is empty. But it can't be taken into account for + # checking if limit doesn't match any hosts. Instead we don't worry about + # limit if only implicit localhost was in inventory to start with. 
+ # + # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts()) + no_hosts = False + if len(inventory.list_hosts()) == 0: + # Empty inventory + self.display.warning("provided hosts list is empty, only localhost is available") + no_hosts = True + inventory.subset(self.options.subset) + if len(inventory.list_hosts()) == 0 and no_hosts is False: + # Invalid limit + raise AnsibleError("Specified --limit does not match any hosts") + + # create the playbook executor, which manages running the plays via a task queue manager + pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=self.display, options=self.options, passwords=passwords) + + results = pbex.run() + + if isinstance(results, list): + for p in results: + + self.display.display('\nplaybook: %s\n' % p['playbook']) + for play in p['plays']: + if self.options.listhosts: + self.display.display("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts']))) + for host in play['hosts']: + self.display.display(" %s" % host) + if self.options.listtasks: #TODO: do we want to display block info? 
+ self.display.display("\n %s" % (play['name'])) + for task in play['tasks']: + self.display.display(" %s" % task) + if self.options.listtags: #TODO: fix once we figure out block handling above + self.display.display("\n %s: tags count=%d" % (play['name'], len(play['tags']))) + for tag in play['tags']: + self.display.display(" %s" % tag) + return 0 else: - # Arguments as Key-value - data = parse_kv(extra_vars_opt) - extra_vars = combine_vars(extra_vars, data) - - # FIXME: this should be moved inside the playbook executor code - only_tags = options.tags.split(",") - skip_tags = options.skip_tags - if options.skip_tags is not None: - skip_tags = options.skip_tags.split(",") - - # initial error check, to make sure all specified playbooks are accessible - # before we start running anything through the playbook executor - for playbook in args: - if not os.path.exists(playbook): - raise AnsibleError("the playbook: %s could not be found" % playbook) - if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)): - raise AnsibleError("the playbook: %s does not appear to be a file" % playbook) - - # create the variable manager, which will be shared throughout - # the code, ensuring a consistent view of global variables - variable_manager = VariableManager() - variable_manager.set_extra_vars(extra_vars) - - # create the inventory, and filter it based on the subset specified (if any) - inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=options.inventory) - variable_manager.set_inventory(inventory) - - # (which is not returned in list_hosts()) is taken into account for - # warning if inventory is empty. But it can't be taken into account for - # checking if limit doesn't match any hosts. Instead we don't worry about - # limit if only implicit localhost was in inventory to start with. 
- # - # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts()) - no_hosts = False - if len(inventory.list_hosts()) == 0: - # Empty inventory - display.warning("provided hosts list is empty, only localhost is available") - no_hosts = True - inventory.subset(options.subset) - if len(inventory.list_hosts()) == 0 and no_hosts is False: - # Invalid limit - raise errors.AnsibleError("Specified --limit does not match any hosts") - - # create the playbook executor, which manages running the plays via a task queue manager - pbex = PlaybookExecutor(playbooks=args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=passwords) - - results = pbex.run() - - if isinstance(results, list): - for p in results: - - display.display('\nplaybook: %s\n' % p['playbook']) - for play in p['plays']: - if options.listhosts: - display.display("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts']))) - for host in play['hosts']: - display.display(" %s" % host) - if options.listtasks: #TODO: do we want to display block info? 
- display.display("\n %s" % (play['name'])) - for task in play['tasks']: - display.display(" %s" % task) - if options.listtags: #TODO: fix once we figure out block handling above - display.display("\n %s: tags count=%d" % (play['name'], len(play['tags']))) - for tag in play['tags']: - display.display(" %s" % tag) - return 0 - else: - return results + return results + +######################################################## if __name__ == "__main__": display = Display() - #display.display(" ".join(sys.argv), log_only=True) - try: - sys.exit(main(display, sys.argv[1:])) + cli = PlaybookCLI(sys.argv, display=display) + cli.parse() + sys.exit(cli.run()) except AnsibleError as e: - display.error(str(e)) + display.display(str(e), stderr=True, color='red') sys.exit(1) except KeyboardInterrupt: display.error("interrupted") diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault index 638d80ba9ed8ec..78686b6839a034 100755 --- a/v2/bin/ansible-vault +++ b/v2/bin/ansible-vault @@ -35,141 +35,100 @@ import traceback from ansible.errors import AnsibleError from ansible.parsing.vault import VaultEditor -from ansible.utils.cli import base_parser, ask_vault_passwords +from ansible.utils.cli import CLI +from ansible.utils.display import Display -#------------------------------------------------------------------------------------- -# Utility functions for parsing actions/options -#------------------------------------------------------------------------------------- - - - -class Cli(object): +class VaultCli(CLI): + """ Vault command line class """ VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view") + CIPHER = 'AES256' - - def __init__(self, display=None): + def __init__(self, args, display=None): self.vault_pass = None - - if display is None: - self.display = Display() - else: - self.display = display - + super(VaultCli, self).__init__(args, display) def parse(self): # create parser for CLI options - parser = base_parser( + self.parser = CLI.base_parser( 
usage = "%prog vaultfile.yml", ) - return parser.parse_args() + self.set_action() + + # options specific to self.actions + if self.action == "create": + self.parser.set_usage("usage: %prog create [options] file_name") + elif self.action == "decrypt": + self.parser.set_usage("usage: %prog decrypt [options] file_name") + elif self.action == "edit": + self.parser.set_usage("usage: %prog edit [options] file_name") + elif self.action == "view": + self.parser.set_usage("usage: %prog view [options] file_name") + elif self.action == "encrypt": + self.parser.set_usage("usage: %prog encrypt [options] file_name") + elif self.action == "rekey": + self.parser.set_usage("usage: %prog rekey [options] file_name") - def run(self, options, args): + self.options, self.args = self.parser.parse_args() - action = self.get_action(args) + if len(self.args) == 0 or len(self.args) > 1: + self.parser.print_help() + raise AnsibleError("Vault requires a single filename as a parameter") - if not action: - parser.print_help() - raise AnsibleError("missing required action") + def run(self): - # options specific to actions - if action == "create": - parser.set_usage("usage: %prog create [options] file_name") - elif action == "decrypt": - parser.set_usage("usage: %prog decrypt [options] file_name") - elif action == "edit": - parser.set_usage("usage: %prog edit [options] file_name") - elif action == "view": - parser.set_usage("usage: %prog view [options] file_name") - elif action == "encrypt": - parser.set_usage("usage: %prog encrypt [options] file_name") - elif action == "rekey": - parser.set_usage("usage: %prog rekey [options] file_name") + if self.options.vault_password_file: + # read vault_pass from a file + self.vault_pass = read_vault_file(self.options.vault_password_file) + elif self.options.ask_vault_pass: + self.vault_pass, _= self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False) - if len(args) == 0 or len(args) > 1: - parser.print_help() - raise 
AnsibleError("Vault requires a single filename as a parameter") + self.execute() - if options.vault_password_file: - # read vault_pass from a file - self.vault_pass = read_vault_file(options.vault_password_file) - else: - self.vault_pass, _= ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False) - - # execute the desired action - fn = getattr(self, "execute_%s" % action) - fn(args, options) - - def get_action(self, args): - """ - Get the action the user wants to execute from the - sys argv list. - """ - for i in range(0,len(args)): - arg = args[i] - if arg in VALID_ACTIONS: - del args[i] - return arg - return None - - def execute_create(args, options): - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - this_editor = VaultEditor(cipher, self.vault_pass, args[0]) - this_editor.create_file() + def execute_create(self): - def execute_decrypt(args, options): + cipher = getattr(self.options, 'cipher', self.CIPHER) + this_editor = VaultEditor(cipher, self.vault_pass, self.args[0]) + this_editor.create_file() - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher + def execute_decrypt(self): - for f in args: + cipher = getattr(self.options, 'cipher', self.CIPHER) + for f in self.args: this_editor = VaultEditor(cipher, self.vault_pass, f) this_editor.decrypt_file() self.display.display("Decryption successful") - def execute_edit(args, options): + def execute_edit(self): - cipher = None - - for f in args: - this_editor = VaultEditor(cipher, self.vault_pass, f) + for f in self.args: + this_editor = VaultEditor(None, self.vault_pass, f) this_editor.edit_file() - def execute_view(args, options): + def execute_view(self): - cipher = None - - for f in args: - this_editor = VaultEditor(cipher, self.vault_pass, f) + for f in self.args: + this_editor = VaultEditor(None, self.vault_pass, f) this_editor.view_file() - def execute_encrypt(args, options): - - cipher = 'AES256' - if hasattr(options, 
'cipher'): - cipher = options.cipher + def execute_encrypt(self): - for f in args: + cipher = getattr(self.options, 'cipher', self.CIPHER) + for f in self.args: this_editor = VaultEditor(cipher, self.vault_pass, f) this_editor.encrypt_file() self.display.display("Encryption successful") - def execute_rekey(args, options ): - __, new_password = ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True) + def execute_rekey(self): + __, new_password = self.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True) - cipher = None - for f in args: - this_editor = VaultEditor(cipher, self.vault_pass, f) + for f in self.args: + this_editor = VaultEditor(None, self.vault_pass, f) this_editor.rekey_file(new_password) self.display.display("Rekey successful") @@ -179,14 +138,12 @@ class Cli(object): if __name__ == "__main__": display = Display() - #display.display(" ".join(sys.argv), log_only=True) - try: - cli = Cli(display=display) - (options, args) = cli.parse() - sys.exit(cli.run(options, args)) + cli = VaultCli(sys.argv, display=display) + cli.parse() + sys.exit(cli.run()) except AnsibleError as e: - display.error(str(e)) + display.display(str(e), stderr=True, color='red') sys.exit(1) except KeyboardInterrupt: display.error("interrupted") From cec4d0889bf9a7dcc22ad18caa084a7c4c998746 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 27 Apr 2015 12:13:17 -0400 Subject: [PATCH 0464/3617] now prevents option override --- v2/ansible/constants.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 6f35751b506c47..456beb8bbc40f4 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -44,9 +44,9 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False, if value: if integer: value = int(value) - if floating: + elif floating: value = float(value) - if islist: + elif islist: if isinstance(value, basestring): value 
= [x.strip() for x in value.split(',')] return value From b11cd73df1ac11b4718c882f4db3f8180f3121bf Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 29 Apr 2015 21:06:58 +0200 Subject: [PATCH 0465/3617] cloudstack: add tag support in utils --- lib/ansible/module_utils/cloudstack.py | 61 ++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 0c7da28e2a7798..518ef7a7326681 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -41,6 +41,10 @@ def __init__(self, module): if not has_lib_cs: module.fail_json(msg="python library cs required: pip install cs") + self.result = { + 'changed': False, + } + self.module = module self._connect() @@ -237,6 +241,63 @@ def get_hypervisor(self): self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor) + def get_tags(self, resource=None): + existing_tags = self.cs.listTags(resourceid=resource['id']) + if existing_tags: + return existing_tags['tag'] + return [] + + + def _delete_tags(self, resource, resource_type, tags): + existing_tags = resource['tags'] + tags_to_delete = [] + for existing_tag in existing_tags: + if existing_tag['key'] in tags: + if existing_tag['value'] != tags[existing_tag['key']]: + tags_to_delete.append(existing_tag) + else: + tags_to_delete.append(existing_tag) + if tags_to_delete: + self.result['changed'] = True + if not self.module.check_mode: + args = {} + args['resourceids'] = resource['id'] + args['resourcetype'] = resource_type + args['tags'] = tags_to_delete + self.cs.deleteTags(**args) + + + def _create_tags(self, resource, resource_type, tags): + tags_to_create = [] + for i, tag_entry in enumerate(tags): + tag = { + 'key': tag_entry['key'], + 'value': tag_entry['value'], + } + tags_to_create.append(tag) + if tags_to_create: + self.result['changed'] = True + if not self.module.check_mode: + args = {} + args['resourceids'] = resource['id'] + 
args['resourcetype'] = resource_type + args['tags'] = tags_to_create + self.cs.createTags(**args) + + + def ensure_tags(self, resource, resource_type=None): + if not resource_type or not resource: + self.module.fail_json(msg="Error: Missing resource or resource_type for tags.") + + if 'tags' in resource: + tags = self.module.params.get('tags') + if tags is not None: + self._delete_tags(resource, resource_type, tags) + self._create_tags(resource, resource_type, tags) + resource['tags'] = self.get_tags(resource) + return resource + + def get_capabilities(self, key=None): if self.capabilities: return self._get_by_key(key, self.capabilities) From be15d74935fbb4b822cb9f74ac50b0c8427a2e0f Mon Sep 17 00:00:00 2001 From: Nick Hammond Date: Wed, 29 Apr 2015 21:22:24 -0500 Subject: [PATCH 0466/3617] Add become docs to the intro configuration for #10881 --- docsite/rst/intro_configuration.rst | 43 +++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 91be8a98da242c..73d8fd0f0d6920 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -600,6 +600,49 @@ Configures the path to the Vault password file as an alternative to specifying ` As of 1.7 this file can also be a script. If you are using a script instead of a flat file, ensure that it is marked as executable, and that the password is printed to standard output. If your script needs to prompt for data, prompts can be sent to standard error. +.. _privilege_escalation: + +Privilege Escalation Settings +----------------------------- + +Ansible can use existing privilege escalation systems to allow a user to execute tasks as another. As of 1.9 ‘become’ supersedes the old sudo/su, while still being backwards compatible. Settings live under the [privilege_escalation] header. + +.. 
_become: + +become +====== + +The equivalent of adding sudo: or su: to a play or task, set to true/yes to activate privilege escalation. The default behavior is no:: + + become=True + +.. _become_method: + +become_method +============= + +Set the privilege escalation method. The default is ``sudo``, other options are ``su``, ``pbrun``, ``pfexec``:: + + become_method=su + +.. _become_user: + +become_user +============= + +The equivalent to ansible_sudo_user or ansible_su_user, allows to set the user you become through privilege escalation. The default is 'root':: + + become_user=root + +.. _become_ask_pass: + +become_ask_pass +=============== + +Ask for privilege escalation password, the default is False:: + + become_ask_pass=True + .. _paramiko_settings: Paramiko Specific Settings From ccc9a33b562de73adf4e5e2b94ec87d26e1237aa Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 29 Apr 2015 23:55:44 -0400 Subject: [PATCH 0467/3617] most of galaxy is working, install still needs work normalized return codes in bin refactored galaxy classes a bit, ignoring 'compatct' role spec --- v2/ansible/galaxy/__init__.py | 24 +++- v2/ansible/galaxy/api.py | 22 ++-- v2/ansible/galaxy/role.py | 150 +++++++--------------- v2/bin/ansible | 8 +- v2/bin/ansible-galaxy | 229 ++++++++++++++++++---------------- v2/bin/ansible-playbook | 9 +- v2/bin/ansible-vault | 10 +- 7 files changed, 221 insertions(+), 231 deletions(-) diff --git a/v2/ansible/galaxy/__init__.py b/v2/ansible/galaxy/__init__.py index c3d37fe22e91e6..3b89dac8472fe0 100644 --- a/v2/ansible/galaxy/__init__.py +++ b/v2/ansible/galaxy/__init__.py @@ -25,6 +25,10 @@ from ansible.errors import AnsibleError from ansible.utils.display import Display +# default_readme_template +# default_meta_template + + class Galaxy(object): ''' Keeps global galaxy info ''' @@ -36,13 +40,31 @@ def __init__(self, options, display=None): self.display = display self.options = options - self.roles_path = os.path.expanduser(self.options.roles_path) 
+ self.roles_path = getattr(self.options, 'roles_path', None) + if self.roles_path: + self.roles_path = os.path.expanduser(self.roles_path) self.roles = {} + # load data path for resource usage + this_dir, this_filename = os.path.split(__file__) + self.DATA_PATH = os.path.join(this_dir, "data") + + #TODO: move to getter for lazy loading + self.default_readme = self._str_from_data_file('readme') + self.default_meta = self._str_from_data_file('metadata_template.j2') + def add_role(self, role): self.roles[role.name] = role def remove_role(self, role_name): del self.roles[role_name] + + def _str_from_data_file(self, filename): + myfile = os.path.join(self.DATA_PATH, filename) + try: + return open(myfile).read() + except Exception as e: + raise AnsibleError("Could not open %s: %s" % (filename, str(e))) + diff --git a/v2/ansible/galaxy/api.py b/v2/ansible/galaxy/api.py index a9d1566e049bad..f14afc52d3a3a8 100755 --- a/v2/ansible/galaxy/api.py +++ b/v2/ansible/galaxy/api.py @@ -38,10 +38,12 @@ def __init__(self, galaxy, api_server): try: urlparse(api_server, scheme='https') except: - raise AnsibleError("Invalid server API url passed: %s" % self.galaxy.api_server) + raise AnsibleError("Invalid server API url passed: %s" % api_server) + + server_version = self.get_server_api_version('%s/api/' % (api_server)) + if not server_version: + raise AnsibleError("Could not retrieve server API version: %s" % api_server) - server_version = self.get_server_api_version(api_server) - self.galaxy.display.vvvvv("Server version: %s" % server_version) if server_version in self.SUPPORTED_VERSIONS: self.baseurl = '%s/api/%s' % (api_server, server_version) self.version = server_version # for future use @@ -54,22 +56,21 @@ def get_server_api_version(self, api_server): Fetches the Galaxy API current version to ensure the API server is up and reachable. 
""" + #TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1) + # also should set baseurl using supported_versions which has path + return 'v1' try: - self.galaxy.display.vvvvv("Querying server version: %s" % api_server) data = json.load(urlopen(api_server)) - if not data.get("current_version", None): - return None - else: - return data - except: + return data.get("current_version", 'v1') + except Exception as e: + # TODO: report error return None def lookup_role_by_name(self, role_name, notify=True): """ Find a role by name """ - role_name = urlquote(role_name) try: @@ -82,6 +83,7 @@ def lookup_role_by_name(self, role_name, notify=True): raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name) url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name) + self.galaxy.display.vvvv("- %s" % (url)) try: data = json.load(urlopen(url)) if len(data["results"]) != 0: diff --git a/v2/ansible/galaxy/role.py b/v2/ansible/galaxy/role.py index 0d13233e6a4483..b5a628726f5951 100644 --- a/v2/ansible/galaxy/role.py +++ b/v2/ansible/galaxy/role.py @@ -39,30 +39,20 @@ class GalaxyRole(object): ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') - def __init__(self, galaxy, role_name, role_version=None, role_url=None): + def __init__(self, galaxy, name, src=None, version=None, scm=None): + + self._metadata = None + self._install_info = None self.options = galaxy.options self.display = galaxy.display - self.name = role_name - self.meta_data = None - self.install_info = None - self.path = (os.path.join(galaxy.roles_path, self.name)) - - # TODO: possibly parse version and url from role_name - self.version = role_version - self.url = role_url - if self.url is None: - self._spec_parse() - - if C.GALAXY_SCMS: - self.scms = self.SUPPORTED_SCMS.intersection(set(C.GALAXY_SCMS)) - else: - self.scms = self.SUPPORTED_SCMS - - if not self.scms: - 
self.display.warning("No valid SCMs configured for Galaxy.") + self.name = name + self.version = version + self.src = src + self.scm = scm + self.path = (os.path.join(galaxy.roles_path, self.name)) def fetch_from_scm_archive(self): @@ -112,59 +102,44 @@ def fetch_from_scm_archive(self): return temp_file.name - - def get_metadata(self): + @property + def metadata(self): """ Returns role metadata """ - if self.meta_data is None: - self._read_metadata - - return self.meta_data - + if self._metadata is None: + meta_path = os.path.join(self.path, self.META_MAIN) + if os.path.isfile(meta_path): + try: + f = open(meta_path, 'r') + self._metadata = yaml.safe_load(f) + except: + self.display.vvvvv("Unable to load metadata for %s" % self.name) + return False + finally: + f.close() - def _read_metadata(self): - """ - Reads the metadata as YAML, if the file 'meta/main.yml' exists - """ - meta_path = os.path.join(self.path, self.META_MAIN) - if os.path.isfile(meta_path): - try: - f = open(meta_path, 'r') - self.meta_data = yaml.safe_load(f) - except: - self.display.vvvvv("Unable to load metadata for %s" % self.name) - return False - finally: - f.close() + return self._metadata - def get_galaxy_install_info(self): + @property + def install_info(self): """ Returns role install info """ - if self.install_info is None: - self._read_galaxy_isntall_info() - - return self.install_info - - - def _read_galaxy_install_info(self): - """ - Returns the YAML data contained in 'meta/.galaxy_install_info', - if it exists. 
- """ + if self._install_info is None: - info_path = os.path.join(self.path, self.META_INSTALL) - if os.path.isfile(info_path): - try: - f = open(info_path, 'r') - self.install_info = yaml.safe_load(f) - except: - self.display.vvvvv("Unable to load Galaxy install info for %s" % self.name) - return False - finally: - f.close() + info_path = os.path.join(self.path, self.META_INSTALL) + if os.path.isfile(info_path): + try: + f = open(info_path, 'r') + self._install_info = yaml.safe_load(f) + except: + self.display.vvvvv("Unable to load Galaxy install info for %s" % self.name) + return False + finally: + f.close() + return self._install_info def _write_galaxy_install_info(self): """ @@ -180,7 +155,7 @@ def _write_galaxy_install_info(self): info_path = os.path.join(self.path, self.META_INSTALL) try: f = open(info_path, 'w+') - self.install_info = yaml.safe_dump(info, f) + self._install_info = yaml.safe_dump(info, f) except: return False finally: @@ -194,7 +169,7 @@ def remove(self): sanity check to make sure there's a meta/main.yml file at this path so the user doesn't blow away random directories """ - if self.read_metadata(): + if self.metadata: try: rmtree(self.path) return True @@ -210,8 +185,8 @@ def fetch(self, target, role_data): """ # first grab the file and save it to a temp location - if self.url: - archive_url = self.url + if self.src: + archive_url = self.src else: archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target) self.display.display("- downloading role from %s" % archive_url) @@ -256,7 +231,7 @@ def install(self, role_filename): return False else: try: - self.meta_data = yaml.safe_load(role_tar_file.extractfile(meta_file)) + self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file)) except: self.display.error("this role does not appear to have a valid meta/main.yml file.") return False @@ -306,7 +281,8 @@ def install(self, role_filename): self.display.display("- %s was 
installed successfully" % self.name) return True - def get_spec(self): + @property + def spec(self): """ Returns role spec info { @@ -316,40 +292,4 @@ def get_spec(self): 'name': 'repo' } """ - if self.scm is None and self.url is None: - self._read_galaxy_isntall_info() - - return dict(scm=self.scm, src=self.url, version=self.version, role_name=self.name) - - def _spec_parse(self): - ''' creates separated parts of role spec ''' - default_role_versions = dict(git='master', hg='tip') - - if not self.url and '://' in self.name: - role_spec = self.name.strip() - - if role_spec == "" or role_spec.startswith("#"): - return - - tokens = [s.strip() for s in role_spec.split(',')] - - # assume https://github.com URLs are git+https:// URLs and not tarballs unless they end in '.zip' - if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'): - tokens[0] = 'git+' + tokens[0] - - if '+' in tokens[0]: - (self.scm, self.url) = tokens[0].split('+') - else: - self.scm = None - self.url = tokens[0] - - if len(tokens) >= 2: - self.version = tokens[1] - - if len(tokens) == 3: - self.name = tokens[2] - else: - self.name = self._repo_url_to_role_name(tokens[0]) - - if self.scm and not self.version: - self.version = default_role_versions.get(scm, '') + return dict(scm=self.scm, src=self.src, version=self.version, name=self.name) diff --git a/v2/bin/ansible b/v2/bin/ansible index 77446338da09d8..d08fd5ce5c6519 100755 --- a/v2/bin/ansible +++ b/v2/bin/ansible @@ -180,9 +180,13 @@ if __name__ == '__main__': cli = AdHocCli(sys.argv, display=display) cli.parse() sys.exit(cli.run()) - except AnsibleError as e: + except AnsibleOptionsError as e: + cli.parser.print_help() display.display(str(e), stderr=True, color='red') sys.exit(1) + except AnsibleError as e: + display.display(str(e), stderr=True, color='red') + sys.exit(2) except KeyboardInterrupt: display.error("interrupted") - sys.exit(1) + sys.exit(4) diff --git a/v2/bin/ansible-galaxy 
b/v2/bin/ansible-galaxy index cca1dd9d8356df..30b97535c9d0fb 100755 --- a/v2/bin/ansible-galaxy +++ b/v2/bin/ansible-galaxy @@ -46,6 +46,7 @@ from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.galaxy import Galaxy from ansible.galaxy.api import GalaxyAPI from ansible.galaxy.role import GalaxyRole +from ansible.playbook.role.requirement import RoleRequirement from ansible.utils.display import Display from ansible.utils.cli import CLI @@ -71,6 +72,10 @@ class GalaxyCLI(CLI): self.parser = parser self.set_action() + # verbose + self.parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count", + help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") + # options specific to actions if self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") @@ -108,8 +113,7 @@ class GalaxyCLI(CLI): 'ansible.cfg file (/etc/ansible/roles if not configured)') if self.action in ("info","init","install"): - self.parser.add_option( - '-s', '--server', dest='api_server', default="galaxy.ansible.com", + self.parser.add_option( '-s', '--server', dest='api_server', default="https://galaxy.ansible.com", help='The API server destination') if self.action in ("init","install"): @@ -119,20 +123,16 @@ class GalaxyCLI(CLI): # get options, args and galaxy object self.options, self.args =self.parser.parse_args() + self.display.verbosity = self.options.verbosity self.galaxy = Galaxy(self.options, self.display) - if len(self.args) != 1: - raise AnsibleOptionsError("Missing arguments") - return True def run(self): - #self.display.verbosity = self.options.verbosity - api_server = self.get_opt("api_server", "galaxy.ansible.com") - # if not offline, get connect to galaxy api - if self.action == 'init' and not self.options.offline: + if self.action in ("info","install") or (self.action == 'init' and not self.options.offline): + api_server = self.options.api_server self.api = GalaxyAPI(self.galaxy, api_server) if 
not self.api: raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server) @@ -157,13 +157,10 @@ class GalaxyCLI(CLI): Exits with the specified return code unless the option --ignore-errors was specified """ - if not self.get_opt("ignore_errors", False): - self.display.error('- you can use --ignore-errors to skip failed tasks/roles.') + self.display.error('- you can use --ignore-errors to skip failed roles and finish processing the list.') return rc - - def execute_init(self): """ Executes the init action, which creates the skeleton framework @@ -192,10 +189,10 @@ class GalaxyCLI(CLI): os.makedirs(role_path) readme_path = os.path.join(role_path, "README.md") f = open(readme_path, "wb") - f.write(default_readme_template) + f.write(self.galaxy.default_readme) f.close - for dir in self.ROLE_DIRS: + for dir in GalaxyRole.ROLE_DIRS: dir_path = os.path.join(init_path, role_name, dir) main_yml_path = os.path.join(dir_path, 'main.yml') # create the directory if it doesn't exist already @@ -232,7 +229,7 @@ class GalaxyCLI(CLI): platforms = platform_groups, categories = categories, ) - rendered_meta = Environment().from_string(default_meta_template).render(inject) + rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject) f = open(main_yml_path, 'w') f.write(rendered_meta) f.close() @@ -242,7 +239,7 @@ class GalaxyCLI(CLI): f = open(main_yml_path, 'w') f.write('---\n# %s file for %s\n' % (dir,role_name)) f.close() - print "- %s was created successfully" % role_name + self.display.display("- %s was created successfully" % role_name) def execute_info(self): """ @@ -260,43 +257,48 @@ class GalaxyCLI(CLI): for role in self.args: role_info = {} + gr = GalaxyRole(self.galaxy, role) + #self.galaxy.add_role(gr) - install_info = get_galaxy_install_info(role, options) + install_info = gr.install_info if install_info: if 'version' in install_info: install_info['intalled_version'] = install_info['version'] del 
install_info['version'] role_info.update(install_info) - remote_data = self.api.lookup_role_by_name(role, False) + remote_data = False + if self.api: + remote_data = self.api.lookup_role_by_name(role, False) + if remote_data: role_info.update(remote_data) - metadata = get_metadata(role, options) - if metadata: - role_info.update(metadata) + if gr.metadata: + role_info.update(gr.metadata) - role_spec = ansible.utils.role_spec_parse(role) + req = RoleRequirement() + __, __, role_spec= req.parse({'role': role}) if role_spec: role_info.update(role_spec) if role_info: - print "- %s:" % (role) + self.display.display("- %s:" % (role)) for k in sorted(role_info.keys()): - if k in SKIP_INFO_KEYS: + if k in self.SKIP_INFO_KEYS: continue if isinstance(role_info[k], dict): - print "\t%s: " % (k) + self.display.display("\t%s: " % (k)) for key in sorted(role_info[k].keys()): - if key in SKIP_INFO_KEYS: + if key in self.SKIP_INFO_KEYS: continue - print "\t\t%s: %s" % (key, role_info[k][key]) + self.display.display("\t\t%s: %s" % (key, role_info[k][key])) else: - print "\t%s: %s" % (k, role_info[k]) + self.display.display("\t%s: %s" % (k, role_info[k])) else: - print "- the role %s was not found" % role + self.display.display("- the role %s was not found" % role) def execute_install(self): """ @@ -321,100 +323,111 @@ class GalaxyCLI(CLI): roles_path = self.get_opt("roles_path") roles_done = [] + roles_left = [] + role_name = self.args.pop(0).strip() + + gr = GalaxyRole(self.galaxy, role_name) if role_file: f = open(role_file, 'r') if role_file.endswith('.yaml') or role_file.endswith('.yml'): roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f)) else: # roles listed in a file, one per line - roles_left = map(gr.get_spec, f.readlines()) + for rname in f.readlines(): + roles_left.append(GalaxyRole(self.galaxy, rname)) f.close() else: # roles were specified directly, so we'll just go out grab them # (and their dependencies, unless the user doesn't want us to). 
- roles_left = map(gr.get_spec, self.args) + for rname in self.args: + roles_left.append(GalaxyRole(self.galaxy, rname)) while len(roles_left) > 0: # query the galaxy API for the role data role_data = None role = roles_left.pop(0) - role_src = role.get("src") - role_scm = role.get("scm") - role_path = role.get("path") + role_src = role.src + role_scm = role.scm + role_path = role.path if role_path: - options.roles_path = role_path + self.options.roles_path = role_path else: - options.roles_path = roles_path + self.options.roles_path = roles_path - if os.path.isfile(role_src): + tmp_file = None + if role_src and os.path.isfile(role_src): # installing a local tar.gz tmp_file = role_src else: if role_scm: # create tar file from scm url - tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name")) - elif '://' in role_src: - # just download a URL - version will probably be in the URL - tmp_file = fetch_role(role_src, None, None, options) - else: - role_data = self.api.lookup_role_by_name(role_src) - if not role_data: - print "- sorry, %s was not found on %s." % (role_src, self.options.api_server) - exit_without_ignore(options) - continue - - role_versions = self.api.fetch_role_related('versions', role_data['id']) - if "version" not in role or role['version'] == '': - # convert the version names to LooseVersion objects - # and sort them to get the latest version. If there - # are no versions in the list, we'll grab the head - # of the master branch - if len(role_versions) > 0: - loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] - loose_versions.sort() - role["version"] = str(loose_versions[-1]) - else: - role["version"] = 'master' - elif role['version'] != 'master': - if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]: - print 'role is %s' % role - print "- the specified version (%s) was not found in the list of available versions (%s)." 
% (role['version'], role_versions) - exit_without_ignore(options) + tmp_file = scm_archive_role(role_scm, role_src, role.version, role.name) + if role_src: + if '://' in role_src: + # just download a URL - version will probably be in the URL + tmp_file = gr.fetch() + else: + role_data = self.api.lookup_role_by_name(role_src) + if not role_data: + self.display.warning("- sorry, %s was not found on %s." % (role_src, self.options.api_server)) + self.exit_without_ignore() continue - # download the role. if --no-deps was specified, we stop here, - # otherwise we recursively grab roles and all of their deps. - tmp_file = fetch_role(role_src, role["version"], role_data, options) + role_versions = self.api.fetch_role_related('versions', role_data['id']) + if not role.version: + # convert the version names to LooseVersion objects + # and sort them to get the latest version. If there + # are no versions in the list, we'll grab the head + # of the master branch + if len(role_versions) > 0: + loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] + loose_versions.sort() + role["version"] = str(loose_versions[-1]) + else: + role["version"] = 'master' + elif role['version'] != 'master': + if role_versions and role.version not in [a.get('name', None) for a in role_versions]: + self.display.warning('role is %s' % role) + self.display.warning("- the specified version (%s) was not found in the list of available versions (%s)." % (role.version, role_versions)) + self.exit_without_ignore() + continue + + # download the role. if --no-deps was specified, we stop here, + # otherwise we recursively grab roles and all of their deps. 
+ tmp_file = gr.fetch(role_data) installed = False if tmp_file: - installed = install_role(role.get("name"), role.get("version"), tmp_file, options) + installed = install_role(role.name, role.version, tmp_file, options) # we're done with the temp file, clean it up if tmp_file != role_src: os.unlink(tmp_file) # install dependencies, if we want them - if not no_deps and installed: - if not role_data: - role_data = gr.get_metadata(role.get("name"), options) - role_dependencies = role_data['dependencies'] - else: - role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id']) - for dep in role_dependencies: - if isinstance(dep, basestring): - dep = ansible.utils.role_spec_parse(dep) - else: - dep = ansible.utils.role_yaml_parse(dep) - if not get_role_metadata(dep["name"], options): - if dep not in roles_left: - print '- adding dependency: %s' % dep["name"] - roles_left.append(dep) - else: - print '- dependency %s already pending installation.' % dep["name"] - else: - print '- dependency %s is already installed, skipping.' % dep["name"] + + # this should use new roledepenencies code + #if not no_deps and installed: + # if not role_data: + # role_data = gr.get_metadata(role.get("name"), options) + # role_dependencies = role_data['dependencies'] + # else: + # role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id']) + # for dep in role_dependencies: + # if isinstance(dep, basestring): + # dep = ansible.utils.role_spec_parse(dep) + # else: + # dep = ansible.utils.role_yaml_parse(dep) + # if not get_role_metadata(dep["name"], options): + # if dep not in roles_left: + # print '- adding dependency: %s' % dep["name"] + # roles_left.append(dep) + # else: + # print '- dependency %s already pending installation.' % dep["name"] + # else: + # print '- dependency %s is already installed, skipping.' 
% dep["name"] + if not tmp_file or not installed: - print "- %s was NOT installed successfully." % role.get("name") - exit_without_ignore(options) + self.display.warning("- %s was NOT installed successfully." % role.name) + self.exit_without_ignore() return 0 def execute_remove(self): @@ -426,14 +439,16 @@ class GalaxyCLI(CLI): if len(self.args) == 0: raise AnsibleOptionsError('- you must specify at least one role to remove.') - for role in self.args: - if get_role_metadata(role, options): - if remove_role(role, options): - self.display.display('- successfully removed %s' % role) + for role_name in self.args: + role = GalaxyRole(self.galaxy, role_name) + try: + if role.remove(): + self.display.display('- successfully removed %s' % role_name) else: - self.display.display("- failed to remove role: %s" % role) - else: - self.display.display('- %s is not installed, skipping.' % role) + self.display.display('- %s is not installed, skipping.' % role_name) + except Exception as e: + raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e))) + return 0 def execute_list(self): @@ -449,20 +464,18 @@ class GalaxyCLI(CLI): if len(self.args) == 1: # show only the request role, if it exists - role_name = self.args[0] - gr = GalaxyRole(self.galaxy, role_name) - metadata = gr.get_metadata() - if metadata: - install_info = gr.get_galaxy_install_info() + gr = GalaxyRole(self.galaxy, self.name) + if gr.metadata: + install_info = gr.install_info version = None if install_info: version = install_info.get("version", None) if not version: version = "(unknown version)" # show some more info about single roles here - self.display.display("- %s, %s" % (role_name, version)) + self.display.display("- %s, %s" % (self.name, version)) else: - self.display.display("- the role %s was not found" % role_name) + self.display.display("- the role %s was not found" % self.name) else: # show all valid roles in the roles_path directory roles_path = self.get_opt('roles_path') @@ -473,8 +486,8 
@@ class GalaxyCLI(CLI): raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path) path_files = os.listdir(roles_path) for path_file in path_files: - if get_role_metadata(path_file, options): - install_info = get_galaxy_install_info(path_file, options) + if gr.metadata: + install_info = gr.metadata version = None if install_info: version = install_info.get("version", None) @@ -502,4 +515,4 @@ if __name__ == '__main__': sys.exit(2) except KeyboardInterrupt: display.error("interrupted") - sys.exit(3) + sys.exit(4) diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook index 700538cb56c20c..724c3ce027c17d 100755 --- a/v2/bin/ansible-playbook +++ b/v2/bin/ansible-playbook @@ -201,9 +201,14 @@ if __name__ == "__main__": cli = PlaybookCLI(sys.argv, display=display) cli.parse() sys.exit(cli.run()) - except AnsibleError as e: + except AnsibleOptionsError as e: + cli.parser.print_help() display.display(str(e), stderr=True, color='red') sys.exit(1) + except AnsibleError as e: + display.display(str(e), stderr=True, color='red') + sys.exit(2) except KeyboardInterrupt: display.error("interrupted") - sys.exit(1) + sys.exit(4) + diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault index 78686b6839a034..0437eac409bbe4 100755 --- a/v2/bin/ansible-vault +++ b/v2/bin/ansible-vault @@ -33,7 +33,7 @@ import os import sys import traceback -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.parsing.vault import VaultEditor from ansible.utils.cli import CLI from ansible.utils.display import Display @@ -142,9 +142,13 @@ if __name__ == "__main__": cli = VaultCli(sys.argv, display=display) cli.parse() sys.exit(cli.run()) - except AnsibleError as e: + except AnsibleOptionsError as e: + cli.parser.print_help() display.display(str(e), stderr=True, color='red') sys.exit(1) + except AnsibleError as e: + display.display(str(e), stderr=True, color='red') 
+ sys.exit(2) except KeyboardInterrupt: display.error("interrupted") - sys.exit(1) + sys.exit(4) From dabf16a714e1807f8b3da4a15e78ff968c910210 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 08:08:45 -0700 Subject: [PATCH 0468/3617] Update core module pointers for v1 and v2 --- lib/ansible/modules/core | 2 +- v2/ansible/modules/core | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e95c0b2df33cf8..e356692c74fed2 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e95c0b2df33cf84c517366b9a674454447ce6c3a +Subproject commit e356692c74fed2e8a072e0afc4cd23b71e6307ec diff --git a/v2/ansible/modules/core b/v2/ansible/modules/core index 34784b7a617aa3..8bfa8ad1bd263f 160000 --- a/v2/ansible/modules/core +++ b/v2/ansible/modules/core @@ -1 +1 @@ -Subproject commit 34784b7a617aa35d3b994c9f0795567afc6fb0b0 +Subproject commit 8bfa8ad1bd263f885a9cafd1ac1987d34dbdd73c From fb96173d10dc7e3ae21fb4ab608859c426e6f548 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 08:52:02 -0700 Subject: [PATCH 0469/3617] to_nice_json filter no longer has a trailing space when formatting dicts --- test/integration/roles/test_template/files/foo.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/integration/roles/test_template/files/foo.txt b/test/integration/roles/test_template/files/foo.txt index edd704da048007..84279bc7b3bf7c 100644 --- a/test/integration/roles/test_template/files/foo.txt +++ b/test/integration/roles/test_template/files/foo.txt @@ -1,8 +1,8 @@ templated_var_loaded { - "bool": true, - "multi_part": "1Foo", - "number": 5, + "bool": true, + "multi_part": "1Foo", + "number": 5, "string_num": "5" } From aafda44bb397ff516a5b43c04c837fdc083b9ac5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 30 Apr 2015 11:13:43 -0500 Subject: [PATCH 0470/3617] Add play to the parent object 
structures for inheritence in v2 --- v2/ansible/playbook/base.py | 14 ++++++ v2/ansible/playbook/block.py | 60 ++++++++++++++++------- v2/ansible/playbook/helpers.py | 6 ++- v2/ansible/playbook/play.py | 10 ++-- v2/ansible/playbook/role/__init__.py | 10 ++-- v2/ansible/playbook/taggable.py | 8 +-- v2/ansible/playbook/task.py | 26 +++++----- v2/ansible/plugins/strategies/__init__.py | 1 + v2/samples/test_tags.yml | 7 +++ 9 files changed, 98 insertions(+), 44 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 73eceba996ba6a..3a7879265ec3a5 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -324,6 +324,20 @@ def deserialize(self, data): # restore the UUID field setattr(self, '_uuid', data.get('uuid')) + def _extend_value(self, value, new_value): + ''' + Will extend the value given with new_value (and will turn both + into lists if they are not so already). The values are run through + a set to remove duplicate values. + ''' + + if not isinstance(value, list): + value = [ value ] + if not isinstance(new_value, list): + new_value = [ new_value ] + + return list(set(value + new_value)) + def __getstate__(self): return self.serialize() diff --git a/v2/ansible/playbook/block.py b/v2/ansible/playbook/block.py index b80deec6ed1e22..e6ad8e5745fb3d 100644 --- a/v2/ansible/playbook/block.py +++ b/v2/ansible/playbook/block.py @@ -37,10 +37,11 @@ class Block(Base, Become, Conditional, Taggable): # similar to the 'else' clause for exceptions #_otherwise = FieldAttribute(isa='list') - def __init__(self, parent_block=None, role=None, task_include=None, use_handlers=False): - self._parent_block = parent_block + def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False): + self._play = play self._role = role self._task_include = task_include + self._parent_block = parent_block self._use_handlers = use_handlers self._dep_chain = [] @@ -65,8 +66,8 @@ def get_vars(self): return 
all_vars @staticmethod - def load(data, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): - b = Block(parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers) + def load(data, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): + b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers) return b.load_data(data, variable_manager=variable_manager, loader=loader) def preprocess_data(self, ds): @@ -92,6 +93,7 @@ def preprocess_data(self, ds): def _load_block(self, attr, ds): return load_list_of_tasks( ds, + play=self._play, block=self, role=self._role, task_include=self._task_include, @@ -103,6 +105,7 @@ def _load_block(self, attr, ds): def _load_rescue(self, attr, ds): return load_list_of_tasks( ds, + play=self._play, block=self, role=self._role, task_include=self._task_include, @@ -114,6 +117,7 @@ def _load_rescue(self, attr, ds): def _load_always(self, attr, ds): return load_list_of_tasks( ds, + play=self._play, block=self, role=self._role, task_include=self._task_include, @@ -126,6 +130,7 @@ def _load_always(self, attr, ds): #def _load_otherwise(self, attr, ds): # return load_list_of_tasks( # ds, + # play=self._play, # block=self, # role=self._role, # task_include=self._task_include, @@ -148,6 +153,7 @@ def _dupe_task_list(task_list, new_block): return new_task_list new_me = super(Block, self).copy() + new_me._play = self._play new_me._use_handlers = self._use_handlers new_me._dep_chain = self._dep_chain[:] @@ -248,24 +254,44 @@ def set_loader(self, loader): for dep in self._dep_chain: dep.set_loader(loader) - def _get_parent_attribute(self, attr): + def _get_parent_attribute(self, attr, extend=False): ''' Generic logic to get the attribute or parent attribute for a block value. 
''' value = self._attributes[attr] - if not value: - if self._parent_block: - value = getattr(self._parent_block, attr) - elif self._role: - value = getattr(self._role, attr) - if not value and len(self._dep_chain): - reverse_dep_chain = self._dep_chain[:] - reverse_dep_chain.reverse() - for dep in reverse_dep_chain: - value = getattr(dep, attr) - if value: - break + if self._parent_block and (not value or extend): + parent_value = getattr(self._parent_block, attr) + if extend: + value = self._extend_value(value, parent_value) + else: + value = parent_value + if self._task_include and (not value or extend): + parent_value = getattr(self._task_include, attr) + if extend: + value = self._extend_value(value, parent_value) + else: + value = parent_value + if self._role and (not value or extend): + parent_value = getattr(self._role, attr) + if len(self._dep_chain) and (not value or extend): + reverse_dep_chain = self._dep_chain[:] + reverse_dep_chain.reverse() + for dep in reverse_dep_chain: + dep_value = getattr(dep, attr) + if extend: + value = self._extend_value(value, parent_value) + else: + value = parent_value + + if value and not extend: + break + if self._play and (not value or extend): + parent_value = getattr(self._play, attr) + if extend: + value = self._extend_value(value, parent_value) + else: + value = parent_value return value diff --git a/v2/ansible/playbook/helpers.py b/v2/ansible/playbook/helpers.py index 92f1c64c83e8fa..302e14a6e097a4 100644 --- a/v2/ansible/playbook/helpers.py +++ b/v2/ansible/playbook/helpers.py @@ -26,7 +26,7 @@ from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence -def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): +def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): ''' Given a list of mixed task/block data (parsed from YAML), return a 
list of Block() objects, where implicit blocks @@ -43,6 +43,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use for block in ds: b = Block.load( block, + play=play, parent_block=parent_block, role=role, task_include=task_include, @@ -55,7 +56,7 @@ def load_list_of_blocks(ds, parent_block=None, role=None, task_include=None, use return block_list -def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): +def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): ''' Given a list of task datastructures (parsed from YAML), return a list of Task() or TaskInclude() objects. @@ -76,6 +77,7 @@ def load_list_of_tasks(ds, block=None, role=None, task_include=None, use_handler if 'block' in task: t = Block.load( task, + play=play, parent_block=block, role=role, task_include=task_include, diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py index 457f23810904f0..b99c01fdf74e63 100644 --- a/v2/ansible/playbook/play.py +++ b/v2/ansible/playbook/play.py @@ -145,28 +145,28 @@ def _load_tasks(self, attr, ds): Loads a list of blocks from a list which may be mixed tasks/blocks. Bare tasks outside of a block are given an implicit block. ''' - return load_list_of_blocks(ds, variable_manager=self._variable_manager, loader=self._loader) + return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) def _load_pre_tasks(self, attr, ds): ''' Loads a list of blocks from a list which may be mixed tasks/blocks. Bare tasks outside of a block are given an implicit block. 
''' - return load_list_of_blocks(ds, variable_manager=self._variable_manager, loader=self._loader) + return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) def _load_post_tasks(self, attr, ds): ''' Loads a list of blocks from a list which may be mixed tasks/blocks. Bare tasks outside of a block are given an implicit block. ''' - return load_list_of_blocks(ds, variable_manager=self._variable_manager, loader=self._loader) + return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) def _load_handlers(self, attr, ds): ''' Loads a list of blocks from a list which may be mixed handlers/blocks. Bare handlers outside of a block are given an implicit block. ''' - return load_list_of_blocks(ds, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader) + return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader) def _load_roles(self, attr, ds): ''' @@ -196,7 +196,7 @@ def _compile_roles(self): if len(self.roles) > 0: for r in self.roles: - block_list.extend(r.compile()) + block_list.extend(r.compile(play=self)) return block_list diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index bc4d4262eb17c5..33935d197f7d27 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -79,6 +79,7 @@ def __init__(self): self._loader = None self._metadata = None + self._play = None self._parents = [] self._dependencies = [] self._task_blocks = [] @@ -163,11 +164,11 @@ def _load_role_data(self, role_include, parent_role=None): task_data = self._load_role_yaml('tasks') if task_data: - self._task_blocks = load_list_of_blocks(task_data, role=self, loader=self._loader) + self._task_blocks = load_list_of_blocks(task_data, play=None, role=self, loader=self._loader) handler_data = self._load_role_yaml('handlers') if handler_data: - 
self._handler_blocks = load_list_of_blocks(handler_data, role=self, loader=self._loader) + self._handler_blocks = load_list_of_blocks(handler_data, play=None, role=self, loader=self._loader) # vars and default vars are regular dictionaries self._role_vars = self._load_role_yaml('vars') @@ -293,7 +294,7 @@ def has_run(self): return self._had_task_run and self._completed - def compile(self, dep_chain=[]): + def compile(self, play, dep_chain=[]): ''' Returns the task list for this role, which is created by first recursively compiling the tasks for all direct dependencies, and @@ -311,10 +312,11 @@ def compile(self, dep_chain=[]): deps = self.get_direct_dependencies() for dep in deps: - dep_blocks = dep.compile(dep_chain=new_dep_chain) + dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain) for dep_block in dep_blocks: new_dep_block = dep_block.copy() new_dep_block._dep_chain = new_dep_chain + new_dep_block._play = play block_list.append(new_dep_block) block_list.extend(self._task_blocks) diff --git a/v2/ansible/playbook/taggable.py b/v2/ansible/playbook/taggable.py index f721cd195f4cd5..3622dc34b27e0e 100644 --- a/v2/ansible/playbook/taggable.py +++ b/v2/ansible/playbook/taggable.py @@ -26,7 +26,7 @@ class Taggable: untagged = set(['untagged']) - _tags = FieldAttribute(isa='list', default=[]) + _tags = FieldAttribute(isa='list', default=None) def __init__(self): super(Taggable, self).__init__() @@ -44,9 +44,11 @@ def _get_attr_tags(self): Override for the 'tags' getattr fetcher, used from Base. 
''' tags = self._attributes['tags'] + if tags is None: + tags = [] if hasattr(self, '_get_parent_attribute'): - tags.extend(self._get_parent_attribute('tags')) - return list(set(tags)) + tags = self._get_parent_attribute('tags', extend=True) + return tags def evaluate_tags(self, only_tags, skip_tags, all_vars): ''' this checks if the current item should be executed depending on tag options ''' diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index bdffc13eb8097d..06f7239d1bd53b 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -205,16 +205,6 @@ def get_vars(self): del all_vars['when'] return all_vars - # no longer used, as blocks are the lowest level of compilation now - #def compile(self): - # ''' - # For tasks, this is just a dummy method returning an array - # with 'self' in it, so we don't have to care about task types - # further up the chain. - # ''' - # - # return [self] - def copy(self, exclude_block=False): new_me = super(Task, self).copy() @@ -299,12 +289,22 @@ def set_loader(self, loader): if self._task_include: self._task_include.set_loader(loader) - def _get_parent_attribute(self, attr): + def _get_parent_attribute(self, attr, extend=False): ''' Generic logic to get the attribute or parent attribute for a task value. 
''' value = self._attributes[attr] - if not value and self._block: - value = getattr(self._block, attr) + if self._block and (not value or extend): + parent_value = getattr(self._block, attr) + if extend: + value = self._extend_value(value, parent_value) + else: + value = parent_value + if self._task_include and (not value or extend): + parent_value = getattr(self._task_include, attr) + if extend: + value = self._extend_value(value, parent_value) + else: + value = parent_value return value diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py index d01360463b6f99..238c6222a831ef 100644 --- a/v2/ansible/plugins/strategies/__init__.py +++ b/v2/ansible/plugins/strategies/__init__.py @@ -308,6 +308,7 @@ def _load_included_file(self, included_file): is_handler = isinstance(included_file._task, Handler) block_list = load_list_of_blocks( data, + play=included_file._task._block._play, parent_block=included_file._task._block, task_include=included_file._task, role=included_file._task._role, diff --git a/v2/samples/test_tags.yml b/v2/samples/test_tags.yml index c94b88e0a0c573..d352cf9bfb40ca 100644 --- a/v2/samples/test_tags.yml +++ b/v2/samples/test_tags.yml @@ -1,10 +1,17 @@ - hosts: localhost gather_facts: no + vars: + a: "tags" + tags: + - play tasks: - block: - debug: msg="this is the tagged block" tags: - block + - include: include.yml + tags: + - include - block: - debug: msg="tagged debug from second block" tags: From 8d0ceeca910894ee4f53ab452cd519b555d7b9e3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 09:23:57 -0700 Subject: [PATCH 0471/3617] Make template test work with both py2.6 and 2.7+ --- .../integration/roles/test_template/files/foo-py26.txt | 8 ++++++++ test/integration/roles/test_template/files/foo.txt | 6 +++--- test/integration/roles/test_template/tasks/main.yml | 10 ++++++++++ 3 files changed, 21 insertions(+), 3 deletions(-) create mode 100644 
test/integration/roles/test_template/files/foo-py26.txt diff --git a/test/integration/roles/test_template/files/foo-py26.txt b/test/integration/roles/test_template/files/foo-py26.txt new file mode 100644 index 00000000000000..84279bc7b3bf7c --- /dev/null +++ b/test/integration/roles/test_template/files/foo-py26.txt @@ -0,0 +1,8 @@ +templated_var_loaded + +{ + "bool": true, + "multi_part": "1Foo", + "number": 5, + "string_num": "5" +} diff --git a/test/integration/roles/test_template/files/foo.txt b/test/integration/roles/test_template/files/foo.txt index 84279bc7b3bf7c..edd704da048007 100644 --- a/test/integration/roles/test_template/files/foo.txt +++ b/test/integration/roles/test_template/files/foo.txt @@ -1,8 +1,8 @@ templated_var_loaded { - "bool": true, - "multi_part": "1Foo", - "number": 5, + "bool": true, + "multi_part": "1Foo", + "number": 5, "string_num": "5" } diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index 2568843cf7e090..0574868c9cd4bc 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -41,8 +41,18 @@ # VERIFY CONTENTS +- name: check what python version ansible is running on + command: python -c 'import distutils.sysconfig ; print(distutils.sysconfig.get_python_version())' + register: pyver + delegate_to: localhost + - name: copy known good into place copy: src=foo.txt dest={{output_dir}}/foo.txt + when: pyver.stdout != '2.6' + +- name: copy known good into place + copy: src=foo-py2.6.txt dest={{output_dir}}/foo.txt + when: pyver.stdout == '2.6' - name: compare templated file to known good shell: diff {{output_dir}}/foo.templated {{output_dir}}/foo.txt From b851ce29e93813948b5078c5dd8698a525d7bbc0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 12:55:59 -0700 Subject: [PATCH 0472/3617] Update core modules to pick up mysql_user fix --- lib/ansible/modules/core | 2 +- 
v2/ansible/modules/core | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e356692c74fed2..c5f3df809fba49 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e356692c74fed2e8a072e0afc4cd23b71e6307ec +Subproject commit c5f3df809fba49fe84d20e8cd3cb7e18b5a7f960 diff --git a/v2/ansible/modules/core b/v2/ansible/modules/core index 8bfa8ad1bd263f..80dc34147d6458 160000 --- a/v2/ansible/modules/core +++ b/v2/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8bfa8ad1bd263f885a9cafd1ac1987d34dbdd73c +Subproject commit 80dc34147d645892ff44f70e96caf4f6d5b162b5 From 974731bec0578cb18800c4583954c2ab85404538 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 13:54:03 -0700 Subject: [PATCH 0473/3617] Fix filename of output file --- test/integration/roles/test_template/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index 0574868c9cd4bc..a35b93d9d924a8 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -51,7 +51,7 @@ when: pyver.stdout != '2.6' - name: copy known good into place - copy: src=foo-py2.6.txt dest={{output_dir}}/foo.txt + copy: src=foo-py26.txt dest={{output_dir}}/foo.txt when: pyver.stdout == '2.6' - name: compare templated file to known good From 3ccc2ae2992fad677734dada597e4fa61b00ec27 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 15:25:53 -0700 Subject: [PATCH 0474/3617] Fix include test to keep type --- .../roles/test_includes/tasks/main.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/test_includes/tasks/main.yml b/test/integration/roles/test_includes/tasks/main.yml index 7cf9283f9a3965..fb76841fdabc62 100644 --- 
a/test/integration/roles/test_includes/tasks/main.yml +++ b/test/integration/roles/test_includes/tasks/main.yml @@ -26,13 +26,29 @@ - "cb == '2'" - "cc == '3'" +# Fact takes precedence over include param as fact is host-specific - set_fact: - a: 101 + a: 101 b: 102 c: 103 - include: included_task1.yml a={{a}} b={{b}} c=103 +- name: verify variable include params + assert: + that: + - "ca == 101" + - "cb == 102" + - "cc == 103" + +# Test that strings are not turned into numbers +- set_fact: + a: "101" + b: "102" + c: "103" + +- include: included_task1.yml a={{a}} b={{b}} c=103 + - name: verify variable include params assert: that: From 6a985b9c6b6d2e867159d348d3b769488f490d4a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 16:16:27 -0700 Subject: [PATCH 0475/3617] Add Fedora mysql vars so we get mariadb rather than mariadb-galera --- test/integration/roles/setup_mysql_db/tasks/main.yml | 2 ++ test/integration/roles/setup_mysql_db/vars/Fedora.yml | 6 ++++++ 2 files changed, 8 insertions(+) create mode 100644 test/integration/roles/setup_mysql_db/vars/Fedora.yml diff --git a/test/integration/roles/setup_mysql_db/tasks/main.yml b/test/integration/roles/setup_mysql_db/tasks/main.yml index a36abeb6c2f607..a8010e71389186 100644 --- a/test/integration/roles/setup_mysql_db/tasks/main.yml +++ b/test/integration/roles/setup_mysql_db/tasks/main.yml @@ -20,7 +20,9 @@ - include_vars: '{{ item }}' with_first_found: - files: + - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml' - '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml' + - '{{ ansible_distribution }}.yml' - '{{ ansible_os_family }}.yml' paths: '../vars' diff --git a/test/integration/roles/setup_mysql_db/vars/Fedora.yml b/test/integration/roles/setup_mysql_db/vars/Fedora.yml new file mode 100644 index 00000000000000..f8b29fd7a16162 --- /dev/null +++ b/test/integration/roles/setup_mysql_db/vars/Fedora.yml @@ -0,0 +1,6 @@ +mysql_service: 'mariadb' + 
+mysql_packages: + - mariadb-server + - MySQL-python + - bzip2 From f2afd1a24834707b6627a6a648795c3607634dbf Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 17:44:38 -0700 Subject: [PATCH 0476/3617] Update core pointer to pick up docker fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c5f3df809fba49..e51ea29d8f69f7 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c5f3df809fba49fe84d20e8cd3cb7e18b5a7f960 +Subproject commit e51ea29d8f69f79c239a2f80f79edbb2d9fcc496 From da5e201b0786638801346dfe443f4fe83fe530b2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 17:48:03 -0700 Subject: [PATCH 0477/3617] Change python-q into sharutils as the epel repo for centos6 is being funky. --- test/integration/roles/test_yum/tasks/yum.yml | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/test/integration/roles/test_yum/tasks/yum.yml b/test/integration/roles/test_yum/tasks/yum.yml index 78bb9abf7830a0..923717552b5acb 100644 --- a/test/integration/roles/test_yum/tasks/yum.yml +++ b/test/integration/roles/test_yum/tasks/yum.yml @@ -84,8 +84,8 @@ - "not yum_result.changed" # Multiple packages -- name: uninstall sos and python-q - yum: name=sos,python-q state=removed +- name: uninstall sos and sharutils + yum: name=sos,sharutils state=removed register: yum_result - name: check sos with rpm @@ -93,19 +93,19 @@ failed_when: False register: rpm_sos_result -- name: check python-q with rpm - shell: rpm -q python-q +- name: check sharutils with rpm + shell: rpm -q sharutils failed_when: False - register: rpm_python_q_result + register: rpm_sharutils_result - name: verify packages installed assert: that: - "rpm_sos_result.rc != 0" - - "rpm_python_q_result.rc != 0" + - "rpm_sharutils_result.rc != 0" -- name: install sos and python-q as comma separated - yum: 
name=sos,python-q state=present +- name: install sos and sharutils as comma separated + yum: name=sos,sharutils state=present register: yum_result - name: check sos with rpm @@ -113,10 +113,10 @@ failed_when: False register: rpm_sos_result -- name: check python-q with rpm - shell: rpm -q python-q +- name: check sharutils with rpm + shell: rpm -q sharutils failed_when: False - register: rpm_python_q_result + register: rpm_sharutils_result - name: verify packages installed assert: @@ -124,17 +124,17 @@ - "yum_result.rc == 0" - "yum_result.changed" - "rpm_sos_result.rc == 0" - - "rpm_python_q_result.rc == 0" + - "rpm_sharutils_result.rc == 0" -- name: uninstall sos and python-q - yum: name=sos,python-q state=removed +- name: uninstall sos and sharutils + yum: name=sos,sharutils state=removed register: yum_result -- name: install sos and python-q as list +- name: install sos and sharutils as list yum: name: - sos - - python-q + - sharutils state: present register: yum_result @@ -143,10 +143,10 @@ failed_when: False register: rpm_sos_result -- name: check python-q with rpm - shell: rpm -q python-q +- name: check sharutils with rpm + shell: rpm -q sharutils failed_when: False - register: rpm_python_q_result + register: rpm_sharutils_result - name: verify packages installed assert: @@ -154,15 +154,15 @@ - "yum_result.rc == 0" - "yum_result.changed" - "rpm_sos_result.rc == 0" - - "rpm_python_q_result.rc == 0" + - "rpm_sharutils_result.rc == 0" -- name: uninstall sos and python-q - yum: name=sos,python-q state=removed +- name: uninstall sos and sharutils + yum: name=sos,sharutils state=removed register: yum_result -- name: install sos and python-q as comma separated with spaces +- name: install sos and sharutils as comma separated with spaces yum: - name: "sos, python-q" + name: "sos, sharutils" state: present register: yum_result @@ -172,9 +172,9 @@ register: rpm_sos_result - name: check sos with rpm - shell: rpm -q python-q + shell: rpm -q sharutils failed_when: False - 
register: rpm_python_q_result + register: rpm_sharutils_result - name: verify packages installed assert: @@ -182,7 +182,7 @@ - "yum_result.rc == 0" - "yum_result.changed" - "rpm_sos_result.rc == 0" - - "rpm_python_q_result.rc == 0" + - "rpm_sharutils_result.rc == 0" -- name: uninstall sos and python-q - yum: name=sos,python-q state=removed +- name: uninstall sos and sharutils + yum: name=sos,sharutils state=removed From 040a39f249b1de57befb485ab7ee406f4cd9898a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Apr 2015 18:43:53 -0400 Subject: [PATCH 0478/3617] there will be only one cli --- v2/ansible/{utils/cli.py => cli/__init__.py} | 124 +++++++++--------- v2/{bin/ansible => ansible/cli/adhoc.py} | 41 +----- .../ansible-galaxy => ansible/cli/galaxy.py} | 23 ---- .../cli/playbook.py} | 35 ----- .../ansible-vault => ansible/cli/vault.py} | 35 +---- 5 files changed, 68 insertions(+), 190 deletions(-) rename v2/ansible/{utils/cli.py => cli/__init__.py} (83%) rename v2/{bin/ansible => ansible/cli/adhoc.py} (83%) mode change 100755 => 100644 rename v2/{bin/ansible-galaxy => ansible/cli/galaxy.py} (96%) mode change 100755 => 100644 rename v2/{bin/ansible-playbook => ansible/cli/playbook.py} (88%) mode change 100755 => 100644 rename v2/{bin/ansible-vault => ansible/cli/vault.py} (80%) mode change 100755 => 100644 diff --git a/v2/ansible/utils/cli.py b/v2/ansible/cli/__init__.py similarity index 83% rename from v2/ansible/utils/cli.py rename to v2/ansible/cli/__init__.py index 0cceab01968174..e1ea57630188f7 100644 --- a/v2/ansible/utils/cli.py +++ b/v2/ansible/cli/__init__.py @@ -31,9 +31,6 @@ from ansible.errors import AnsibleError from ansible.utils.unicode import to_bytes -# FIXME: documentation for methods here, which have mostly been -# copied directly over from the old utils/__init__.py - class SortedOptParser(optparse.OptionParser): '''Optparser which sorts the options by opt before outputting --help''' @@ -92,6 +89,7 @@ def run(self): @staticmethod def 
ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False): + ''' prompt for vault password and/or password change ''' vault_pass = None new_vault_pass = None @@ -122,6 +120,7 @@ def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_ def ask_passwords(self): + ''' prompt for connection and become passwords if needed ''' op = self.options sshpass = None @@ -162,6 +161,7 @@ def normalize_become_options(self): def validate_conflicts(self): + ''' check for conflicting options ''' op = self.options @@ -186,7 +186,7 @@ def validate_conflicts(self): @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False): - ''' create an options parser for any ansible script ''' + ''' create an options parser for most ansible scripts ''' parser = SortedOptParser(usage, version=CLI.version("%prog")) @@ -290,6 +290,7 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, @staticmethod def version(prog): + ''' return ansible version ''' result = "{0} {1}".format(prog, __version__) gitinfo = _gitinfo() if gitinfo: @@ -299,6 +300,7 @@ def version(prog): @staticmethod def version_info(gitinfo=False): + ''' return full ansible version info ''' if gitinfo: # expensive call, user with care ansible_version_string = version('') @@ -322,61 +324,63 @@ def version_info(gitinfo=False): 'minor': ansible_versions[1], 'revision': ansible_versions[2]} -def _git_repo_info(repo_path): - ''' returns a string containing git branch, commit id and commit date ''' - result = None - if os.path.exists(repo_path): - # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. - if os.path.isfile(repo_path): - try: - gitdir = yaml.safe_load(open(repo_path)).get('gitdir') - # There is a possibility the .git file to have an absolute path. 
- if os.path.isabs(gitdir): - repo_path = gitdir - else: - repo_path = os.path.join(repo_path[:-4], gitdir) - except (IOError, AttributeError): - return '' - f = open(os.path.join(repo_path, "HEAD")) - branch = f.readline().split('/')[-1].rstrip("\n") - f.close() - branch_path = os.path.join(repo_path, "refs", "heads", branch) - if os.path.exists(branch_path): - f = open(branch_path) - commit = f.readline()[:10] + @staticmethod + def _git_repo_info(repo_path): + ''' returns a string containing git branch, commit id and commit date ''' + result = None + if os.path.exists(repo_path): + # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. + if os.path.isfile(repo_path): + try: + gitdir = yaml.safe_load(open(repo_path)).get('gitdir') + # There is a possibility the .git file to have an absolute path. + if os.path.isabs(gitdir): + repo_path = gitdir + else: + repo_path = os.path.join(repo_path[:-4], gitdir) + except (IOError, AttributeError): + return '' + f = open(os.path.join(repo_path, "HEAD")) + branch = f.readline().split('/')[-1].rstrip("\n") f.close() + branch_path = os.path.join(repo_path, "refs", "heads", branch) + if os.path.exists(branch_path): + f = open(branch_path) + commit = f.readline()[:10] + f.close() + else: + # detached HEAD + commit = branch[:10] + branch = 'detached HEAD' + branch_path = os.path.join(repo_path, "HEAD") + + date = time.localtime(os.stat(branch_path).st_mtime) + if time.daylight == 0: + offset = time.timezone + else: + offset = time.altzone + result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, + time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36)) else: - # detached HEAD - commit = branch[:10] - branch = 'detached HEAD' - branch_path = os.path.join(repo_path, "HEAD") - - date = time.localtime(os.stat(branch_path).st_mtime) - if time.daylight == 0: - offset = time.timezone - else: - offset = time.altzone - result = "({0} {1}) last updated {2} (GMT 
{3:+04d})".format(branch, commit, - time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36)) - else: - result = '' - return result - -def _gitinfo(): - basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') - repo_path = os.path.join(basedir, '.git') - result = _git_repo_info(repo_path) - submodules = os.path.join(basedir, '.gitmodules') - if not os.path.exists(submodules): - return result - f = open(submodules) - for line in f: - tokens = line.strip().split(' ') - if tokens[0] == 'path': - submodule_path = tokens[2] - submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git')) - if not submodule_info: - submodule_info = ' not found - use git submodule update --init ' + submodule_path - result += "\n {0}: {1}".format(submodule_path, submodule_info) - f.close() - return result + result = '' + return result + + @staticmethod + def _gitinfo(): + basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') + repo_path = os.path.join(basedir, '.git') + result = _git_repo_info(repo_path) + submodules = os.path.join(basedir, '.gitmodules') + if not os.path.exists(submodules): + return result + f = open(submodules) + for line in f: + tokens = line.strip().split(' ') + if tokens[0] == 'path': + submodule_path = tokens[2] + submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git')) + if not submodule_info: + submodule_info = ' not found - use git submodule update --init ' + submodule_path + result += "\n {0}: {1}".format(submodule_path, submodule_info) + f.close() + return result diff --git a/v2/bin/ansible b/v2/ansible/cli/adhoc.py old mode 100755 new mode 100644 similarity index 83% rename from v2/bin/ansible rename to v2/ansible/cli/adhoc.py index d08fd5ce5c6519..5b34acf13ef80d --- a/v2/bin/ansible +++ b/v2/ansible/cli/adhoc.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - # (c) 2012, Michael DeHaan # # This file is part of Ansible @@ -18,18 +16,6 @@ # along with Ansible. If not, see . 
######################################################## - -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. - pass - import os import sys @@ -47,7 +33,7 @@ ######################################################## -class AdHocCli(CLI): +class AdHocCLI(CLI): ''' code behind ansible ad-hoc cli''' def parse(self): @@ -72,8 +58,7 @@ def parse(self): self.options, self.args = self.parser.parse_args() if len(self.args) != 1: - self.parser.print_help() - sys.exit(1) + raise AnsibleOptionsError("Missing target hosts") self.display.verbosity = self.options.verbosity self.validate_conflicts() @@ -141,10 +126,10 @@ def run(self): play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) # now create a task queue manager to execute the play + tqm = None try: tqm = TaskQueueManager( inventory=inventory, - callback='minimal', variable_manager=variable_manager, loader=loader, display=self.display, @@ -170,23 +155,3 @@ def poll_while_needed(self, poller): return poller.results - -######################################################## - -if __name__ == '__main__': - - display = Display() - try: - cli = AdHocCli(sys.argv, display=display) - cli.parse() - sys.exit(cli.run()) - except AnsibleOptionsError as e: - cli.parser.print_help() - display.display(str(e), stderr=True, color='red') - sys.exit(1) - except AnsibleError as e: - display.display(str(e), stderr=True, color='red') - sys.exit(2) - except KeyboardInterrupt: - display.error("interrupted") - sys.exit(4) diff --git a/v2/bin/ansible-galaxy b/v2/ansible/cli/galaxy.py old mode 100755 new mode 100644 similarity index 96% rename from v2/bin/ansible-galaxy rename to 
v2/ansible/cli/galaxy.py index 30b97535c9d0fb..76633162ed1a7d --- a/v2/bin/ansible-galaxy +++ b/v2/ansible/cli/galaxy.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - ######################################################################## # # (C) 2013, James Cammarata @@ -495,24 +493,3 @@ def execute_list(self): version = "(unknown version)" self.display.display("- %s, %s" % (path_file, version)) return 0 - -#------------------------------------------------------------------------------------- -# The main entry point -#------------------------------------------------------------------------------------- -if __name__ == '__main__': - - display = Display() - try: - cli = GalaxyCLI(sys.argv, display=display) - cli.parse() - sys.exit(cli.run()) - except AnsibleOptionsError as e: - cli.parser.print_help() - display.display(str(e), stderr=True, color='red') - sys.exit(1) - except AnsibleError as e: - display.display(str(e), stderr=True, color='red') - sys.exit(2) - except KeyboardInterrupt: - display.error("interrupted") - sys.exit(4) diff --git a/v2/bin/ansible-playbook b/v2/ansible/cli/playbook.py old mode 100755 new mode 100644 similarity index 88% rename from v2/bin/ansible-playbook rename to v2/ansible/cli/playbook.py index 724c3ce027c17d..e7666682e3c3c2 --- a/v2/bin/ansible-playbook +++ b/v2/ansible/cli/playbook.py @@ -18,20 +18,6 @@ # along with Ansible. If not, see . ######################################################## -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. 
- pass - import os import stat import sys @@ -191,24 +177,3 @@ def run(self): return 0 else: return results - -######################################################## - -if __name__ == "__main__": - - display = Display() - try: - cli = PlaybookCLI(sys.argv, display=display) - cli.parse() - sys.exit(cli.run()) - except AnsibleOptionsError as e: - cli.parser.print_help() - display.display(str(e), stderr=True, color='red') - sys.exit(1) - except AnsibleError as e: - display.display(str(e), stderr=True, color='red') - sys.exit(2) - except KeyboardInterrupt: - display.error("interrupted") - sys.exit(4) - diff --git a/v2/bin/ansible-vault b/v2/ansible/cli/vault.py old mode 100755 new mode 100644 similarity index 80% rename from v2/bin/ansible-vault rename to v2/ansible/cli/vault.py index 0437eac409bbe4..62ec5a373b629a --- a/v2/bin/ansible-vault +++ b/v2/ansible/cli/vault.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - # (c) 2014, James Tanner # # Ansible is free software: you can redistribute it and/or modify @@ -18,17 +16,6 @@ # ansible-vault is a script that encrypts/decrypts YAML files. See # http://docs.ansible.com/playbooks_vault.html for more details. -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. 
- pass - import os import sys import traceback @@ -38,7 +25,7 @@ from ansible.utils.cli import CLI from ansible.utils.display import Display -class VaultCli(CLI): +class VaultCLI(CLI): """ Vault command line class """ VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view") @@ -132,23 +119,3 @@ def execute_rekey(self): this_editor.rekey_file(new_password) self.display.display("Rekey successful") - -######################################################## - -if __name__ == "__main__": - - display = Display() - try: - cli = VaultCli(sys.argv, display=display) - cli.parse() - sys.exit(cli.run()) - except AnsibleOptionsError as e: - cli.parser.print_help() - display.display(str(e), stderr=True, color='red') - sys.exit(1) - except AnsibleError as e: - display.display(str(e), stderr=True, color='red') - sys.exit(2) - except KeyboardInterrupt: - display.error("interrupted") - sys.exit(4) From 9de6fea2fab5f9cc576fffa9c86f583122b389a9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Apr 2015 21:22:23 -0400 Subject: [PATCH 0479/3617] one cli to bind them all --- v2/ansible/cli/__init__.py | 71 +++++++++++++++++--------------- v2/ansible/cli/adhoc.py | 11 +++-- v2/ansible/cli/doc.py | 83 ++++++++++++++++++++++++++++++++++++++ v2/ansible/cli/galaxy.py | 16 +++----- v2/ansible/cli/playbook.py | 7 ++-- v2/ansible/cli/pull.py | 69 +++++++++++++++++++++++++++++++ v2/ansible/cli/vault.py | 14 ++++--- v2/bin/ansible | 79 ++++++++++++++++++++++++++++++++++++ v2/bin/ansible-doc | 1 + v2/bin/ansible-galaxy | 1 + v2/bin/ansible-playbook | 1 + v2/bin/ansible-pull | 1 + v2/bin/ansible-vault | 1 + 13 files changed, 298 insertions(+), 57 deletions(-) create mode 100644 v2/ansible/cli/doc.py create mode 100644 v2/ansible/cli/pull.py create mode 100755 v2/bin/ansible create mode 120000 v2/bin/ansible-doc create mode 120000 v2/bin/ansible-galaxy create mode 120000 v2/bin/ansible-playbook create mode 120000 v2/bin/ansible-pull create mode 120000 v2/bin/ansible-vault diff 
--git a/v2/ansible/cli/__init__.py b/v2/ansible/cli/__init__.py index e1ea57630188f7..115a2176f50996 100644 --- a/v2/ansible/cli/__init__.py +++ b/v2/ansible/cli/__init__.py @@ -34,11 +34,12 @@ class SortedOptParser(optparse.OptionParser): '''Optparser which sorts the options by opt before outputting --help''' - def format_help(self, formatter=None): + #FIXME: epilog parsing: OptionParser.format_epilog = lambda self, formatter: self.epilog + + def format_help(self, formatter=None, epilog=None): self.option_list.sort(key=operator.methodcaller('get_opt_string')) return optparse.OptionParser.format_help(self, formatter=None) -#TODO: move many cli only functions in this file into the CLI class class CLI(object): ''' code behind bin/ansible* programs ''' @@ -71,8 +72,7 @@ def set_action(self): break if not self.action: - self.parser.print_help() - raise AnsibleError("Missing required action") + raise AnsibleOptionsError("Missing required action") def execute(self): """ @@ -184,36 +184,37 @@ def validate_conflicts(self): " are exclusive of each other") @staticmethod - def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, - async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False): + def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, + async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None): ''' create an options parser for most ansible scripts ''' - parser = SortedOptParser(usage, version=CLI.version("%prog")) + #FIXME: implemente epilog parsing + #OptionParser.format_epilog = lambda self, formatter: self.epilog - parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', - help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) + # base opts + parser = SortedOptParser(usage, version=CLI.version("%prog")) parser.add_option('-v','--verbose', dest='verbosity', 
default=0, action="count", help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") - parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', - help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) - parser.add_option('-i', '--inventory-file', dest='inventory', - help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST, - default=C.DEFAULT_HOST_LIST) - parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', - help='ask for connection password') - parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', - help='use this file to authenticate the connection') - parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', - help='ask for vault password') - parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, - dest='vault_password_file', help="vault password file") - parser.add_option('--list-hosts', dest='listhosts', action='store_true', - help='outputs a list of matching hosts; does not execute anything else') - parser.add_option('-M', '--module-path', dest='module_path', - help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, - default=None) - parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", - help="set additional variables as key=value or YAML/JSON", default=[]) + + if runtask_opts: + parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', + help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) + parser.add_option('-i', '--inventory-file', dest='inventory', + help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST, + default=C.DEFAULT_HOST_LIST) + parser.add_option('--list-hosts', dest='listhosts', action='store_true', + help='outputs a list of matching hosts; does not execute anything else') + parser.add_option('-M', 
'--module-path', dest='module_path', + help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None) + parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", + help="set additional variables as key=value or YAML/JSON", default=[]) + + if vault_opts: + parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', + help='ask for vault password') + parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, + dest='vault_password_file', help="vault password file") + if subset_opts: parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', @@ -256,6 +257,12 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, if connect_opts: + parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', + help='ask for connection password') + parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', + help='use this file to authenticate the connection') + parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', + help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT, help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT) parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout', @@ -292,7 +299,7 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, def version(prog): ''' return ansible version ''' result = "{0} {1}".format(prog, __version__) - gitinfo = _gitinfo() + gitinfo = CLI._gitinfo() if gitinfo: result = result + " {0}".format(gitinfo) result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH @@ -369,7 +376,7 @@ def _git_repo_info(repo_path): def _gitinfo(): basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') repo_path = 
os.path.join(basedir, '.git') - result = _git_repo_info(repo_path) + result = CLI._git_repo_info(repo_path) submodules = os.path.join(basedir, '.gitmodules') if not os.path.exists(submodules): return result @@ -378,7 +385,7 @@ def _gitinfo(): tokens = line.strip().split(' ') if tokens[0] == 'path': submodule_path = tokens[2] - submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git')) + submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git')) if not submodule_info: submodule_info = ' not found - use git submodule update --init ' + submodule_path result += "\n {0}: {1}".format(submodule_path, submodule_info) diff --git a/v2/ansible/cli/adhoc.py b/v2/ansible/cli/adhoc.py index 5b34acf13ef80d..16c2dc9e4215fe 100644 --- a/v2/ansible/cli/adhoc.py +++ b/v2/ansible/cli/adhoc.py @@ -16,17 +16,14 @@ # along with Ansible. If not, see . ######################################################## -import os -import sys - from ansible import constants as C -from ansible.errors import * +from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.task_queue_manager import TaskQueueManager from ansible.inventory import Inventory from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play -from ansible.utils.cli import CLI +from ansible.cli import CLI from ansible.utils.display import Display from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager @@ -46,6 +43,8 @@ def parse(self): output_opts=True, connect_opts=True, check_opts=True, + runtask_opts=True, + vault_opts=True, ) # options unique to ansible ad-hoc @@ -101,7 +100,7 @@ def run(self): if self.options.listhosts: for host in hosts: - self.display.display(' %s' % host.name) + self.display.display(' %s' % host) return 0 if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args: diff --git a/v2/ansible/cli/doc.py b/v2/ansible/cli/doc.py 
new file mode 100644 index 00000000000000..ec09cb158da9b3 --- /dev/null +++ b/v2/ansible/cli/doc.py @@ -0,0 +1,83 @@ +# (c) 2014, James Tanner +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# ansible-vault is a script that encrypts/decrypts YAML files. See +# http://docs.ansible.com/playbooks_vault.html for more details. + +import os +import sys +import traceback + +from ansible import constants as C +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.cli import CLI +#from ansible.utils import module_docs + +class DocCLI(CLI): + """ Vault command line class """ + + BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm') + IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"] + + _ITALIC = re.compile(r"I\(([^)]+)\)") + _BOLD = re.compile(r"B\(([^)]+)\)") + _MODULE = re.compile(r"M\(([^)]+)\)") + _URL = re.compile(r"U\(([^)]+)\)") + _CONST = re.compile(r"C\(([^)]+)\)") + + PAGER = 'less' + LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) + # -S (chop long lines) -X (disable termcap init and de-init) + + + def parse(self): + + self.parser = optparse.OptionParser( + version=version("%prog"), + usage='usage: %prog [options] [module...]', + description='Show Ansible module documentation', + ) + + self.parser.add_option("-M", "--module-path", action="store", dest="module_path", default=C.DEFAULT_MODULE_PATH, + help="Ansible modules/ directory") + 
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir', + help='List available modules') + self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet', + help='Show playbook snippet for specified module(s)') + self.parser.add_option('-v', action='version', help='Show version number and exit') + + + self.options, self.args = self.parser.parse_args() + self.display.verbosity = self.options.verbosity + + + def run(self): + + if options.module_path is not None: + for i in options.module_path.split(os.pathsep): + utils.plugins.module_finder.add_directory(i) + + if options.list_dir: + # list modules + paths = utils.plugins.module_finder._get_paths() + module_list = [] + for path in paths: + find_modules(path, module_list) + + pager(get_module_list_text(module_list)) + + if len(args) == 0: + raise AnsibleOptionsError("Incorrect options passed") + diff --git a/v2/ansible/cli/galaxy.py b/v2/ansible/cli/galaxy.py index 76633162ed1a7d..abe85e0af8e97a 100644 --- a/v2/ansible/cli/galaxy.py +++ b/v2/ansible/cli/galaxy.py @@ -40,13 +40,13 @@ import ansible.constants as C import ansible.utils import ansible.galaxy +from ansible.cli import CLI from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.galaxy import Galaxy from ansible.galaxy.api import GalaxyAPI from ansible.galaxy.role import GalaxyRole from ansible.playbook.role.requirement import RoleRequirement from ansible.utils.display import Display -from ansible.utils.cli import CLI class GalaxyCLI(CLI): @@ -62,17 +62,13 @@ def __init__(self, args, display=None): def parse(self): ''' create an options parser for bin/ansible ''' - usage = "usage: %%prog [%s] [--help] [options] ..." 
% "|".join(self.VALID_ACTIONS) - epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) - OptionParser.format_epilog = lambda self, formatter: self.epilog - parser = OptionParser(usage=usage, epilog=epilog) + self.parser = CLI.base_parser( + usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS), + epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) + ) - self.parser = parser - self.set_action() - # verbose - self.parser.add_option('-v','--verbose', dest='verbosity', default=0, action="count", - help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") + self.set_action() # options specific to actions if self.action == "info": diff --git a/v2/ansible/cli/playbook.py b/v2/ansible/cli/playbook.py index e7666682e3c3c2..c2b881d2b6d332 100644 --- a/v2/ansible/cli/playbook.py +++ b/v2/ansible/cli/playbook.py @@ -23,6 +23,7 @@ import sys from ansible import constants as C +from ansible.cli import CLI from ansible.errors import AnsibleError from ansible.executor.playbook_executor import PlaybookExecutor from ansible.inventory import Inventory @@ -30,7 +31,6 @@ from ansible.parsing.splitter import parse_kv from ansible.playbook import Playbook from ansible.playbook.task import Task -from ansible.utils.cli import CLI from ansible.utils.display import Display from ansible.utils.unicode import to_unicode from ansible.utils.vars import combine_vars @@ -53,6 +53,8 @@ def parse(self): subset_opts=True, check_opts=True, diff_opts=True, + runtask_opts=True, + vault_opts=True, ) # ansible playbook specific opts @@ -68,8 +70,7 @@ def parse(self): self.options, self.args = parser.parse_args() if len(self.args) == 0: - parser.print_help(file=sys.stderr) - raise AnsibleError("You must specify a playbook file to run") + raise AnsibleOptionsError("You must specify a playbook file to run") self.parser = parser diff --git 
a/v2/ansible/cli/pull.py b/v2/ansible/cli/pull.py new file mode 100644 index 00000000000000..65741e95446453 --- /dev/null +++ b/v2/ansible/cli/pull.py @@ -0,0 +1,69 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +######################################################## +import os +import sys + +from ansible import constants as C +from ansible.errors import * +from ansible.cli import CLI +from ansible.executor.task_queue_manager import TaskQueueManager +from ansible.inventory import Inventory +from ansible.parsing import DataLoader +from ansible.parsing.splitter import parse_kv +from ansible.playbook.play import Play +from ansible.utils.display import Display +from ansible.utils.vault import read_vault_file +from ansible.vars import VariableManager + +######################################################## + +class PullCLI(CLI): + ''' code behind ansible ad-hoc cli''' + + def parse(self): + ''' create an options parser for bin/ansible ''' + + self.parser = CLI.base_parser( + usage='%prog [options]', + runas_opts=True, + async_opts=True, + output_opts=True, + connect_opts=True, + check_opts=True, + runtask_opts=True, + vault_opts=True, + ) + + # options unique to pull + + self.options, self.args = self.parser.parse_args() + + if len(self.args) != 1: + raise AnsibleOptionsError("Missing target hosts") + + self.display.verbosity = self.options.verbosity + 
self.validate_conflicts() + + return True + + + def run(self): + ''' use Runner lib to do SSH things ''' + + raise AnsibleError("Not ported to v2 yet") diff --git a/v2/ansible/cli/vault.py b/v2/ansible/cli/vault.py index 62ec5a373b629a..6231f74332acfe 100644 --- a/v2/ansible/cli/vault.py +++ b/v2/ansible/cli/vault.py @@ -20,9 +20,10 @@ import sys import traceback +from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.parsing.vault import VaultEditor -from ansible.utils.cli import CLI +from ansible.cli import CLI from ansible.utils.display import Display class VaultCLI(CLI): @@ -34,13 +35,14 @@ class VaultCLI(CLI): def __init__(self, args, display=None): self.vault_pass = None - super(VaultCli, self).__init__(args, display) + super(VaultCLI, self).__init__(args, display) def parse(self): - # create parser for CLI options self.parser = CLI.base_parser( - usage = "%prog vaultfile.yml", + vault_opts=True, + usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS), + epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) self.set_action() @@ -60,10 +62,10 @@ def parse(self): self.parser.set_usage("usage: %prog rekey [options] file_name") self.options, self.args = self.parser.parse_args() + self.display.verbosity = self.options.verbosity if len(self.args) == 0 or len(self.args) > 1: - self.parser.print_help() - raise AnsibleError("Vault requires a single filename as a parameter") + raise AnsibleOptionsError("Vault requires a single filename as a parameter") def run(self): diff --git a/v2/bin/ansible b/v2/bin/ansible new file mode 100755 index 00000000000000..467dd505a2e17a --- /dev/null +++ b/v2/bin/ansible @@ -0,0 +1,79 @@ +#!/usr/bin/env python + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public 
License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +######################################################## +from __future__ import (absolute_import) +__metaclass__ = type + +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. 
+ pass + +import os +import sys + +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.utils.display import Display + +######################################################## + +if __name__ == '__main__': + + cli = None + display = Display() + me = os.path.basename(__file__) + + try: + if me == 'ansible-playbook': + from ansible.cli.playbook import PlaybookCLI as mycli + elif me == 'ansible': + from ansible.cli.adhoc import AdHocCLI as mycli + elif me == 'ansible-pull': + from ansible.cli.pull import PullCLI as mycli + elif me == 'ansible-doc': + from ansible.cli.doc import DocCLI as mycli + elif me == 'ansible-vault': + from ansible.cli.vault import VaultCLI as mycli + elif me == 'ansible-galaxy': + from ansible.cli.galaxy import GalaxyCLI as mycli + + cli = mycli(sys.argv, display=display) + if cli: + cli.parse() + sys.exit(cli.run()) + else: + raise AnsibleError("Program not implemented: %s" % me) + + except AnsibleOptionsError as e: + cli.parser.print_help() + display.display(str(e), stderr=True, color='red') + sys.exit(1) + except AnsibleError as e: + display.display(str(e), stderr=True, color='red') + sys.exit(2) + except KeyboardInterrupt: + display.error("interrupted") + sys.exit(4) diff --git a/v2/bin/ansible-doc b/v2/bin/ansible-doc new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/v2/bin/ansible-doc @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/v2/bin/ansible-galaxy b/v2/bin/ansible-galaxy new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/v2/bin/ansible-galaxy @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/v2/bin/ansible-playbook @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/v2/bin/ansible-pull b/v2/bin/ansible-pull new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ 
b/v2/bin/ansible-pull @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/v2/bin/ansible-vault @@ -0,0 +1 @@ +ansible \ No newline at end of file From 13978a7d75509704ccb58b550ff02c7fd30d1d91 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Apr 2015 21:27:29 -0400 Subject: [PATCH 0480/3617] fixed typo --- v2/ansible/cli/playbook.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v2/ansible/cli/playbook.py b/v2/ansible/cli/playbook.py index c2b881d2b6d332..eb60bacbd22afa 100644 --- a/v2/ansible/cli/playbook.py +++ b/v2/ansible/cli/playbook.py @@ -118,7 +118,7 @@ def run(self): only_tags = self.options.tags.split(",") skip_tags = self.options.skip_tags if self.options.skip_tags is not None: - skip_tags = self.ptions.skip_tags.split(",") + skip_tags = self.options.skip_tags.split(",") # initial error check, to make sure all specified playbooks are accessible # before we start running anything through the playbook executor From df881b7f37bb53287c504f0180ad2813eaf36e03 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 19:10:08 -0700 Subject: [PATCH 0481/3617] Update core module ref for docker fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e51ea29d8f69f7..1fdf75d49d1e39 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e51ea29d8f69f79c239a2f80f79edbb2d9fcc496 +Subproject commit 1fdf75d49d1e396b4512e4311680bc435ae7910a From 38d2042739dd3c2c295ecf11267ebcc07bce5bf4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Apr 2015 22:29:12 -0400 Subject: [PATCH 0482/3617] v2 ansible-doc can now list modules --- v2/ansible/cli/doc.py | 114 +++++++++++++++++++++---- v2/ansible/utils/module_docs.py | 102 ++++++++++++++++++++++ 
v2/ansible/utils/module_docs_fragments | 1 + 3 files changed, 202 insertions(+), 15 deletions(-) create mode 100644 v2/ansible/utils/module_docs.py create mode 120000 v2/ansible/utils/module_docs_fragments diff --git a/v2/ansible/cli/doc.py b/v2/ansible/cli/doc.py index ec09cb158da9b3..f77ccf67da3515 100644 --- a/v2/ansible/cli/doc.py +++ b/v2/ansible/cli/doc.py @@ -16,14 +16,19 @@ # ansible-vault is a script that encrypts/decrypts YAML files. See # http://docs.ansible.com/playbooks_vault.html for more details. +import fcntl import os +import re +import struct import sys +import termios import traceback from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.plugins import module_loader from ansible.cli import CLI -#from ansible.utils import module_docs +from ansible.utils import module_docs class DocCLI(CLI): """ Vault command line class """ @@ -41,13 +46,16 @@ class DocCLI(CLI): LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) # -S (chop long lines) -X (disable termcap init and de-init) + def __init__(self, args, display=None): + + super(DocCLI, self).__init__(args, display) + self.module_list = [] def parse(self): - self.parser = optparse.OptionParser( - version=version("%prog"), + self.parser = CLI.base_parser( usage='usage: %prog [options] [module...]', - description='Show Ansible module documentation', + epilog='Show Ansible module documentation', ) self.parser.add_option("-M", "--module-path", action="store", dest="module_path", default=C.DEFAULT_MODULE_PATH, @@ -56,8 +64,6 @@ def parse(self): help='List available modules') self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for specified module(s)') - self.parser.add_option('-v', action='version', help='Show version number and exit') - self.options, self.args = self.parser.parse_args() self.display.verbosity = self.options.verbosity @@ -65,19 +71,97 
@@ def parse(self): def run(self): - if options.module_path is not None: - for i in options.module_path.split(os.pathsep): - utils.plugins.module_finder.add_directory(i) + if self.options.module_path is not None: + for i in self.options.module_path.split(os.pathsep): + module_loader.add_directory(i) - if options.list_dir: + if self.options.list_dir: # list modules - paths = utils.plugins.module_finder._get_paths() - module_list = [] + paths = module_loader._get_paths() for path in paths: - find_modules(path, module_list) + self.find_modules(path) - pager(get_module_list_text(module_list)) + #self.pager(get_module_list_text(module_list)) + print self.get_module_list_text() + return 0 - if len(args) == 0: + if len(self.args) == 0: raise AnsibleOptionsError("Incorrect options passed") + + def find_modules(self, path): + + if os.path.isdir(path): + for module in os.listdir(path): + if module.startswith('.'): + continue + elif os.path.isdir(module): + self.find_modules(module) + elif any(module.endswith(x) for x in self.BLACKLIST_EXTS): + continue + elif module.startswith('__'): + continue + elif module in self.IGNORE_FILES: + continue + elif module.startswith('_'): + fullpath = '/'.join([path,module]) + if os.path.islink(fullpath): # avoids aliases + continue + + module = os.path.splitext(module)[0] # removes the extension + self.module_list.append(module) + + + def get_module_list_text(self): + tty_size = 0 + if os.isatty(0): + tty_size = struct.unpack('HHHH', + fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1] + columns = max(60, tty_size) + displace = max(len(x) for x in self.module_list) + linelimit = columns - displace - 5 + text = [] + deprecated = [] + for module in sorted(set(self.module_list)): + + if module in module_docs.BLACKLIST_MODULES: + continue + + filename = module_loader.find_plugin(module) + + if filename is None: + continue + if filename.endswith(".ps1"): + continue + if os.path.isdir(filename): + continue + + try: + doc, 
plainexamples, returndocs = module_docs.get_docstring(filename) + desc = self.tty_ify(doc.get('short_description', '?')).strip() + if len(desc) > linelimit: + desc = desc[:linelimit] + '...' + + if module.startswith('_'): # Handle deprecated + deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) + else: + text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) + except: + traceback.print_exc() + sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) + + if len(deprecated) > 0: + text.append("\nDEPRECATED:") + text.extend(deprecated) + return "\n".join(text) + + @classmethod + def tty_ify(self, text): + + t = self._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word' + t = self._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word* + t = self._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] + t = self._URL.sub(r"\1", t) # U(word) => word + t = self._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word' + + return t diff --git a/v2/ansible/utils/module_docs.py b/v2/ansible/utils/module_docs.py new file mode 100644 index 00000000000000..632b4a00c2a36a --- /dev/null +++ b/v2/ansible/utils/module_docs.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +# (c) 2012, Jan-Piet Mens +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +import os +import sys +import ast +import yaml +import traceback + +from ansible.plugins import fragment_loader + +# modules that are ok that they do not have documentation strings +BLACKLIST_MODULES = [ + 'async_wrapper', 'accelerate', 'async_status' +] + +def get_docstring(filename, verbose=False): + """ + Search for assignment of the DOCUMENTATION and EXAMPLES variables + in the given file. + Parse DOCUMENTATION from YAML and return the YAML doc or None + together with EXAMPLES, as plain text. + + DOCUMENTATION can be extended using documentation fragments + loaded by the PluginLoader from the module_docs_fragments + directory. + """ + + doc = None + plainexamples = None + returndocs = None + + try: + # Thank you, Habbie, for this bit of code :-) + M = ast.parse(''.join(open(filename))) + for child in M.body: + if isinstance(child, ast.Assign): + if 'DOCUMENTATION' in (t.id for t in child.targets): + doc = yaml.safe_load(child.value.s) + fragment_slug = doc.get('extends_documentation_fragment', + 'doesnotexist').lower() + + # Allow the module to specify a var other than DOCUMENTATION + # to pull the fragment from, using dot notation as a separator + if '.' 
in fragment_slug: + fragment_name, fragment_var = fragment_slug.split('.', 1) + fragment_var = fragment_var.upper() + else: + fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION' + + + if fragment_slug != 'doesnotexist': + fragment_class = fragment_loader.get(fragment_name) + assert fragment_class is not None + + fragment_yaml = getattr(fragment_class, fragment_var, '{}') + fragment = yaml.safe_load(fragment_yaml) + + if fragment.has_key('notes'): + notes = fragment.pop('notes') + if notes: + if not doc.has_key('notes'): + doc['notes'] = [] + doc['notes'].extend(notes) + + if 'options' not in fragment.keys(): + raise Exception("missing options in fragment, possibly misformatted?") + + for key, value in fragment.items(): + if not doc.has_key(key): + doc[key] = value + else: + doc[key].update(value) + + if 'EXAMPLES' in (t.id for t in child.targets): + plainexamples = child.value.s[1:] # Skip first empty line + + if 'RETURN' in (t.id for t in child.targets): + returndocs = child.value.s[1:] + except: + traceback.print_exc() # temp + if verbose == True: + traceback.print_exc() + print "unable to parse %s" % filename + return doc, plainexamples, returndocs + diff --git a/v2/ansible/utils/module_docs_fragments b/v2/ansible/utils/module_docs_fragments new file mode 120000 index 00000000000000..83aef9ec19ab8f --- /dev/null +++ b/v2/ansible/utils/module_docs_fragments @@ -0,0 +1 @@ +../../../lib/ansible/utils/module_docs_fragments \ No newline at end of file From f9e9dd1684117b08c04d1fefc3e2bdb8fd39e590 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 30 Apr 2015 22:54:38 -0400 Subject: [PATCH 0483/3617] v2 ansible-doc now does everything v1 did --- v2/ansible/cli/__init__.py | 54 ++++++++++++ v2/ansible/cli/doc.py | 166 +++++++++++++++++++++++++++++++------ 2 files changed, 195 insertions(+), 25 deletions(-) diff --git a/v2/ansible/cli/__init__.py b/v2/ansible/cli/__init__.py index 115a2176f50996..0b0494e03282b6 100644 --- a/v2/ansible/cli/__init__.py +++ 
b/v2/ansible/cli/__init__.py @@ -22,9 +22,12 @@ import operator import optparse import os +import sys import time import yaml +import re import getpass +import subprocess from ansible import __version__ from ansible import constants as C @@ -45,6 +48,16 @@ class CLI(object): VALID_ACTIONS = ['No Actions'] + _ITALIC = re.compile(r"I\(([^)]+)\)") + _BOLD = re.compile(r"B\(([^)]+)\)") + _MODULE = re.compile(r"M\(([^)]+)\)") + _URL = re.compile(r"U\(([^)]+)\)") + _CONST = re.compile(r"C\(([^)]+)\)") + + PAGER = 'less' + LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) + # -S (chop long lines) -X (disable termcap init and de-init) + def __init__(self, args, display=None): """ Base init method for all command line programs @@ -391,3 +404,44 @@ def _gitinfo(): result += "\n {0}: {1}".format(submodule_path, submodule_info) f.close() return result + + + @staticmethod + def pager(text): + ''' find reasonable way to display text ''' + # this is a much simpler form of what is in pydoc.py + if not sys.stdout.isatty(): + pager_print(text) + elif 'PAGER' in os.environ: + if sys.platform == 'win32': + pager_print(text) + else: + CLI.pager_pipe(text, os.environ['PAGER']) + elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0: + CLI.pager_pipe(text, 'less') + else: + pager_print(text) + + @staticmethod + def pager_pipe(text, cmd): + ''' pipe text through a pager ''' + if 'LESS' not in os.environ: + os.environ['LESS'] = LESS_OPTS + try: + cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout) + cmd.communicate(input=text) + except IOError: + pass + except KeyboardInterrupt: + pass + + @classmethod + def tty_ify(self, text): + + t = self._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word' + t = self._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word* + t = self._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] + t = self._URL.sub(r"\1", t) # U(word) => word + t = self._CONST.sub("`" + r"\1" + "'", t) 
# C(word) => `word' + + return t diff --git a/v2/ansible/cli/doc.py b/v2/ansible/cli/doc.py index f77ccf67da3515..797a59f0381c9e 100644 --- a/v2/ansible/cli/doc.py +++ b/v2/ansible/cli/doc.py @@ -17,12 +17,12 @@ # http://docs.ansible.com/playbooks_vault.html for more details. import fcntl +import datetime import os -import re import struct -import sys import termios import traceback +import textwrap from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError @@ -36,16 +36,6 @@ class DocCLI(CLI): BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm') IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"] - _ITALIC = re.compile(r"I\(([^)]+)\)") - _BOLD = re.compile(r"B\(([^)]+)\)") - _MODULE = re.compile(r"M\(([^)]+)\)") - _URL = re.compile(r"U\(([^)]+)\)") - _CONST = re.compile(r"C\(([^)]+)\)") - - PAGER = 'less' - LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) - # -S (chop long lines) -X (disable termcap init and de-init) - def __init__(self, args, display=None): super(DocCLI, self).__init__(args, display) @@ -75,19 +65,62 @@ def run(self): for i in self.options.module_path.split(os.pathsep): module_loader.add_directory(i) + # list modules if self.options.list_dir: - # list modules paths = module_loader._get_paths() for path in paths: self.find_modules(path) - #self.pager(get_module_list_text(module_list)) - print self.get_module_list_text() + CLI.pager(self.get_module_list_text()) return 0 if len(self.args) == 0: raise AnsibleOptionsError("Incorrect options passed") + # process command line module list + text = '' + for module in self.args: + + filename = module_loader.find_plugin(module) + if filename is None: + self.display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) + continue + + if any(filename.endswith(x) for x in self.BLACKLIST_EXTS): + continue + + try: + doc, plainexamples, returndocs = module_docs.get_docstring(filename) + 
except: + self.display.vvv(traceback.print_exc()) + self.display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module) + continue + + if doc is not None: + + all_keys = [] + for (k,v) in doc['options'].iteritems(): + all_keys.append(k) + all_keys = sorted(all_keys) + doc['option_keys'] = all_keys + + doc['filename'] = filename + doc['docuri'] = doc['module'].replace('_', '-') + doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') + doc['plainexamples'] = plainexamples + doc['returndocs'] = returndocs + + if self.options.show_snippet: + text += DocCLI.get_snippet_text(doc) + else: + text += DocCLI.get_man_text(doc) + else: + # this typically means we couldn't even parse the docstring, not just that the YAML is busted, + # probably a quoting issue. + self.display.warning("module %s missing documentation (or could not parse documentation)\n" % module) + + CLI.pager(text) + return 0 def find_modules(self, path): @@ -147,21 +180,104 @@ def get_module_list_text(self): else: text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) except: - traceback.print_exc() - sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) + raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module) if len(deprecated) > 0: text.append("\nDEPRECATED:") text.extend(deprecated) return "\n".join(text) - @classmethod - def tty_ify(self, text): - t = self._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word' - t = self._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word* - t = self._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] - t = self._URL.sub(r"\1", t) # U(word) => word - t = self._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word' + @staticmethod + def print_paths(finder): + ''' Returns a string suitable for printing of the search path ''' + + # Uses a list to get the order right + 
ret = [] + for i in finder._get_paths(): + if i not in ret: + ret.append(i) + return os.pathsep.join(ret) + + @staticmethod + def get_snippet_text(doc): + + text = [] + desc = CLI.tty_ify(" ".join(doc['short_description'])) + text.append("- name: %s" % (desc)) + text.append(" action: %s" % (doc['module'])) - return t + for o in sorted(doc['options'].keys()): + opt = doc['options'][o] + desc = CLI.tty_ify(" ".join(opt['description'])) + + if opt.get('required', False): + s = o + "=" + else: + s = o + + text.append(" %-20s # %s" % (s, desc)) + text.append('') + + return "\n".join(text) + + @staticmethod + def get_man_text(doc): + + opt_indent=" " + text = [] + text.append("> %s\n" % doc['module'].upper()) + + desc = " ".join(doc['description']) + + text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=" ", subsequent_indent=" ")) + + if 'option_keys' in doc and len(doc['option_keys']) > 0: + text.append("Options (= is mandatory):\n") + + for o in sorted(doc['option_keys']): + opt = doc['options'][o] + + if opt.get('required', False): + opt_leadin = "=" + else: + opt_leadin = "-" + + text.append("%s %s" % (opt_leadin, o)) + + desc = " ".join(opt['description']) + + if 'choices' in opt: + choices = ", ".join(str(i) for i in opt['choices']) + desc = desc + " (Choices: " + choices + ")" + if 'default' in opt: + default = str(opt['default']) + desc = desc + " [Default: " + default + "]" + text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=opt_indent, + subsequent_indent=opt_indent)) + + if 'notes' in doc and len(doc['notes']) > 0: + notes = " ".join(doc['notes']) + text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), initial_indent=" ", + subsequent_indent=opt_indent)) + + + if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0: + req = ", ".join(doc['requirements']) + text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), initial_indent=" ", + subsequent_indent=opt_indent)) + + if 
'examples' in doc and len(doc['examples']) > 0: + text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's')) + for ex in doc['examples']: + text.append("%s\n" % (ex['code'])) + + if 'plainexamples' in doc and doc['plainexamples'] is not None: + text.append("EXAMPLES:") + text.append(doc['plainexamples']) + if 'returndocs' in doc and doc['returndocs'] is not None: + text.append("RETURN VALUES:") + text.append(doc['returndocs']) + text.append('') + + return "\n".join(text) From 1c250ee4e7aa7488c56e4b6de62d411908fd91d3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 20:06:53 -0700 Subject: [PATCH 0484/3617] Pull in route53 fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 1fdf75d49d1e39..e971543bd45c0e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 1fdf75d49d1e396b4512e4311680bc435ae7910a +Subproject commit e971543bd45c0e4b2affa0acf0cfbf7ea1964b1a From 6e65ccabc3dac26a89cf2b1782ed160327320528 Mon Sep 17 00:00:00 2001 From: Simon Dick Date: Fri, 1 May 2015 13:52:29 +0100 Subject: [PATCH 0485/3617] Allow the use of HTTP on custom ports in the fetch_url function --- lib/ansible/module_utils/urls.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 962b868ee0d2b4..d56cc89395e338 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -377,6 +377,7 @@ def fetch_url(module, url, data=None, headers=None, method=None, netloc = netloc.split('@', 1)[1] if ':' in netloc: hostname, port = netloc.split(':', 1) + port = int(port) else: hostname = netloc port = 443 From 4e08064afae62fdb74a50fbfe690544f8509ccc6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 1 May 2015 08:32:26 -0500 Subject: [PATCH 0486/3617] Fix issue where included blocks were not filtered on tags (v2) --- 
v2/ansible/playbook/taggable.py | 2 +- v2/ansible/plugins/strategies/linear.py | 4 +++- v2/samples/hosts | 1 + v2/samples/include.yml | 2 ++ 4 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 v2/samples/hosts diff --git a/v2/ansible/playbook/taggable.py b/v2/ansible/playbook/taggable.py index 3622dc34b27e0e..40e05d1817a9e7 100644 --- a/v2/ansible/playbook/taggable.py +++ b/v2/ansible/playbook/taggable.py @@ -26,7 +26,7 @@ class Taggable: untagged = set(['untagged']) - _tags = FieldAttribute(isa='list', default=None) + _tags = FieldAttribute(isa='list', default=[]) def __init__(self): super(Taggable, self).__init__() diff --git a/v2/ansible/plugins/strategies/linear.py b/v2/ansible/plugins/strategies/linear.py index 9988bb3e2a3db9..95ecac1451f51a 100644 --- a/v2/ansible/plugins/strategies/linear.py +++ b/v2/ansible/plugins/strategies/linear.py @@ -285,7 +285,9 @@ def __repr__(self): noop_block.rescue = [noop_task for t in new_block.rescue] for host in hosts_left: if host in included_file._hosts: - all_blocks[host].append(new_block) + task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task) + final_block = new_block.filter_tagged_tasks(connection_info, task_vars) + all_blocks[host].append(final_block) else: all_blocks[host].append(noop_block) diff --git a/v2/samples/hosts b/v2/samples/hosts new file mode 100644 index 00000000000000..118379ffd9e254 --- /dev/null +++ b/v2/samples/hosts @@ -0,0 +1 @@ +testing ansible_connection=local ansible_ssh_host=192.168.122.100 ansible_ssh_user=testing diff --git a/v2/samples/include.yml b/v2/samples/include.yml index 3a2e88f8985976..121c4ce0794d5f 100644 --- a/v2/samples/include.yml +++ b/v2/samples/include.yml @@ -1,4 +1,6 @@ - debug: msg="this is the include, a=={{a}}" + tags: + - included #- debug: msg="this is the second debug in the include" #- debug: msg="this is the third debug in the include, and a is still {{a}}" From 
428f667497c8dfed360c73b8e054579f0d3dd4ef Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 1 May 2015 08:18:23 -0700 Subject: [PATCH 0487/3617] Update module refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- v2/ansible/modules/core | 2 +- v2/ansible/modules/extras | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e971543bd45c0e..9028e9d4be8a3d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e971543bd45c0e4b2affa0acf0cfbf7ea1964b1a +Subproject commit 9028e9d4be8a3dbb96c81a799e18f3adf63d9fd0 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 764a0e26b6df02..dd80fa221ce0ad 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 764a0e26b6df02cf2924254589a065918b6ca5d6 +Subproject commit dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6dc diff --git a/v2/ansible/modules/core b/v2/ansible/modules/core index 80dc34147d6458..0341ddd35ed5ff 160000 --- a/v2/ansible/modules/core +++ b/v2/ansible/modules/core @@ -1 +1 @@ -Subproject commit 80dc34147d645892ff44f70e96caf4f6d5b162b5 +Subproject commit 0341ddd35ed5ff477ad5de2488d947255ce86259 diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras index 764a0e26b6df02..dd80fa221ce0ad 160000 --- a/v2/ansible/modules/extras +++ b/v2/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 764a0e26b6df02cf2924254589a065918b6ca5d6 +Subproject commit dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6dc From 034ac8ae78553678716682cd4cd68cfb61873fe9 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 1 May 2015 17:25:06 +0200 Subject: [PATCH 0488/3617] cloudstack: _has_changed() should not compare None values --- lib/ansible/module_utils/cloudstack.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 518ef7a7326681..7ea02d1be7be9b 
100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -81,6 +81,10 @@ def _has_changed(self, want_dict, current_dict, only_keys=None): if only_keys and key not in only_keys: continue; + # Skip None values + if value is None: + continue; + if key in current_dict: # API returns string for int in some cases, just to make sure From 2f1b561bd36b32bbf470db4cda0072035bed3ba4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 1 May 2015 12:39:12 -0400 Subject: [PATCH 0489/3617] porting fix #10893 to v2 --- v2/ansible/module_utils/urls.py | 1 + 1 file changed, 1 insertion(+) diff --git a/v2/ansible/module_utils/urls.py b/v2/ansible/module_utils/urls.py index 962b868ee0d2b4..d56cc89395e338 100644 --- a/v2/ansible/module_utils/urls.py +++ b/v2/ansible/module_utils/urls.py @@ -377,6 +377,7 @@ def fetch_url(module, url, data=None, headers=None, method=None, netloc = netloc.split('@', 1)[1] if ':' in netloc: hostname, port = netloc.split(':', 1) + port = int(port) else: hostname = netloc port = 443 From c9815ef286cbb832041b9235251ee9cc01f894c6 Mon Sep 17 00:00:00 2001 From: Charles Lanahan Date: Fri, 1 May 2015 10:23:56 -0700 Subject: [PATCH 0490/3617] Update intro_dynamic_inventory.rst Examples of special characters being converted to underscores for clarity. --- docsite/rst/intro_dynamic_inventory.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 6734efca1905d4..00023a4ccae7c8 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -140,9 +140,9 @@ Security Group Tags Each instance can have a variety of key/value pairs associated with it called Tags. The most common tag key is 'Name', though anything is possible. Each key/value pair is its own group of instances, again with special characters converted to underscores, in the format ``tag_KEY_VALUE`` e.g. 
- ``tag_Name_Web`` - ``tag_Name_redis-master-001`` - ``tag_aws_cloudformation_logical-id_WebServerGroup`` + ``tag_Name_Web`` can be used as is + ``tag_Name_redis-master-001`` becomes ``tag_Name_redis_master_001`` + ``tag_aws_cloudformation_logical-id_WebServerGroup`` becomes ``tag_aws_cloudformation_logical_id_WebServerGroup`` When the Ansible is interacting with a specific server, the EC2 inventory script is called again with the ``--host HOST`` option. This looks up the HOST in the index cache to get the instance ID, and then makes an API call to AWS to get information about that specific instance. It then makes information about that instance available as variables to your playbooks. Each variable is prefixed by ``ec2_``. Here are some of the variables available: From 4d4e2bb5a0b8afe97237b22cc221bfdd35133450 Mon Sep 17 00:00:00 2001 From: Bill Nottingham Date: Fri, 1 May 2015 13:53:29 -0400 Subject: [PATCH 0491/3617] Tweak vault description. Vault isn't specifically for source control. Make description a little more generic and descriptive. --- docsite/rst/playbooks_vault.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst index 9ccb5b50f17b01..921a05c50edcd1 100644 --- a/docsite/rst/playbooks_vault.rst +++ b/docsite/rst/playbooks_vault.rst @@ -3,7 +3,7 @@ Vault .. contents:: Topics -New in Ansible 1.5, "Vault" is a feature of ansible that allows keeping encrypted data in source control. +New in Ansible 1.5, "Vault" is a feature of ansible that allows keeping sensitive data such as passwords or keys in encrypted files, rather than as plaintext in your playbooks or roles. These vault files can then be distributed or placed in source control. To enable this feature, a command line tool, `ansible-vault` is used to edit files, and a command line flag `--ask-vault-pass` or `--vault-password-file` is used. 
From 8d324e6a50459641fb654e4b921ebb8418a6643d Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Fri, 1 May 2015 13:57:13 -0400 Subject: [PATCH 0492/3617] Fix pylint error on "cachefile does not exist" Since cachefile is used to show the error message when stat fail, the variable need to be declared. --- lib/ansible/cache/jsonfile.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py index 9c45dc22fd7912..0bade893a82077 100644 --- a/lib/ansible/cache/jsonfile.py +++ b/lib/ansible/cache/jsonfile.py @@ -108,6 +108,7 @@ def keys(self): return keys def contains(self, key): + cachefile = "%s/%s" % (self._cache_dir, key) if key in self._cache: return True @@ -115,7 +116,7 @@ def contains(self, key): if self.has_expired(key): return False try: - st = os.stat("%s/%s" % (self._cache_dir, key)) + st = os.stat(cachefile) return True except (OSError,IOError), e: if e.errno == errno.ENOENT: From 8a7496af4285133e760431f8eeb3fffee6d0e07b Mon Sep 17 00:00:00 2001 From: Greg Back Date: Fri, 1 May 2015 16:45:56 -0500 Subject: [PATCH 0493/3617] Make "include" variable documentation consistent There is already a good example of a list variable ("ssh_keys") contained above this snippet, so reuse the content here. --- docsite/rst/playbooks_roles.rst | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 3ffabe835d3828..ce6c04c5cadf7e 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -86,10 +86,9 @@ which also supports structured variables:: - include: wordpress.yml vars: wp_user: timmy - some_list_variable: - - alpha - - beta - - gamma + ssh_keys: + - keys/one.txt + - keys/two.txt Playbooks can include other playbooks too, but that's mentioned in a later section. 
From 0b836262f0720bf6b95761095bbaaf44046973c2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 1 May 2015 18:29:15 -0400 Subject: [PATCH 0494/3617] draft ansible pull uspport --- v2/ansible/cli/pull.py | 184 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 167 insertions(+), 17 deletions(-) diff --git a/v2/ansible/cli/pull.py b/v2/ansible/cli/pull.py index 65741e95446453..6b087d4ec060ce 100644 --- a/v2/ansible/cli/pull.py +++ b/v2/ansible/cli/pull.py @@ -16,54 +16,204 @@ # along with Ansible. If not, see . ######################################################## +import datetime import os -import sys +import random +import shutil +import socket from ansible import constants as C -from ansible.errors import * +from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.cli import CLI -from ansible.executor.task_queue_manager import TaskQueueManager -from ansible.inventory import Inventory -from ansible.parsing import DataLoader -from ansible.parsing.splitter import parse_kv -from ansible.playbook.play import Play from ansible.utils.display import Display from ansible.utils.vault import read_vault_file -from ansible.vars import VariableManager ######################################################## class PullCLI(CLI): ''' code behind ansible ad-hoc cli''' + DEFAULT_REPO_TYPE = 'git' + DEFAULT_PLAYBOOK = 'local.yml' + PLAYBOOK_ERRORS = { + 1: 'File does not exist', + 2: 'File is not readable' + } + SUPPORTED_REPO_MODULES = ['git'] + def parse(self): ''' create an options parser for bin/ansible ''' self.parser = CLI.base_parser( usage='%prog [options]', - runas_opts=True, - async_opts=True, - output_opts=True, connect_opts=True, - check_opts=True, - runtask_opts=True, vault_opts=True, ) # options unique to pull + self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run') + self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', + help='only 
run the playbook if the repository has been updated') + self.parser.add_option('-s', '--sleep', dest='sleep', default=None, + help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests') + self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true', + help='run the playbook even if the repository could not be updated') + self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to') + self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') + self.parser.add_option('-C', '--checkout', dest='checkout', + help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.') + self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', + help='adds the hostkey for the repo url if not already added') + self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE, + help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE) + self.options, self.args = self.parser.parse_args() + if self.options.sleep: + try: + secs = random.randint(0,int(self.options.sleep)) + self.options.sleep = secs + except ValueError: + raise AnsibleOptionsError("%s is not a number." 
% self.options.sleep) + + if not self.options.url: + raise AnsibleOptionsError("URL for repository not specified, use -h for help") + if len(self.args) != 1: raise AnsibleOptionsError("Missing target hosts") + if self.options.module_name not in self.SUPPORTED_REPO_MODULES: + raise AnsibleOptionsError("Unsuported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES))) + self.display.verbosity = self.options.verbosity self.validate_conflicts() - return True - - def run(self): ''' use Runner lib to do SSH things ''' - raise AnsibleError("Not ported to v2 yet") + # log command line + now = datetime.datetime.now() + self.display.display(now.strftime("Starting Ansible Pull at %F %T")) + self.display.display(' '.join(sys.argv)) + + # Build Checkout command + # Now construct the ansible command + limit_opts = 'localhost:%s:127.0.0.1' % socket.getfqdn() + base_opts = '-c local --limit "%s"' % limit_opts + if self.options.verbosity > 0: + base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ]) + + # Attempt to use the inventory passed in as an argument + # It might not yet have been downloaded so use localhost if note + if not self.options.inventory or not os.path.exists(self.options.inventory): + inv_opts = 'localhost,' + else: + inv_opts = self.options.inventory + + #TODO: enable more repo modules hg/svn? 
+ if self.options.module_name == 'git': + repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest) + if self.options.checkout: + repo_opts += ' version=%s' % self.options.checkout + + if self.options.accept_host_key: + repo_opts += ' accept_hostkey=yes' + + if self.options.key_file: + repo_opts += ' key_file=%s' % options.key_file + + path = utils.plugins.module_finder.find_plugin(options.module_name) + if path is None: + raise AnsibleOptionsError(("module '%s' not found.\n" % options.module_name)) + + bin_path = os.path.dirname(os.path.abspath(__file__)) + cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % ( + bin_path, inv_opts, base_opts, self.options.module_name, repo_opts + ) + + for ev in self.options.extra_vars: + cmd += ' -e "%s"' % ev + + # Nap? + if self.options.sleep: + self.display.display("Sleeping for %d seconds..." % self.options.sleep) + time.sleep(self.options.sleep); + + # RUN the Checkout command + rc, out, err = cmd_functions.run_cmd(cmd, live=True) + + if rc != 0: + if self.options.force: + self.display.warning("Unable to update repository. 
Continuing with (forced) run of playbook.") + else: + return rc + elif self.options.ifchanged and '"changed": true' not in out: + self.display.display("Repository has not changed, quitting.") + return 0 + + playbook = self.select_playbook(path) + + if playbook is None: + raise AnsibleOptionsError("Could not find a playbook to run.") + + # Build playbook command + cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook) + if self.options.vault_password_file: + cmd += " --vault-password-file=%s" % self.options.vault_password_file + if self.options.inventory: + cmd += ' -i "%s"' % self.options.inventory + for ev in self.options.extra_vars: + cmd += ' -e "%s"' % ev + if self.options.ask_sudo_pass: + cmd += ' -K' + if self.options.tags: + cmd += ' -t "%s"' % self.options.tags + + os.chdir(self.options.dest) + + # RUN THE PLAYBOOK COMMAND + rc, out, err = cmd_functions.run_cmd(cmd, live=True) + + if self.options.purge: + os.chdir('/') + try: + shutil.rmtree(options.dest) + except Exception, e: + print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e)) + + return rc + + + def try_playbook(self, path): + if not os.path.exists(path): + return 1 + if not os.access(path, os.R_OK): + return 2 + return 0 + + def select_playbook(self, path): + playbook = None + if len(self.args) > 0 and self.args[0] is not None: + playbook = os.path.join(path, self.args[0]) + rc = self.try_playbook(playbook) + if rc != 0: + self.display.warning("%s: %s" % (playbook, self.PLAYBOOK_ERRORS[rc])) + return None + return playbook + else: + fqdn = socket.getfqdn() + hostpb = os.path.join(path, fqdn + '.yml') + shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml') + localpb = os.path.join(path, DEFAULT_PLAYBOOK) + errors = [] + for pb in [hostpb, shorthostpb, localpb]: + rc = self.try_playbook(pb) + if rc == 0: + playbook = pb + break + else: + errors.append("%s: %s" % (pb, self.PLAYBOOK_ERRORS[rc])) + if playbook is None: + self.display.warning("\n".join(errors)) + 
return playbook From f310d132806dd6870a92cd93b2a8983c24ff548d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 1 May 2015 23:48:11 -0500 Subject: [PATCH 0495/3617] Make sure all plugin loaders are loaded from roles and shared correctly (v2) --- v2/ansible/executor/connection_info.py | 3 +-- v2/ansible/executor/playbook_executor.py | 4 +++- v2/ansible/executor/process/worker.py | 4 ++-- v2/ansible/executor/task_executor.py | 27 +++++++++++++---------- v2/ansible/executor/task_queue_manager.py | 4 +++- v2/ansible/playbook/base.py | 4 +--- v2/ansible/playbook/role/__init__.py | 17 +++++++++----- v2/ansible/playbook/task.py | 8 +++---- v2/ansible/plugins/action/__init__.py | 18 +++++++-------- v2/ansible/plugins/action/debug.py | 2 +- v2/ansible/plugins/strategies/__init__.py | 21 ++++++++++++++++-- v2/ansible/template/__init__.py | 13 ++++++++--- 12 files changed, 80 insertions(+), 45 deletions(-) diff --git a/v2/ansible/executor/connection_info.py b/v2/ansible/executor/connection_info.py index 7c9c9892ba5dcd..1c168a8e26436b 100644 --- a/v2/ansible/executor/connection_info.py +++ b/v2/ansible/executor/connection_info.py @@ -248,12 +248,11 @@ def check_become_success(self, output, become_settings): def _get_fields(self): return [i for i in self.__dict__.keys() if i[:1] != '_'] - def post_validate(self, variables, loader): + def post_validate(self, templar): ''' Finalizes templated values which may be set on this objects fields. 
''' - templar = Templar(loader=loader, variables=variables) for field in self._get_fields(): value = templar.template(getattr(self, field)) setattr(self, field, value) diff --git a/v2/ansible/executor/playbook_executor.py b/v2/ansible/executor/playbook_executor.py index 777587f7536f3a..2d5958697b395e 100644 --- a/v2/ansible/executor/playbook_executor.py +++ b/v2/ansible/executor/playbook_executor.py @@ -25,6 +25,7 @@ from ansible.errors import * from ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook +from ansible.template import Templar from ansible.utils.color import colorize, hostcolor from ansible.utils.debug import debug @@ -80,8 +81,9 @@ def run(self): # Create a temporary copy of the play here, so we can run post_validate # on it without the templating changes affecting the original object. all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) + templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False) new_play = play.copy() - new_play.post_validate(all_vars, fail_on_undefined=False) + new_play.post_validate(templar) if self._tqm is None: # we are just doing a listing diff --git a/v2/ansible/executor/process/worker.py b/v2/ansible/executor/process/worker.py index 7a75af146ef36c..d8e8960fe4077b 100644 --- a/v2/ansible/executor/process/worker.py +++ b/v2/ansible/executor/process/worker.py @@ -94,7 +94,7 @@ def run(self): try: if not self._main_q.empty(): debug("there's work to be done!") - (host, task, basedir, job_vars, connection_info, module_loader) = self._main_q.get(block=False) + (host, task, basedir, job_vars, connection_info, shared_loader_obj) = self._main_q.get(block=False) debug("got a task/handler to work on: %s" % task) # because the task queue manager starts workers (forks) before the @@ -115,7 +115,7 @@ def run(self): # execute the task and build a TaskResult from the result debug("running TaskExecutor() for %s/%s" % (host, task)) - executor_result = 
TaskExecutor(host, task, job_vars, new_connection_info, self._new_stdin, self._loader, module_loader).run() + executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._new_stdin, self._loader, shared_loader_obj).run() debug("done running TaskExecutor() for %s/%s" % (host, task)) task_result = TaskResult(host, task, executor_result) diff --git a/v2/ansible/executor/task_executor.py b/v2/ansible/executor/task_executor.py index 5dd3250ea0ec44..2f90b3d87eb534 100644 --- a/v2/ansible/executor/task_executor.py +++ b/v2/ansible/executor/task_executor.py @@ -31,6 +31,7 @@ from ansible.playbook.conditional import Conditional from ansible.playbook.task import Task from ansible.plugins import lookup_loader, connection_loader, action_loader +from ansible.template import Templar from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode @@ -47,14 +48,14 @@ class TaskExecutor: class. ''' - def __init__(self, host, task, job_vars, connection_info, new_stdin, loader, module_loader): - self._host = host - self._task = task - self._job_vars = job_vars - self._connection_info = connection_info - self._new_stdin = new_stdin - self._loader = loader - self._module_loader = module_loader + def __init__(self, host, task, job_vars, connection_info, new_stdin, loader, shared_loader_obj): + self._host = host + self._task = task + self._job_vars = job_vars + self._connection_info = connection_info + self._new_stdin = new_stdin + self._loader = loader + self._shared_loader_obj = shared_loader_obj def run(self): ''' @@ -195,9 +196,11 @@ def _execute(self, variables=None): if variables is None: variables = self._job_vars + templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) + # fields set from the play/task may be based on variables, so we have to # do the same kind of post validation step on it here before we use it. 
- self._connection_info.post_validate(variables=variables, loader=self._loader) + self._connection_info.post_validate(templar=templar) # now that the connection information is finalized, we can add 'magic' # variables to the variable dictionary @@ -216,7 +219,7 @@ def _execute(self, variables=None): return dict(changed=False, skipped=True, skip_reason='Conditional check failed') # Now we do final validation on the task, which sets all fields to their final values - self._task.post_validate(variables) + self._task.post_validate(templar=templar) # if this task is a TaskInclude, we just return now with a success code so the # main thread can expand the task list for the given host @@ -336,7 +339,7 @@ def _poll_async_result(self, result): connection=self._connection, connection_info=self._connection_info, loader=self._loader, - module_loader=self._module_loader, + shared_loader_obj=self._shared_loader_obj, ) time_left = self._task.async @@ -408,7 +411,7 @@ def _get_action_handler(self, connection): connection=connection, connection_info=self._connection_info, loader=self._loader, - module_loader=self._module_loader, + shared_loader_obj=self._shared_loader_obj, ) if not handler: diff --git a/v2/ansible/executor/task_queue_manager.py b/v2/ansible/executor/task_queue_manager.py index 89869ad109dc93..a875c310d51b86 100644 --- a/v2/ansible/executor/task_queue_manager.py +++ b/v2/ansible/executor/task_queue_manager.py @@ -32,6 +32,7 @@ from ansible.executor.process.result import ResultProcess from ansible.executor.stats import AggregateStats from ansible.plugins import callback_loader, strategy_loader +from ansible.template import Templar from ansible.utils.debug import debug @@ -159,9 +160,10 @@ def run(self, play): ''' all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) + templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False) new_play = play.copy() - new_play.post_validate(all_vars, fail_on_undefined=False) + 
new_play.post_validate(templar) connection_info = ConnectionInformation(new_play, self._options, self.passwords) for callback_plugin in self._callback_plugins: diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 3a7879265ec3a5..82d1e704d19ddf 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -234,7 +234,7 @@ def copy(self): return new_me - def post_validate(self, all_vars=dict(), fail_on_undefined=True): + def post_validate(self, templar): ''' we can't tell that everything is of the right type until we have all the variables. Run basic types (from isa) as well as @@ -245,8 +245,6 @@ def post_validate(self, all_vars=dict(), fail_on_undefined=True): if self._loader is not None: basedir = self._loader.get_basedir() - templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=fail_on_undefined) - for (name, attribute) in iteritems(self._get_base_attributes()): if getattr(self, name) is None: diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index 33935d197f7d27..6e1983ee9ac23a 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -21,6 +21,7 @@ from six import iteritems, string_types +import inspect import os from hashlib import sha1 @@ -36,9 +37,11 @@ from ansible.playbook.role.include import RoleInclude from ansible.playbook.role.metadata import RoleMetadata from ansible.playbook.taggable import Taggable -from ansible.plugins import module_loader +from ansible.plugins import PluginLoader from ansible.utils.vars import combine_vars +from ansible import plugins as ansible_plugins + __all__ = ['Role', 'ROLE_CACHE', 'hash_params'] @@ -152,11 +155,15 @@ def _load_role_data(self, role_include, parent_role=None): current_tags.extend(role_include.tags) setattr(self, 'tags', current_tags) - # load the role's files, if they exist - library = os.path.join(self._role_path, 'library') - if os.path.isdir(library): - 
module_loader.add_directory(library) + # dynamically load any plugins from the role directory + for name, obj in inspect.getmembers(ansible_plugins): + if isinstance(obj, PluginLoader): + if obj.subdir: + plugin_path = os.path.join(self._role_path, obj.subdir) + if os.path.isdir(plugin_path): + obj.add_directory(plugin_path) + # load the role's other files, if they exist metadata = self._load_role_yaml('meta') if metadata: self._metadata = RoleMetadata.load(metadata, owner=self, loader=self._loader) diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py index 06f7239d1bd53b..060602579851d3 100644 --- a/v2/ansible/playbook/task.py +++ b/v2/ansible/playbook/task.py @@ -177,18 +177,18 @@ def preprocess_data(self, ds): return super(Task, self).preprocess_data(new_ds) - def post_validate(self, all_vars=dict(), fail_on_undefined=True): + def post_validate(self, templar): ''' Override of base class post_validate, to also do final validation on the block and task include (if any) to which this task belongs. ''' if self._block: - self._block.post_validate(all_vars=all_vars, fail_on_undefined=fail_on_undefined) + self._block.post_validate(templar) if self._task_include: - self._task_include.post_validate(all_vars=all_vars, fail_on_undefined=fail_on_undefined) + self._task_include.post_validate(templar) - super(Task, self).post_validate(all_vars=all_vars, fail_on_undefined=fail_on_undefined) + super(Task, self).post_validate(templar) def get_vars(self): all_vars = self.vars.copy() diff --git a/v2/ansible/plugins/action/__init__.py b/v2/ansible/plugins/action/__init__.py index 4265a8a5b2a3b9..62036cc7068211 100644 --- a/v2/ansible/plugins/action/__init__.py +++ b/v2/ansible/plugins/action/__init__.py @@ -44,13 +44,13 @@ class ActionBase: action in use. 
''' - def __init__(self, task, connection, connection_info, loader, module_loader): - self._task = task - self._connection = connection - self._connection_info = connection_info - self._loader = loader - self._module_loader = module_loader - self._shell = self.get_shell() + def __init__(self, task, connection, connection_info, loader, shared_loader_obj): + self._task = task + self._connection = connection + self._connection_info = connection_info + self._loader = loader + self._shared_loader_obj = shared_loader_obj + self._shell = self.get_shell() self._supports_check_mode = True @@ -73,9 +73,9 @@ def _configure_module(self, module_name, module_args): # Search module path(s) for named module. module_suffixes = getattr(self._connection, 'default_suffixes', None) - module_path = self._module_loader.find_plugin(module_name, module_suffixes) + module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, module_suffixes) if module_path is None: - module_path2 = self._module_loader.find_plugin('ping', module_suffixes) + module_path2 = self._shared_loader_obj.module_loader.find_plugin('ping', module_suffixes) if module_path2 is not None: raise AnsibleError("The module %s was not found in configured module paths" % (module_name)) else: diff --git a/v2/ansible/plugins/action/debug.py b/v2/ansible/plugins/action/debug.py index dc80dfc1795aa1..04db3c9cc1ba9d 100644 --- a/v2/ansible/plugins/action/debug.py +++ b/v2/ansible/plugins/action/debug.py @@ -35,7 +35,7 @@ def run(self, tmp=None, task_vars=dict()): result = dict(msg=self._task.args['msg']) # FIXME: move the LOOKUP_REGEX somewhere else elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): - templar = Templar(loader=self._loader, variables=task_vars) + templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=task_vars) results = templar.template(self._task.args['var'], convert_bare=True) result = dict() result[self._task.args['var']] 
= results diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py index 238c6222a831ef..ffba0ef7052459 100644 --- a/v2/ansible/plugins/strategies/__init__.py +++ b/v2/ansible/plugins/strategies/__init__.py @@ -30,12 +30,24 @@ from ansible.playbook.handler import Handler from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.role import ROLE_CACHE, hash_params -from ansible.plugins import module_loader +from ansible.plugins import module_loader, filter_loader, lookup_loader from ansible.utils.debug import debug __all__ = ['StrategyBase'] +# FIXME: this should probably be in the plugins/__init__.py, with +# a smarter mechanism to set all of the attributes based on +# the loaders created there +class SharedPluginLoaderObj: + ''' + A simple object to make pass the various plugin loaders to + the forked processes over the queue easier + ''' + def __init__(self): + self.module_loader = module_loader + self.filter_loader = filter_loader + self.lookup_loader = lookup_loader class StrategyBase: @@ -108,7 +120,12 @@ def _queue_task(self, host, task, task_vars, connection_info): self._cur_worker = 0 self._pending_results += 1 - main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, module_loader), block=False) + + # create a dummy object with plugin loaders set as an easier + # way to share them with the forked processes + shared_loader_obj = SharedPluginLoaderObj() + + main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, shared_loader_obj), block=False) except (EOFError, IOError, AssertionError) as e: # most likely an abort debug("got an error while queuing: %s" % e) diff --git a/v2/ansible/template/__init__.py b/v2/ansible/template/__init__.py index 3e61028d8d0b5c..19e091b9b27ad6 100644 --- a/v2/ansible/template/__init__.py +++ b/v2/ansible/template/__init__.py @@ -53,12 +53,19 @@ class Templar: The main class for templating, with the main entry-point of 
template(). ''' - def __init__(self, loader, variables=dict(), fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR): + def __init__(self, loader, shared_loader_obj=None, variables=dict(), fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR): self._loader = loader self._basedir = loader.get_basedir() self._filters = None self._available_variables = variables + if shared_loader_obj: + self._filter_loader = getattr(shared_loader_obj, 'filter_loader') + self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader') + else: + self._filter_loader = filter_loader + self._lookup_loader = lookup_loader + # flags to determine whether certain failures during templating # should result in fatal errors being raised self._fail_on_lookup_errors = True @@ -88,7 +95,7 @@ def _get_filters(self): if self._filters is not None: return self._filters.copy() - plugins = [x for x in filter_loader.all()] + plugins = [x for x in self._filter_loader.all()] self._filters = dict() for fp in plugins: @@ -205,7 +212,7 @@ def _finalize(self, thing): return thing if thing is not None else '' def _lookup(self, name, *args, **kwargs): - instance = lookup_loader.get(name.lower(), loader=self._loader) + instance = self._lookup_loader.get(name.lower(), loader=self._loader) if instance is not None: # safely catch run failures per #5059 From a87d8891826246cceb15961cfbbd6a2c728afb52 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 2 May 2015 01:34:03 -0500 Subject: [PATCH 0496/3617] Generalize plugin enumeration a bit (v2) --- v2/ansible/playbook/role/__init__.py | 15 ++++++--------- v2/ansible/plugins/__init__.py | 9 +++++++-- v2/ansible/plugins/strategies/__init__.py | 4 ++-- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/v2/ansible/playbook/role/__init__.py b/v2/ansible/playbook/role/__init__.py index 6e1983ee9ac23a..bea61147ae86b6 100644 --- a/v2/ansible/playbook/role/__init__.py +++ b/v2/ansible/playbook/role/__init__.py @@ -37,11 +37,9 @@ from ansible.playbook.role.include 
import RoleInclude from ansible.playbook.role.metadata import RoleMetadata from ansible.playbook.taggable import Taggable -from ansible.plugins import PluginLoader +from ansible.plugins import get_all_plugin_loaders from ansible.utils.vars import combine_vars -from ansible import plugins as ansible_plugins - __all__ = ['Role', 'ROLE_CACHE', 'hash_params'] @@ -156,12 +154,11 @@ def _load_role_data(self, role_include, parent_role=None): setattr(self, 'tags', current_tags) # dynamically load any plugins from the role directory - for name, obj in inspect.getmembers(ansible_plugins): - if isinstance(obj, PluginLoader): - if obj.subdir: - plugin_path = os.path.join(self._role_path, obj.subdir) - if os.path.isdir(plugin_path): - obj.add_directory(plugin_path) + for name, obj in get_all_plugin_loaders(): + if obj.subdir: + plugin_path = os.path.join(self._role_path, obj.subdir) + if os.path.isdir(plugin_path): + obj.add_directory(plugin_path) # load the role's other files, if they exist metadata = self._load_role_yaml('meta') diff --git a/v2/ansible/plugins/__init__.py b/v2/ansible/plugins/__init__.py index f81f8c9d387b16..5791677bd26f9e 100644 --- a/v2/ansible/plugins/__init__.py +++ b/v2/ansible/plugins/__init__.py @@ -20,11 +20,13 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import glob +import imp +import inspect import os import os.path import sys -import glob -import imp + from ansible import constants as C from ansible.utils.display import Display from ansible import errors @@ -40,6 +42,9 @@ def push_basedir(basedir): if basedir not in _basedirs: _basedirs.insert(0, basedir) +def get_all_plugin_loaders(): + return [(name, obj) for (name, obj) in inspect.getmembers(sys.modules[__name__]) if isinstance(obj, PluginLoader)] + class PluginLoader: ''' diff --git a/v2/ansible/plugins/strategies/__init__.py b/v2/ansible/plugins/strategies/__init__.py index ffba0ef7052459..f6103343712f5c 100644 --- 
a/v2/ansible/plugins/strategies/__init__.py +++ b/v2/ansible/plugins/strategies/__init__.py @@ -30,7 +30,7 @@ from ansible.playbook.handler import Handler from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.role import ROLE_CACHE, hash_params -from ansible.plugins import module_loader, filter_loader, lookup_loader +from ansible.plugins import filter_loader, lookup_loader, module_loader from ansible.utils.debug import debug @@ -45,9 +45,9 @@ class SharedPluginLoaderObj: the forked processes over the queue easier ''' def __init__(self): - self.module_loader = module_loader self.filter_loader = filter_loader self.lookup_loader = lookup_loader + self.module_loader = module_loader class StrategyBase: From 6a44056d76321a6728485f73f5aa6a03cc2a7b79 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 2 May 2015 22:15:45 -0500 Subject: [PATCH 0497/3617] Fix bug in f310d13 (v2) --- v2/ansible/playbook/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/v2/ansible/playbook/base.py b/v2/ansible/playbook/base.py index 82d1e704d19ddf..ecd217c1e8f26d 100644 --- a/v2/ansible/playbook/base.py +++ b/v2/ansible/playbook/base.py @@ -260,7 +260,7 @@ def post_validate(self, templar): # run the post-validator if present method = getattr(self, '_post_validate_%s' % name, None) if method: - value = method(attribute, value, all_vars, fail_on_undefined) + value = method(attribute, value, all_vars, templar._fail_on_undefined_errors) else: # otherwise, just make sure the attribute is of the type it should be if attribute.isa == 'string': @@ -281,7 +281,7 @@ def post_validate(self, templar): except (TypeError, ValueError) as e: raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. 
Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds()) except UndefinedError as e: - if fail_on_undefined: + if templar._fail_on_undefined_errors: raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds()) def serialize(self): From 8cf4452d48e583cfd59f96e67cfd34a1c35226e7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 2 May 2015 22:17:02 -0500 Subject: [PATCH 0498/3617] Fix module arg parsing when 'args' are present but not a dict (v2) --- v2/ansible/parsing/mod_args.py | 6 +++++- v2/ansible/plugins/cache/__init__.py | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/v2/ansible/parsing/mod_args.py b/v2/ansible/parsing/mod_args.py index f46b525c663c32..ed527f1b08fbaf 100644 --- a/v2/ansible/parsing/mod_args.py +++ b/v2/ansible/parsing/mod_args.py @@ -135,7 +135,11 @@ def _normalize_parameters(self, thing, action=None, additional_args=dict()): # this can occasionally happen, simplify if args and 'args' in args: - args = args['args'] + tmp_args = args['args'] + del args['args'] + if isinstance(tmp_args, string_types): + tmp_args = parse_kv(tmp_args) + args.update(tmp_args) # finally, update the args we're going to return with the ones # which were normalized above diff --git a/v2/ansible/plugins/cache/__init__.py b/v2/ansible/plugins/cache/__init__.py index 4aa8fda8bbbfff..8ffe554cc639d4 100644 --- a/v2/ansible/plugins/cache/__init__.py +++ b/v2/ansible/plugins/cache/__init__.py @@ -27,6 +27,7 @@ class FactCache(MutableMapping): def __init__(self, *args, **kwargs): self._plugin = cache_loader.get(C.CACHE_PLUGIN) if self._plugin is None: + # FIXME: this should be an exception return def __getitem__(self, key): From af74d7f1a961f2d1cccb06f1d911864c16ef9e86 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 3 May 2015 14:34:25 +0200 Subject: [PATCH 0499/3617] cloudstack: add get_domain() and get_account() to utils --- 
lib/ansible/module_utils/cloudstack.py | 42 ++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 7ea02d1be7be9b..2396c49caec6af 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -48,6 +48,8 @@ def __init__(self, module): self.module = module self._connect() + self.domain = None + self.account = None self.project = None self.ip_address = None self.zone = None @@ -73,7 +75,7 @@ def _connect(self): else: self.cs = CloudStack(**read_config()) - + # TODO: rename to has_changed() def _has_changed(self, want_dict, current_dict, only_keys=None): for key, value in want_dict.iteritems(): @@ -245,6 +247,42 @@ def get_hypervisor(self): self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor) + def get_account(self, key=None): + if self.account: + return self._get_by_key(key, self.account) + + account = self.module.params.get('account') + if not account: + return None + + args = {} + args['name'] = account + args['listall'] = True + accounts = self.cs.listAccounts(**args) + if accounts: + self.account = accounts['account'][0] + return self._get_by_key(key, self.account) + self.module.fail_json(msg="Account '%s' not found" % account) + + + def get_domain(self, key=None): + if self.domain: + return self._get_by_key(key, self.domain) + + domain = self.module.params.get('domain') + if not domain: + return None + + args = {} + args['name'] = domain + args['listall'] = True + domains = self.cs.listDomains(**args) + if domains: + self.domain = domains['domain'][0] + return self._get_by_key(key, self.domain) + self.module.fail_json(msg="Domain '%s' not found" % domain) + + def get_tags(self, resource=None): existing_tags = self.cs.listTags(resourceid=resource['id']) if existing_tags: @@ -309,7 +347,7 @@ def get_capabilities(self, key=None): self.capabilities = capabilities['capability'] return 
self._get_by_key(key, self.capabilities) - + # TODO: rename to poll_job() def _poll_job(self, job=None, key=None): if 'jobid' in job: while True: From 333c623b35c62f9199cdbdb4684e58789497f80a Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 3 May 2015 14:59:02 +0200 Subject: [PATCH 0500/3617] cloudstack: implement account und domain support in utils --- lib/ansible/module_utils/cloudstack.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 2396c49caec6af..f791b403263f91 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -121,8 +121,11 @@ def get_project(self, key=None): project = self.module.params.get('project') if not project: return None - - projects = self.cs.listProjects(listall=True) + args = {} + args['listall'] = True + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + projects = self.cs.listProjects(**args) if projects: for p in projects['project']: if project in [ p['name'], p['displaytext'], p['id'] ]: @@ -146,6 +149,8 @@ def get_ip_address(self, key=None): args = {} args['ipaddress'] = ip_address + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') args['projectid'] = self.get_project(key='id') ip_addresses = self.cs.listPublicIpAddresses(**args) @@ -170,6 +175,8 @@ def get_vm(self, key=None): self.module.fail_json(msg="Virtual machine param 'vm' is required") args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') args['projectid'] = self.get_project(key='id') args['zoneid'] = self.get_zone(key='id') vms = self.cs.listVirtualMachines(**args) @@ -255,8 +262,13 @@ def get_account(self, key=None): if not account: return None + domain = self.module.params.get('domain') + if not domain: + self.module.fail_json(msg="Account must be specified with 
Domain") + args = {} args['name'] = account + args['domainid'] = self.get_domain(key='id') args['listall'] = True accounts = self.cs.listAccounts(**args) if accounts: From 0588a0fdd974cae3b667e09c861455fcbab02f11 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sun, 3 May 2015 16:06:30 +0200 Subject: [PATCH 0501/3617] cloudstack: integration tests --- test/integration/Makefile | 5 + test/integration/cloudstack.yml | 13 ++ .../roles/test_cs_affinitygroup/meta/main.yml | 3 + .../test_cs_affinitygroup/tasks/main.yml | 58 ++++++ .../roles/test_cs_common/defaults/main.yml | 2 + .../roles/test_cs_instance/defaults/main.yml | 2 + .../roles/test_cs_instance/meta/main.yml | 3 + .../roles/test_cs_instance/tasks/absent.yml | 23 +++ .../roles/test_cs_instance/tasks/cleanup.yml | 36 ++++ .../roles/test_cs_instance/tasks/main.yml | 11 ++ .../roles/test_cs_instance/tasks/present.yml | 168 ++++++++++++++++++ .../roles/test_cs_instance/tasks/setup.yml | 32 ++++ .../roles/test_cs_instance/tasks/tags.yml | 82 +++++++++ .../roles/test_cs_instancegroup/meta/main.yml | 3 + .../test_cs_instancegroup/tasks/main.yml | 58 ++++++ .../roles/test_cs_securitygroup/meta/main.yml | 3 + .../test_cs_securitygroup/tasks/main.yml | 58 ++++++ .../test_cs_securitygroup_rule/meta/main.yml | 3 + .../tasks/absent.yml | 105 +++++++++++ .../tasks/cleanup.yml | 7 + .../test_cs_securitygroup_rule/tasks/main.yml | 4 + .../tasks/present.yml | 118 ++++++++++++ .../tasks/setup.yml | 56 ++++++ .../roles/test_cs_sshkeypair/meta/main.yml | 3 + .../roles/test_cs_sshkeypair/tasks/main.yml | 89 ++++++++++ 25 files changed, 945 insertions(+) create mode 100644 test/integration/cloudstack.yml create mode 100644 test/integration/roles/test_cs_affinitygroup/meta/main.yml create mode 100644 test/integration/roles/test_cs_affinitygroup/tasks/main.yml create mode 100644 test/integration/roles/test_cs_common/defaults/main.yml create mode 100644 test/integration/roles/test_cs_instance/defaults/main.yml create mode 100644 
test/integration/roles/test_cs_instance/meta/main.yml create mode 100644 test/integration/roles/test_cs_instance/tasks/absent.yml create mode 100644 test/integration/roles/test_cs_instance/tasks/cleanup.yml create mode 100644 test/integration/roles/test_cs_instance/tasks/main.yml create mode 100644 test/integration/roles/test_cs_instance/tasks/present.yml create mode 100644 test/integration/roles/test_cs_instance/tasks/setup.yml create mode 100644 test/integration/roles/test_cs_instance/tasks/tags.yml create mode 100644 test/integration/roles/test_cs_instancegroup/meta/main.yml create mode 100644 test/integration/roles/test_cs_instancegroup/tasks/main.yml create mode 100644 test/integration/roles/test_cs_securitygroup/meta/main.yml create mode 100644 test/integration/roles/test_cs_securitygroup/tasks/main.yml create mode 100644 test/integration/roles/test_cs_securitygroup_rule/meta/main.yml create mode 100644 test/integration/roles/test_cs_securitygroup_rule/tasks/absent.yml create mode 100644 test/integration/roles/test_cs_securitygroup_rule/tasks/cleanup.yml create mode 100644 test/integration/roles/test_cs_securitygroup_rule/tasks/main.yml create mode 100644 test/integration/roles/test_cs_securitygroup_rule/tasks/present.yml create mode 100644 test/integration/roles/test_cs_securitygroup_rule/tasks/setup.yml create mode 100644 test/integration/roles/test_cs_sshkeypair/meta/main.yml create mode 100644 test/integration/roles/test_cs_sshkeypair/tasks/main.yml diff --git a/test/integration/Makefile b/test/integration/Makefile index 6e2acec341d131..28de76c7cdf759 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -144,6 +144,11 @@ rackspace: $(CREDENTIALS_FILE) CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make rackspace_cleanup ; \ exit $$RC; +cloudstack: + ansible-playbook cloudstack.yml -i $(INVENTORY) -e @$(VARS_FILE) -e "resource_prefix=$(CLOUD_RESOURCE_PREFIX)" -v $(TEST_FLAGS) ; \ + RC=$$? 
; \ + exit $$RC; + $(CONSUL_RUNNING): consul: diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml new file mode 100644 index 00000000000000..5f5e65c6cd1107 --- /dev/null +++ b/test/integration/cloudstack.yml @@ -0,0 +1,13 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tags: + - cloudstack + roles: + - { role: test_cs_sshkeypair, tags: test_cs_sshkeypair } + - { role: test_cs_affinitygroup, tags: test_cs_affinitygroup } + - { role: test_cs_securitygroup, tags: test_cs_securitygroup } + - { role: test_cs_securitygroup_rule, tags: test_cs_securitygroup_rule } + - { role: test_cs_instance, tags: test_cs_instance } + - { role: test_cs_instancegroup, tags: test_cs_instancegroup } diff --git a/test/integration/roles/test_cs_affinitygroup/meta/main.yml b/test/integration/roles/test_cs_affinitygroup/meta/main.yml new file mode 100644 index 00000000000000..03e38bd4f7afd2 --- /dev/null +++ b/test/integration/roles/test_cs_affinitygroup/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_affinitygroup/tasks/main.yml b/test/integration/roles/test_cs_affinitygroup/tasks/main.yml new file mode 100644 index 00000000000000..7ebab20bad3230 --- /dev/null +++ b/test/integration/roles/test_cs_affinitygroup/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: setup + cs_affinitygroup: name={{ cs_resource_prefix }}_ag state=absent + register: ag +- name: verify setup + assert: + that: + - ag|success + +- name: test fail if missing name + action: cs_affinitygroup + register: ag + ignore_errors: true +- name: verify results of fail if missing name + assert: + that: + - ag|failed + - ag.msg == "missing required arguments: name" + +- name: test present affinity group + cs_affinitygroup: name={{ cs_resource_prefix }}_ag + register: ag +- name: verify results of create affinity group + assert: + that: + - ag|success + - ag|changed + - ag.name == "{{ cs_resource_prefix }}_ag" + +- name: test 
present affinity group is idempotence + cs_affinitygroup: name={{ cs_resource_prefix }}_ag + register: ag +- name: verify results present affinity group is idempotence + assert: + that: + - ag|success + - not ag|changed + - ag.name == "{{ cs_resource_prefix }}_ag" + +- name: test absent affinity group + cs_affinitygroup: name={{ cs_resource_prefix }}_ag state=absent + register: ag +- name: verify results of absent affinity group + assert: + that: + - ag|success + - ag|changed + - ag.name == "{{ cs_resource_prefix }}_ag" + +- name: test absent affinity group is idempotence + cs_affinitygroup: name={{ cs_resource_prefix }}_ag state=absent + register: ag +- name: verify results of absent affinity group is idempotence + assert: + that: + - ag|success + - not ag|changed + - ag.name is undefined diff --git a/test/integration/roles/test_cs_common/defaults/main.yml b/test/integration/roles/test_cs_common/defaults/main.yml new file mode 100644 index 00000000000000..ba9674ac923760 --- /dev/null +++ b/test/integration/roles/test_cs_common/defaults/main.yml @@ -0,0 +1,2 @@ +--- +cs_resource_prefix: cloudstack diff --git a/test/integration/roles/test_cs_instance/defaults/main.yml b/test/integration/roles/test_cs_instance/defaults/main.yml new file mode 100644 index 00000000000000..585947ab43e8bd --- /dev/null +++ b/test/integration/roles/test_cs_instance/defaults/main.yml @@ -0,0 +1,2 @@ +--- +instance_number: 1 diff --git a/test/integration/roles/test_cs_instance/meta/main.yml b/test/integration/roles/test_cs_instance/meta/main.yml new file mode 100644 index 00000000000000..03e38bd4f7afd2 --- /dev/null +++ b/test/integration/roles/test_cs_instance/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_instance/tasks/absent.yml b/test/integration/roles/test_cs_instance/tasks/absent.yml new file mode 100644 index 00000000000000..bafb3ec9e7621b --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/absent.yml @@ 
-0,0 +1,23 @@ +--- +- name: test destroy instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: absent + register: instance +- name: verify destroy instance + assert: + that: + - instance|success + - instance|changed + - instance.state == "Destroyed" + +- name: test destroy instance idempotence + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: absent + register: instance +- name: verify destroy instance idempotence + assert: + that: + - instance|success + - not instance|changed diff --git a/test/integration/roles/test_cs_instance/tasks/cleanup.yml b/test/integration/roles/test_cs_instance/tasks/cleanup.yml new file mode 100644 index 00000000000000..63192dbd608c8d --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/cleanup.yml @@ -0,0 +1,36 @@ +--- +- name: cleanup ssh key + cs_sshkeypair: name={{ cs_resource_prefix }}-sshkey state=absent + register: sshkey +- name: verify cleanup ssh key + assert: + that: + - sshkey|success + +- name: cleanup affinity group + cs_affinitygroup: name={{ cs_resource_prefix }}-ag state=absent + register: ag + until: ag|success + retries: 20 + delay: 5 +- name: verify cleanup affinity group + assert: + that: + - ag|success + +- name: cleanup security group ...take a while unless instance is expunged + cs_securitygroup: name={{ cs_resource_prefix }}-sg state=absent + register: sg + until: sg|success + retries: 100 + delay: 10 +- name: verify cleanup security group + assert: + that: + - sg|success + +# force expunge, only works with admin permissions +- cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: expunged + failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/main.yml b/test/integration/roles/test_cs_instance/tasks/main.yml new file mode 100644 index 00000000000000..479ea01c15d9b8 --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- include: 
setup.yml + tags: any +- include: present.yml + tags: test_cs_instance_present +#- include: tags.yml +# tags: test_cs_instance_tags +- include: absent.yml + tags: test_cs_instance_absent +- include: cleanup.yml + tags: test_cs_instance_cleanup diff --git a/test/integration/roles/test_cs_instance/tasks/present.yml b/test/integration/roles/test_cs_instance/tasks/present.yml new file mode 100644 index 00000000000000..4337f0acf4d213 --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/present.yml @@ -0,0 +1,168 @@ +--- +- name: test create instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: Linux Debian 7 64-bit + service_offering: Tiny + affinity_group: "{{ cs_resource_prefix }}-ag" + security_group: "{{ cs_resource_prefix }}-sg" + ssh_key: "{{ cs_resource_prefix }}-sshkey" + tags: [] + register: instance +- name: verify create instance + assert: + that: + - instance|success + - instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "Tiny" + - instance.state == "Running" + - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" + - not instance.tags + + +- name: test create instance idempotence + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: Linux Debian 7 64-bit + service_offering: Tiny + affinity_group: "{{ cs_resource_prefix }}-ag" + security_group: "{{ cs_resource_prefix }}-sg" + ssh_key: "{{ cs_resource_prefix }}-sshkey" + tags: [] + register: instance +- name: verify create instance idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "Tiny" + - instance.state == "Running" + - instance.ssh_key == "{{ 
cs_resource_prefix }}-sshkey" + - not instance.tags + + +- name: test running instance not updated + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: Micro + register: instance +- name: verify running instance not updated + assert: + that: + - instance|success + - not instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "Tiny" + - instance.state == "Running" + + +- name: test stopping instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: stopped + register: instance +- name: verify stopping instance + assert: + that: + - instance|success + - instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "Tiny" + - instance.state == "Stopped" + + +- name: test stopping instance idempotence + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: stopped + register: instance +- name: verify stopping instance idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.state == "Stopped" + + +- name: test updating stopped instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + display_name: "{{ cs_resource_prefix }}-display-{{ instance_number }}" + service_offering: Micro + register: instance +- name: verify updating stopped instance + assert: + that: + - instance|success + - instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" + - instance.service_offering == "Micro" + - instance.state == "Stopped" + + +- name: test starting instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ 
instance_number }}" + state: started + register: instance +- name: verify starting instance + assert: + that: + - instance|success + - instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" + - instance.service_offering == "Micro" + - instance.state == "Running" + + +- name: test starting instance idempotence + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: started + register: instance +- name: verify starting instance idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.state == "Running" + +- name: test force update running instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: Tiny + force: true + register: instance +- name: verify force update running instance + assert: + that: + - instance|success + - instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "Tiny" + - instance.state == "Running" + +- name: test force update running instance idempotence + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: Tiny + force: true + register: instance +- name: verify force update running instance idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "Tiny" + - instance.state == "Running" diff --git a/test/integration/roles/test_cs_instance/tasks/setup.yml b/test/integration/roles/test_cs_instance/tasks/setup.yml new file mode 100644 index 00000000000000..32f3ff13e248e6 --- /dev/null +++ 
b/test/integration/roles/test_cs_instance/tasks/setup.yml @@ -0,0 +1,32 @@ +--- +- name: setup ssh key + cs_sshkeypair: name={{ cs_resource_prefix }}-sshkey + register: sshkey +- name: verify setup ssh key + assert: + that: + - sshkey|success + +- name: setup affinity group + cs_affinitygroup: name={{ cs_resource_prefix }}-ag + register: ag +- name: verify setup affinity group + assert: + that: + - ag|success + +- name: setup security group + cs_securitygroup: name={{ cs_resource_prefix }}-sg + register: sg +- name: verify setup security group + assert: + that: + - sg|success + +- name: setup instance to be absent + cs_instance: name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent + register: instance +- name: verify instance to be absent + assert: + that: + - instance|success diff --git a/test/integration/roles/test_cs_instance/tasks/tags.yml b/test/integration/roles/test_cs_instance/tasks/tags.yml new file mode 100644 index 00000000000000..a86158df0fdb64 --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/tags.yml @@ -0,0 +1,82 @@ +--- +- name: test add tags to instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + tags: + - { key: "{{ cs_resource_prefix }}-tag1", value: "{{ cs_resource_prefix }}-value1" } + - { key: "{{ cs_resource_prefix }}-tag2", value: "{{ cs_resource_prefix }}-value2" } + register: instance +- name: verify add tags to instance + assert: + that: + - instance|success + - instance|changed + - instance.tags|length == 2 + - instance.tags[0]['key'] == "{{ cs_resource_prefix }}-tag1" + - instance.tags[1]['key'] == "{{ cs_resource_prefix }}-tag2" + - instance.tags[0]['value'] == "{{ cs_resource_prefix }}-value1" + - instance.tags[1]['value'] == "{{ cs_resource_prefix }}-value2" + + +- name: test tags to instance idempotence + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + tags: + - { key: "{{ cs_resource_prefix }}-tag1", value: "{{ cs_resource_prefix }}-value1" } 
+ - { key: "{{ cs_resource_prefix }}-tag2", value: "{{ cs_resource_prefix }}-value2" } + register: instance +- name: verify tags to instance idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.tags|length == 2 + - instance.tags[0]['key'] == "{{ cs_resource_prefix }}-tag1" + - instance.tags[1]['key'] == "{{ cs_resource_prefix }}-tag2" + - instance.tags[0]['value'] == "{{ cs_resource_prefix }}-value1" + - instance.tags[1]['value'] == "{{ cs_resource_prefix }}-value2" + +- name: test change tags of instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + tags: + - { key: "{{ cs_resource_prefix }}-tag2", value: "{{ cs_resource_prefix }}-value2" } + - { key: "{{ cs_resource_prefix }}-tag3", value: "{{ cs_resource_prefix }}-value3" } + register: instance +- name: verify tags to instance idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.tags|length == 2 + - instance.tags[0]['key'] == "{{ cs_resource_prefix }}-tag1" + - instance.tags[1]['key'] == "{{ cs_resource_prefix }}-tag3" + - instance.tags[0]['value'] == "{{ cs_resource_prefix }}-value1" + - instance.tags[1]['value'] == "{{ cs_resource_prefix }}-value3" + +- name: test not touch tags of instance if no param tags + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + register: instance +- name: verify not touch tags of instance if no param tags + assert: + that: + - instance|success + - not instance|changed + - instance.tags|length == 2 + - instance.tags[0]['key'] == "{{ cs_resource_prefix }}-tag1" + - instance.tags[1]['key'] == "{{ cs_resource_prefix }}-tag3" + - instance.tags[0]['value'] == "{{ cs_resource_prefix }}-value1" + - instance.tags[1]['value'] == "{{ cs_resource_prefix }}-value3" + +- name: test remove tags + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + tags: [] + register: instance +- name: verify remove tags + assert: + that: + - instance|success + - 
not instance|changed + - instance.tags|length == 0 diff --git a/test/integration/roles/test_cs_instancegroup/meta/main.yml b/test/integration/roles/test_cs_instancegroup/meta/main.yml new file mode 100644 index 00000000000000..03e38bd4f7afd2 --- /dev/null +++ b/test/integration/roles/test_cs_instancegroup/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_instancegroup/tasks/main.yml b/test/integration/roles/test_cs_instancegroup/tasks/main.yml new file mode 100644 index 00000000000000..e3a726bf6f7f53 --- /dev/null +++ b/test/integration/roles/test_cs_instancegroup/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: setup + cs_instancegroup: name={{ cs_resource_prefix }}_ig state=absent + register: ig +- name: verify setup + assert: + that: + - ig|success + +- name: test fail if missing name + action: cs_instancegroup + register: ig + ignore_errors: true +- name: verify results of fail if missing name + assert: + that: + - ig|failed + - ig.msg == "missing required arguments: name" + +- name: test present instance group + cs_instancegroup: name={{ cs_resource_prefix }}_ig + register: ig +- name: verify results of create instance group + assert: + that: + - ig|success + - ig|changed + - ig.name == "{{ cs_resource_prefix }}_ig" + +- name: test present instance group is idempotence + cs_instancegroup: name={{ cs_resource_prefix }}_ig + register: ig +- name: verify results present instance group is idempotence + assert: + that: + - ig|success + - not ig|changed + - ig.name == "{{ cs_resource_prefix }}_ig" + +- name: test absent instance group + cs_instancegroup: name={{ cs_resource_prefix }}_ig state=absent + register: ig +- name: verify results of absent instance group + assert: + that: + - ig|success + - ig|changed + - ig.name == "{{ cs_resource_prefix }}_ig" + +- name: test absent instance group is idempotence + cs_instancegroup: name={{ cs_resource_prefix }}_ig state=absent + register: ig +- name: verify 
results of absent instance group is idempotence + assert: + that: + - ig|success + - not ig|changed + - ig.name is undefined diff --git a/test/integration/roles/test_cs_securitygroup/meta/main.yml b/test/integration/roles/test_cs_securitygroup/meta/main.yml new file mode 100644 index 00000000000000..03e38bd4f7afd2 --- /dev/null +++ b/test/integration/roles/test_cs_securitygroup/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_securitygroup/tasks/main.yml b/test/integration/roles/test_cs_securitygroup/tasks/main.yml new file mode 100644 index 00000000000000..d22871739e52ab --- /dev/null +++ b/test/integration/roles/test_cs_securitygroup/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: setup + cs_securitygroup: name={{ cs_resource_prefix }}_sg state=absent + register: sg +- name: verify setup + assert: + that: + - sg|success + +- name: test fail if missing name + action: cs_securitygroup + register: sg + ignore_errors: true +- name: verify results of fail if missing name + assert: + that: + - sg|failed + - sg.msg == "missing required arguments: name" + +- name: test present security group + cs_securitygroup: name={{ cs_resource_prefix }}_sg + register: sg +- name: verify results of create security group + assert: + that: + - sg|success + - sg|changed + - sg.name == "{{ cs_resource_prefix }}_sg" + +- name: test present security group is idempotence + cs_securitygroup: name={{ cs_resource_prefix }}_sg + register: sg +- name: verify results present security group is idempotence + assert: + that: + - sg|success + - not sg|changed + - sg.name == "{{ cs_resource_prefix }}_sg" + +- name: test absent security group + cs_securitygroup: name={{ cs_resource_prefix }}_sg state=absent + register: sg +- name: verify results of absent security group + assert: + that: + - sg|success + - sg|changed + - sg.name == "{{ cs_resource_prefix }}_sg" + +- name: test absent security group is idempotence + cs_securitygroup: name={{ 
cs_resource_prefix }}_sg state=absent + register: sg +- name: verify results of absent security group is idempotence + assert: + that: + - sg|success + - not sg|changed + - sg.name is undefined diff --git a/test/integration/roles/test_cs_securitygroup_rule/meta/main.yml b/test/integration/roles/test_cs_securitygroup_rule/meta/main.yml new file mode 100644 index 00000000000000..03e38bd4f7afd2 --- /dev/null +++ b/test/integration/roles/test_cs_securitygroup_rule/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_securitygroup_rule/tasks/absent.yml b/test/integration/roles/test_cs_securitygroup_rule/tasks/absent.yml new file mode 100644 index 00000000000000..8ee080a8aef7b4 --- /dev/null +++ b/test/integration/roles/test_cs_securitygroup_rule/tasks/absent.yml @@ -0,0 +1,105 @@ +- name: test remove http range rule + cs_securitygroup_rule: + security_group: default + start_port: 8000 + end_port: 8888 + cidr: 1.2.3.4/32 + state: absent + register: sg_rule +- name: verify create http range rule + assert: + that: + - sg_rule|success + - sg_rule|changed + - sg_rule.type == 'ingress' + - sg_rule.security_group == 'default' + - sg_rule.protocol == 'tcp' + - sg_rule.start_port == 8000 + - sg_rule.end_port == 8888 + - sg_rule.cidr == '1.2.3.4/32' + +- name: test remove http range rule idempotence + cs_securitygroup_rule: + security_group: default + start_port: 8000 + end_port: 8888 + cidr: 1.2.3.4/32 + state: absent + register: sg_rule +- name: verify create http range rule idempotence + assert: + that: + - sg_rule|success + - not sg_rule|changed + +- name: test remove single port udp rule + cs_securitygroup_rule: + security_group: default + port: 5353 + protocol: udp + type: egress + user_security_group: '{{ cs_resource_prefix }}_sg' + state: absent + register: sg_rule +- name: verify remove single port udp rule + assert: + that: + - sg_rule|success + - sg_rule|changed + - sg_rule.type == 'egress' + - 
sg_rule.security_group == 'default' + - sg_rule.protocol == 'udp' + - sg_rule.start_port == 5353 + - sg_rule.end_port == 5353 + - sg_rule.user_security_group == '{{ cs_resource_prefix }}_sg' + +- name: test remove single port udp rule idempotence + cs_securitygroup_rule: + security_group: default + port: 5353 + protocol: udp + type: egress + user_security_group: '{{ cs_resource_prefix }}_sg' + state: absent + register: sg_rule +- name: verify remove single port udp rule idempotence + assert: + that: + - sg_rule|success + - not sg_rule|changed + +- name: test remove icmp rule + cs_securitygroup_rule: + security_group: default + protocol: icmp + type: ingress + icmp_type: -1 + icmp_code: -1 + state: absent + register: sg_rule +- name: verify icmp rule + assert: + that: + - sg_rule|success + - sg_rule|changed + - sg_rule.type == 'ingress' + - sg_rule.security_group == 'default' + - sg_rule.cidr == '0.0.0.0/0' + - sg_rule.protocol == 'icmp' + - sg_rule.icmp_code == -1 + - sg_rule.icmp_type == -1 + +- name: test remove icmp rule idempotence + cs_securitygroup_rule: + security_group: default + protocol: icmp + type: ingress + icmp_type: -1 + icmp_code: -1 + state: absent + register: sg_rule +- name: verify icmp rule idempotence + assert: + that: + - sg_rule|success + - not sg_rule|changed diff --git a/test/integration/roles/test_cs_securitygroup_rule/tasks/cleanup.yml b/test/integration/roles/test_cs_securitygroup_rule/tasks/cleanup.yml new file mode 100644 index 00000000000000..712ab5c6ce4881 --- /dev/null +++ b/test/integration/roles/test_cs_securitygroup_rule/tasks/cleanup.yml @@ -0,0 +1,7 @@ +- name: cleanup custom security group + cs_securitygroup: name={{ cs_resource_prefix }}_sg state=absent + register: sg +- name: verify setup + assert: + that: + - sg|success diff --git a/test/integration/roles/test_cs_securitygroup_rule/tasks/main.yml b/test/integration/roles/test_cs_securitygroup_rule/tasks/main.yml new file mode 100644 index 00000000000000..e76745cb543013 --- 
/dev/null +++ b/test/integration/roles/test_cs_securitygroup_rule/tasks/main.yml @@ -0,0 +1,4 @@ +- include: setup.yml +- include: present.yml +- include: absent.yml +- include: cleanup.yml diff --git a/test/integration/roles/test_cs_securitygroup_rule/tasks/present.yml b/test/integration/roles/test_cs_securitygroup_rule/tasks/present.yml new file mode 100644 index 00000000000000..92973290d40577 --- /dev/null +++ b/test/integration/roles/test_cs_securitygroup_rule/tasks/present.yml @@ -0,0 +1,118 @@ +- name: test create http range rule + cs_securitygroup_rule: + security_group: default + start_port: 8000 + end_port: 8888 + cidr: 1.2.3.4/32 + register: sg_rule +- name: verify create http range rule + assert: + that: + - sg_rule|success + - sg_rule|changed + - sg_rule.type == 'ingress' + - sg_rule.security_group == 'default' + - sg_rule.protocol == 'tcp' + - sg_rule.start_port == 8000 + - sg_rule.end_port == 8888 + - sg_rule.cidr == '1.2.3.4/32' + +- name: test create http range rule idempotence + cs_securitygroup_rule: + security_group: default + start_port: 8000 + end_port: 8888 + cidr: 1.2.3.4/32 + register: sg_rule +- name: verify create http range rule idempotence + assert: + that: + - sg_rule|success + - not sg_rule|changed + - sg_rule.type == 'ingress' + - sg_rule.security_group == 'default' + - sg_rule.protocol == 'tcp' + - sg_rule.start_port == 8000 + - sg_rule.end_port == 8888 + - sg_rule.cidr == '1.2.3.4/32' + +- name: test create single port udp rule + cs_securitygroup_rule: + security_group: default + port: 5353 + protocol: udp + type: egress + user_security_group: '{{ cs_resource_prefix }}_sg' + register: sg_rule +- name: verify create single port udp rule + assert: + that: + - sg_rule|success + - sg_rule|changed + - sg_rule.type == 'egress' + - sg_rule.security_group == 'default' + - sg_rule.protocol == 'udp' + - sg_rule.start_port == 5353 + - sg_rule.end_port == 5353 + - sg_rule.user_security_group == '{{ cs_resource_prefix }}_sg' + + +- name: test 
single port udp rule idempotence + cs_securitygroup_rule: + security_group: default + port: 5353 + protocol: udp + type: egress + user_security_group: '{{ cs_resource_prefix }}_sg' + register: sg_rule +- name: verify single port udp rule idempotence + assert: + that: + - sg_rule|success + - not sg_rule|changed + - sg_rule.type == 'egress' + - sg_rule.security_group == 'default' + - sg_rule.protocol == 'udp' + - sg_rule.start_port == 5353 + - sg_rule.end_port == 5353 + - sg_rule.user_security_group == '{{ cs_resource_prefix }}_sg' + +- name: test icmp rule + cs_securitygroup_rule: + security_group: default + protocol: icmp + type: ingress + icmp_type: -1 + icmp_code: -1 + register: sg_rule +- name: verify icmp rule + assert: + that: + - sg_rule|success + - sg_rule|changed + - sg_rule.type == 'ingress' + - sg_rule.security_group == 'default' + - sg_rule.cidr == '0.0.0.0/0' + - sg_rule.protocol == 'icmp' + - sg_rule.icmp_code == -1 + - sg_rule.icmp_type == -1 + +- name: test icmp rule idempotence + cs_securitygroup_rule: + security_group: default + protocol: icmp + type: ingress + icmp_type: -1 + icmp_code: -1 + register: sg_rule +- name: verify icmp rule idempotence + assert: + that: + - sg_rule|success + - not sg_rule|changed + - sg_rule.type == 'ingress' + - sg_rule.security_group == 'default' + - sg_rule.cidr == '0.0.0.0/0' + - sg_rule.protocol == 'icmp' + - sg_rule.icmp_code == -1 + - sg_rule.icmp_type == -1 diff --git a/test/integration/roles/test_cs_securitygroup_rule/tasks/setup.yml b/test/integration/roles/test_cs_securitygroup_rule/tasks/setup.yml new file mode 100644 index 00000000000000..797330ebc18640 --- /dev/null +++ b/test/integration/roles/test_cs_securitygroup_rule/tasks/setup.yml @@ -0,0 +1,56 @@ +- name: setup custom security group + cs_securitygroup: name={{ cs_resource_prefix }}_sg + register: sg +- name: verify setup + assert: + that: + - sg|success + +- name: setup default security group + cs_securitygroup: name=default + register: sg +- name: 
verify setup + assert: + that: + - sg|success + +- name: setup remove icmp rule + cs_securitygroup_rule: + security_group: default + protocol: icmp + type: ingress + icmp_type: -1 + icmp_code: -1 + state: absent + register: sg_rule +- name: verify remove icmp rule + assert: + that: + - sg_rule|success + +- name: setup remove http range rule + cs_securitygroup_rule: + security_group: default + start_port: 8000 + end_port: 8888 + cidr: 1.2.3.4/32 + state: absent + register: sg_rule +- name: verify remove http range rule + assert: + that: + - sg_rule|success + +- name: setup remove single port udp rule + cs_securitygroup_rule: + security_group: default + port: 5353 + protocol: udp + type: egress + user_security_group: '{{ cs_resource_prefix }}-user-sg' + state: absent + register: sg_rule +- name: verify remove single port udp rule + assert: + that: + - sg_rule|success diff --git a/test/integration/roles/test_cs_sshkeypair/meta/main.yml b/test/integration/roles/test_cs_sshkeypair/meta/main.yml new file mode 100644 index 00000000000000..03e38bd4f7afd2 --- /dev/null +++ b/test/integration/roles/test_cs_sshkeypair/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_sshkeypair/tasks/main.yml b/test/integration/roles/test_cs_sshkeypair/tasks/main.yml new file mode 100644 index 00000000000000..35023b38aa178b --- /dev/null +++ b/test/integration/roles/test_cs_sshkeypair/tasks/main.yml @@ -0,0 +1,89 @@ +--- +- name: setup cleanup + cs_sshkeypair: name={{ cs_resource_prefix }}-sshkey state=absent + +- name: test fail on missing name + action: cs_sshkeypair + ignore_errors: true + register: sshkey +- name: verify results of fail on missing name + assert: + that: + - sshkey|failed + - sshkey.msg == "missing required arguments: name" + +- name: test ssh key creation + cs_sshkeypair: name={{ cs_resource_prefix }}-sshkey + register: sshkey +- name: verify results of ssh key creation + assert: + that: + - sshkey|success + - 
sshkey|changed + - sshkey.fingerprint is defined and sshkey.fingerprint != "" + - sshkey.private_key is defined and sshkey.private_key != "" + - sshkey.name == "{{ cs_resource_prefix }}-sshkey" + +- name: test ssh key creation idempotence + cs_sshkeypair: name={{ cs_resource_prefix }}-sshkey + register: sshkey2 +- name: verify results of ssh key creation idempotence + assert: + that: + - sshkey2|success + - not sshkey2|changed + - sshkey2.fingerprint is defined and sshkey2.fingerprint == sshkey.fingerprint + - sshkey2.private_key is not defined + - sshkey2.name == "{{ cs_resource_prefix }}-sshkey" + +- name: test replace ssh public key + cs_sshkeypair: | + name={{ cs_resource_prefix }}-sshkey + public_key="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDsTI7KJZ8tz/CwQIrSol41c6s3vzkGYCMI8o7P9Et48UG9eRoGaMaGYaTvBTj/VQrD7cfurI6Bn0HTT3FLK3OHOweyelm9rIiQ2hjkSl+2lIKWHu992GO58E5Gcy9yYW4sHGgGLNZkPBKrrj0w7lhmiHjPtVnf+2+7Ix1WOO2/HXPcAHhsX/AlyItDewIL4mr/BT83vq0202sPCiM2cFQJl+5WGwS1wYYK8d167cspsmdyX7OyAFCUB0vueuqjE8MFqJvyIJR9y8Lj9Ny71pSV5/QWrXUgELxMYOKSby3gHkxcIXgYBMFLl4DipRTO74OWQlRRaOlqXlOOQbikcY4T rene.moser@swisstxt.ch" + register: sshkey3 +- name: verify results of replace ssh public key + assert: + that: + - sshkey3|success + - sshkey3|changed + - sshkey3.fingerprint is defined and sshkey3.fingerprint != sshkey2.fingerprint + - sshkey3.private_key is not defined + - sshkey3.name == "{{ cs_resource_prefix }}-sshkey" + +- name: test replace ssh public key idempotence + cs_sshkeypair: | + name={{ cs_resource_prefix }}-sshkey + public_key="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDsTI7KJZ8tz/CwQIrSol41c6s3vzkGYCMI8o7P9Et48UG9eRoGaMaGYaTvBTj/VQrD7cfurI6Bn0HTT3FLK3OHOweyelm9rIiQ2hjkSl+2lIKWHu992GO58E5Gcy9yYW4sHGgGLNZkPBKrrj0w7lhmiHjPtVnf+2+7Ix1WOO2/HXPcAHhsX/AlyItDewIL4mr/BT83vq0202sPCiM2cFQJl+5WGwS1wYYK8d167cspsmdyX7OyAFCUB0vueuqjE8MFqJvyIJR9y8Lj9Ny71pSV5/QWrXUgELxMYOKSby3gHkxcIXgYBMFLl4DipRTO74OWQlRRaOlqXlOOQbikcY4T rene.moser@swisstxt.ch" + register: sshkey4 +- name: verify results of 
ssh public key idempotence + assert: + that: + - sshkey4|success + - not sshkey4|changed + - sshkey4.fingerprint is defined and sshkey4.fingerprint == sshkey3.fingerprint + - sshkey4.private_key is not defined + - sshkey4.name == "{{ cs_resource_prefix }}-sshkey" + +- name: test ssh key absent + cs_sshkeypair: name={{ cs_resource_prefix }}-sshkey state=absent + register: sshkey5 +- name: verify result of key absent + assert: + that: + - sshkey5|success + - sshkey5|changed + - sshkey5.fingerprint is defined and sshkey5.fingerprint == sshkey3.fingerprint + - sshkey5.private_key is not defined + - sshkey5.name == "{{ cs_resource_prefix }}-sshkey" + +- name: test ssh key absent idempotence + cs_sshkeypair: name={{ cs_resource_prefix }}-sshkey state=absent + register: sshkey6 +- name: verify result of ssh key absent idempotence + assert: + that: + - sshkey6|success + - not sshkey6|changed + - sshkey6.fingerprint is not defined + - sshkey6.private_key is not defined + - sshkey6.name is not defined From ce3ef7f4c16e47d5a0b5600e1c56c177b7c93f0d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 3 May 2015 21:47:26 -0500 Subject: [PATCH 0502/3617] Making the switch to v2 --- .gitmodules | 16 - bin/ansible | 202 +- bin/ansible-doc | 338 +--- bin/ansible-galaxy | 958 +--------- bin/ansible-playbook | 331 +--- bin/ansible-pull | 258 +-- bin/ansible-vault | 242 +-- lib/ansible/__init__.py | 8 +- {v2 => lib}/ansible/cli/__init__.py | 0 {v2 => lib}/ansible/cli/adhoc.py | 0 {v2 => lib}/ansible/cli/doc.py | 0 {v2 => lib}/ansible/cli/galaxy.py | 0 {v2 => lib}/ansible/cli/playbook.py | 0 {v2 => lib}/ansible/cli/pull.py | 0 {v2 => lib}/ansible/cli/vault.py | 0 {v2 => lib}/ansible/compat/__init__.py | 0 {v2 => lib}/ansible/compat/tests/__init__.py | 0 {v2 => lib}/ansible/compat/tests/mock.py | 0 {v2 => lib}/ansible/compat/tests/unittest.py | 0 {v2 => lib}/ansible/config/__init__.py | 0 lib/ansible/constants.py | 47 +- {v2 => lib}/ansible/errors/__init__.py | 0 {v2 => 
lib}/ansible/errors/yaml_strings.py | 0 {v2 => lib}/ansible/executor/__init__.py | 0 .../ansible/executor/connection_info.py | 0 {v2 => lib}/ansible/executor/module_common.py | 0 {v2 => lib}/ansible/executor/play_iterator.py | 0 .../ansible/executor/playbook_executor.py | 0 .../ansible/executor/process/__init__.py | 0 .../ansible/executor/process/result.py | 0 .../ansible/executor/process/worker.py | 0 {v2 => lib}/ansible/executor/stats.py | 0 {v2 => lib}/ansible/executor/task_executor.py | 0 .../ansible/executor/task_queue_manager.py | 0 .../ansible/executor/task_queue_manager.py: | 0 {v2 => lib}/ansible/executor/task_result.py | 0 {v2 => lib}/ansible/galaxy/__init__.py | 0 {v2 => lib}/ansible/galaxy/api.py | 0 .../ansible/galaxy/data/metadata_template.j2 | 0 {v2 => lib}/ansible/galaxy/data/readme | 0 {v2 => lib}/ansible/galaxy/role.py | 0 lib/ansible/inventory/__init__.py | 96 +- lib/ansible/inventory/dir.py | 31 +- lib/ansible/inventory/expand_hosts.py | 3 + lib/ansible/inventory/group.py | 54 +- lib/ansible/inventory/host.py | 85 +- lib/ansible/inventory/ini.py | 58 +- lib/ansible/inventory/script.py | 36 +- lib/ansible/inventory/vars_plugins/noop.py | 2 + lib/ansible/module_utils/basic.py | 68 +- lib/ansible/module_utils/powershell.ps1 | 6 +- lib/ansible/modules/__init__.py | 20 + lib/ansible/modules/core | 1 - lib/ansible/modules/extras | 1 - {v2 => lib}/ansible/new_inventory/__init__.py | 0 {v2 => lib}/ansible/new_inventory/group.py | 0 {v2 => lib}/ansible/new_inventory/host.py | 0 {v2 => lib}/ansible/parsing/__init__.py | 0 {v2 => lib}/ansible/parsing/mod_args.py | 0 {v2 => lib}/ansible/parsing/splitter.py | 0 {v2 => lib}/ansible/parsing/utils/__init__.py | 0 {v2 => lib}/ansible/parsing/utils/jsonify.py | 0 {v2 => lib}/ansible/parsing/vault/__init__.py | 0 {v2 => lib}/ansible/parsing/yaml/__init__.py | 0 .../ansible/parsing/yaml/constructor.py | 0 {v2 => lib}/ansible/parsing/yaml/loader.py | 0 {v2 => lib}/ansible/parsing/yaml/objects.py | 0 
lib/ansible/playbook/__init__.py | 887 +-------- {v2 => lib}/ansible/playbook/attribute.py | 0 {v2 => lib}/ansible/playbook/base.py | 0 {v2 => lib}/ansible/playbook/become.py | 0 {v2 => lib}/ansible/playbook/block.py | 0 {v2 => lib}/ansible/playbook/conditional.py | 0 {v2 => lib}/ansible/playbook/handler.py | 0 {v2 => lib}/ansible/playbook/helpers.py | 0 lib/ansible/playbook/play.py | 1124 +++-------- .../ansible/playbook/playbook_include.py | 0 {v2 => lib}/ansible/playbook/role/__init__.py | 0 .../ansible/playbook/role/definition.py | 0 {v2 => lib}/ansible/playbook/role/include.py | 0 {v2 => lib}/ansible/playbook/role/metadata.py | 0 .../ansible/playbook/role/requirement.py | 0 {v2 => lib}/ansible/playbook/taggable.py | 0 lib/ansible/playbook/task.py | 616 +++--- {v2 => lib}/ansible/playbook/vars.py | 0 {v2 => lib}/ansible/playbook/vars_file.py | 0 {v2 => lib}/ansible/plugins/__init__.py | 0 .../ansible/plugins/action/__init__.py | 0 .../ansible/plugins/action/add_host.py | 0 .../ansible/plugins/action/assemble.py | 0 {v2 => lib}/ansible/plugins/action/assert.py | 0 {v2 => lib}/ansible/plugins/action/async.py | 0 {v2 => lib}/ansible/plugins/action/copy.py | 0 {v2 => lib}/ansible/plugins/action/debug.py | 0 {v2 => lib}/ansible/plugins/action/fail.py | 0 {v2 => lib}/ansible/plugins/action/fetch.py | 0 .../ansible/plugins/action/group_by.py | 0 .../ansible/plugins/action/include_vars.py | 0 {v2 => lib}/ansible/plugins/action/normal.py | 0 {v2 => lib}/ansible/plugins/action/patch.py | 0 {v2 => lib}/ansible/plugins/action/pause.py | 0 {v2 => lib}/ansible/plugins/action/raw.py | 0 {v2 => lib}/ansible/plugins/action/script.py | 0 .../ansible/plugins/action/set_fact.py | 0 .../ansible/plugins/action/synchronize.py | 0 .../ansible/plugins/action/template.py | 0 .../ansible/plugins/action/unarchive.py | 0 {v2 => lib}/ansible/plugins/cache/__init__.py | 0 {v2 => lib}/ansible/plugins/cache/base.py | 0 .../ansible/plugins/cache/memcached.py | 0 {v2 => 
lib}/ansible/plugins/cache/memory.py | 0 {v2 => lib}/ansible/plugins/cache/redis.py | 0 .../ansible/plugins/callback/__init__.py | 0 .../ansible/plugins/callback/default.py | 0 .../ansible/plugins/callback/minimal.py | 0 .../ansible/plugins/connections/__init__.py | 0 .../ansible/plugins/connections/accelerate.py | 0 .../ansible/plugins/connections/chroot.py | 0 .../ansible/plugins/connections/funcd.py | 0 .../ansible/plugins/connections/jail.py | 0 .../plugins/connections/libvirt_lxc.py | 0 .../ansible/plugins/connections/local.py | 0 .../plugins/connections/paramiko_ssh.py | 0 .../ansible/plugins/connections/ssh.py | 0 .../ansible/plugins/connections/winrm.py | 0 .../ansible/plugins/connections/zone.py | 0 {v2 => lib}/ansible/plugins/filter | 0 .../ansible/plugins/inventory/__init__.py | 0 .../ansible/plugins/inventory/aggregate.py | 0 .../ansible/plugins/inventory/directory.py | 0 {v2 => lib}/ansible/plugins/inventory/ini.py | 0 .../ansible/plugins/lookup/__init__.py | 0 .../ansible/plugins/lookup/cartesian.py | 0 {v2 => lib}/ansible/plugins/lookup/csvfile.py | 0 {v2 => lib}/ansible/plugins/lookup/dict.py | 0 {v2 => lib}/ansible/plugins/lookup/dnstxt.py | 0 {v2 => lib}/ansible/plugins/lookup/env.py | 0 {v2 => lib}/ansible/plugins/lookup/etcd.py | 0 {v2 => lib}/ansible/plugins/lookup/file.py | 0 .../ansible/plugins/lookup/fileglob.py | 0 .../ansible/plugins/lookup/first_found.py | 0 .../ansible/plugins/lookup/flattened.py | 0 .../ansible/plugins/lookup/indexed_items.py | 0 .../plugins/lookup/inventory_hostnames.py | 0 {v2 => lib}/ansible/plugins/lookup/items.py | 0 {v2 => lib}/ansible/plugins/lookup/lines.py | 0 {v2 => lib}/ansible/plugins/lookup/nested.py | 0 .../ansible/plugins/lookup/password.py | 0 {v2 => lib}/ansible/plugins/lookup/pipe.py | 0 .../ansible/plugins/lookup/random_choice.py | 0 .../ansible/plugins/lookup/redis_kv.py | 0 .../ansible/plugins/lookup/sequence.py | 0 .../ansible/plugins/lookup/subelements.py | 0 .../ansible/plugins/lookup/template.py 
| 0 .../ansible/plugins/lookup/together.py | 0 {v2 => lib}/ansible/plugins/lookup/url.py | 0 {v2 => lib}/ansible/plugins/shell/__init__.py | 0 {v2 => lib}/ansible/plugins/shell/csh.py | 0 {v2 => lib}/ansible/plugins/shell/fish.py | 0 .../ansible/plugins/shell/powershell.py | 0 {v2 => lib}/ansible/plugins/shell/sh.py | 0 .../ansible/plugins/strategies/__init__.py | 0 .../ansible/plugins/strategies/free.py | 0 .../ansible/plugins/strategies/linear.py | 0 {v2 => lib}/ansible/plugins/vars/__init__.py | 0 {v2 => lib}/ansible/template/__init__.py | 0 {v2 => lib}/ansible/template/safe_eval.py | 0 {v2 => lib}/ansible/template/template.py | 0 {v2 => lib}/ansible/template/vars.py | 0 {v2 => lib/ansible}/test-requirements.txt | 0 lib/ansible/utils/__init__.py | 1646 +--------------- {v2 => lib}/ansible/utils/boolean.py | 0 {v2 => lib}/ansible/utils/color.py | 0 {v2 => lib}/ansible/utils/debug.py | 0 {v2 => lib}/ansible/utils/display.py | 0 {v2 => lib}/ansible/utils/encrypt.py | 0 lib/ansible/utils/hashing.py | 7 +- {v2 => lib}/ansible/utils/listify.py | 0 lib/ansible/utils/module_docs.py | 4 +- .../ansible/utils/module_docs_fragments | 0 {v2 => lib}/ansible/utils/path.py | 0 lib/ansible/utils/unicode.py | 37 +- {v2 => lib}/ansible/utils/vars.py | 0 lib/ansible/utils/vault.py | 593 +----- {v2 => lib}/ansible/vars/__init__.py | 0 {v2 => lib}/ansible/vars/hostvars.py | 0 {v2/samples => samples}/README.md | 0 {v2/samples => samples}/common_include.yml | 0 {v2/samples => samples}/hosts | 0 {v2/samples => samples}/ignore_errors.yml | 0 {v2/samples => samples}/include.yml | 0 {v2/samples => samples}/inv_lg | 0 {v2/samples => samples}/inv_md | 0 {v2/samples => samples}/inv_sm | 0 {v2/samples => samples}/l1_include.yml | 0 {v2/samples => samples}/l2_include.yml | 0 {v2/samples => samples}/l3_include.yml | 0 {v2/samples => samples}/localhost_include.yml | 0 {v2/samples => samples}/localhosts | 0 {v2/samples => samples}/lookup_file.yml | 0 {v2/samples => samples}/lookup_password.yml | 0 
{v2/samples => samples}/lookup_pipe.py | 0 {v2/samples => samples}/lookup_template.yml | 0 {v2/samples => samples}/multi.py | 0 {v2/samples => samples}/multi_queues.py | 0 .../roles/common/meta/main.yml | 0 .../roles/common/tasks/main.yml | 0 .../roles/role_a/meta/main.yml | 0 .../roles/role_a/tasks/main.yml | 0 .../roles/role_b/meta/main.yml | 0 .../roles/role_b/tasks/main.yml | 0 .../roles/test_become_r1/meta/main.yml | 0 .../roles/test_become_r1/tasks/main.yml | 0 .../roles/test_become_r2/meta/main.yml | 0 .../roles/test_become_r2/tasks/main.yml | 0 .../roles/test_role/meta/main.yml | 0 .../roles/test_role/tasks/main.yml | 0 .../roles/test_role_dep/tasks/main.yml | 0 {v2/samples => samples}/src | 0 {v2/samples => samples}/template.j2 | 0 {v2/samples => samples}/test_become.yml | 0 {v2/samples => samples}/test_big_debug.yml | 0 {v2/samples => samples}/test_big_ping.yml | 0 {v2/samples => samples}/test_block.yml | 0 .../test_blocks_of_blocks.yml | 0 {v2/samples => samples}/test_fact_gather.yml | 0 {v2/samples => samples}/test_free.yml | 0 {v2/samples => samples}/test_include.yml | 0 {v2/samples => samples}/test_pb.yml | 0 {v2/samples => samples}/test_role.yml | 0 .../test_roles_complex.yml | 0 {v2/samples => samples}/test_run_once.yml | 0 {v2/samples => samples}/test_sudo.yml | 0 {v2/samples => samples}/test_tags.yml | 0 .../testing/extra_vars.yml | 0 {v2/samples => samples}/testing/frag1 | 0 {v2/samples => samples}/testing/frag2 | 0 {v2/samples => samples}/testing/frag3 | 0 {v2/samples => samples}/testing/vars.yml | 0 {v2/samples => samples}/with_dict.yml | 0 {v2/samples => samples}/with_env.yml | 0 {v2/samples => samples}/with_fileglob.yml | 0 {v2/samples => samples}/with_first_found.yml | 0 {v2/samples => samples}/with_flattened.yml | 0 .../with_indexed_items.yml | 0 {v2/samples => samples}/with_items.yml | 0 {v2/samples => samples}/with_lines.yml | 0 {v2/samples => samples}/with_nested.yml | 0 .../with_random_choice.yml | 0 {v2/samples => 
samples}/with_sequence.yml | 0 {v2/samples => samples}/with_subelements.yml | 0 {v2/samples => samples}/with_together.yml | 0 {v2/test => test/units}/__init__.py | 0 {v2/test => test/units}/errors/__init__.py | 0 {v2/test => test/units}/errors/test_errors.py | 0 {v2/test => test/units}/executor/__init__.py | 0 .../units}/executor/test_play_iterator.py | 0 .../modules => test/units/mock}/__init__.py | 0 {v2/test => test/units}/mock/loader.py | 0 {v2/test => test/units}/parsing/__init__.py | 0 .../units}/parsing/test_data_loader.py | 0 .../units}/parsing/test_mod_args.py | 0 .../units}/parsing/test_splitter.py | 0 .../units}/parsing/vault/__init__.py | 0 .../units}/parsing/vault/test_vault.py | 0 .../units}/parsing/vault/test_vault_editor.py | 0 .../units/parsing/yaml}/__init__.py | 0 .../units}/parsing/yaml/test_loader.py | 0 {v2/test => test/units}/playbook/__init__.py | 0 .../units}/playbook/test_block.py | 0 {v2/test => test/units}/playbook/test_play.py | 0 .../units}/playbook/test_playbook.py | 0 {v2/test => test/units}/playbook/test_role.py | 0 {v2/test => test/units}/playbook/test_task.py | 0 {v2/test => test/units}/plugins/__init__.py | 0 {v2/test => test/units}/plugins/test_cache.py | 0 .../units}/plugins/test_connection.py | 0 .../units}/plugins/test_plugins.py | 0 {v2/test => test/units}/vars/__init__.py | 0 .../units}/vars/test_variable_manager.py | 0 {v2/ansible/utils => v1/ansible}/__init__.py | 6 +- {lib => v1}/ansible/cache/__init__.py | 0 {lib => v1}/ansible/cache/base.py | 0 {lib => v1}/ansible/cache/jsonfile.py | 0 {lib => v1}/ansible/cache/memcached.py | 0 {lib => v1}/ansible/cache/memory.py | 0 {lib => v1}/ansible/cache/redis.py | 0 .../ansible/callback_plugins}/__init__.py | 0 {lib => v1}/ansible/callback_plugins/noop.py | 0 {lib => v1}/ansible/callbacks.py | 0 {lib => v1}/ansible/color.py | 0 {v2 => v1}/ansible/constants.py | 47 +- {lib => v1}/ansible/errors.py | 0 {v2 => v1}/ansible/inventory/__init__.py | 96 +- {v2 => 
v1}/ansible/inventory/dir.py | 31 +- {v2 => v1}/ansible/inventory/expand_hosts.py | 3 - {v2 => v1}/ansible/inventory/group.py | 54 +- v1/ansible/inventory/host.py | 67 + {v2 => v1}/ansible/inventory/ini.py | 58 +- {v2 => v1}/ansible/inventory/script.py | 36 +- .../inventory/vars_plugins}/__init__.py | 0 .../ansible/inventory/vars_plugins/noop.py | 2 - {lib => v1}/ansible/module_common.py | 0 {v2 => v1}/ansible/module_utils/__init__.py | 0 {v2 => v1}/ansible/module_utils/a10.py | 0 {v2 => v1}/ansible/module_utils/basic.py | 68 +- {v2 => v1}/ansible/module_utils/cloudstack.py | 0 {v2 => v1}/ansible/module_utils/database.py | 0 {v2 => v1}/ansible/module_utils/ec2.py | 0 {v2 => v1}/ansible/module_utils/facts.py | 0 {v2 => v1}/ansible/module_utils/gce.py | 0 .../ansible/module_utils/known_hosts.py | 0 {v2 => v1}/ansible/module_utils/openstack.py | 0 .../ansible/module_utils/powershell.ps1 | 6 +- {v2 => v1}/ansible/module_utils/rax.py | 0 {v2 => v1}/ansible/module_utils/redhat.py | 0 {v2 => v1}/ansible/module_utils/splitter.py | 0 {v2 => v1}/ansible/module_utils/urls.py | 0 {lib => v1}/ansible/module_utils/vmware.py | 0 .../ansible/modules}/__init__.py | 0 v1/ansible/playbook/__init__.py | 874 +++++++++ v1/ansible/playbook/play.py | 949 ++++++++++ v1/ansible/playbook/task.py | 346 ++++ {lib => v1}/ansible/runner/__init__.py | 0 .../runner/action_plugins}/__init__.py | 0 .../ansible/runner/action_plugins/add_host.py | 0 .../ansible/runner/action_plugins/assemble.py | 0 .../ansible/runner/action_plugins/assert.py | 0 .../ansible/runner/action_plugins/async.py | 0 .../ansible/runner/action_plugins/copy.py | 0 .../ansible/runner/action_plugins/debug.py | 0 .../ansible/runner/action_plugins/fail.py | 0 .../ansible/runner/action_plugins/fetch.py | 0 .../ansible/runner/action_plugins/group_by.py | 0 .../runner/action_plugins/include_vars.py | 0 .../ansible/runner/action_plugins/normal.py | 0 .../ansible/runner/action_plugins/patch.py | 0 
.../ansible/runner/action_plugins/pause.py | 0 .../ansible/runner/action_plugins/raw.py | 0 .../ansible/runner/action_plugins/script.py | 0 .../ansible/runner/action_plugins/set_fact.py | 0 .../runner/action_plugins/synchronize.py | 0 .../ansible/runner/action_plugins/template.py | 0 .../runner/action_plugins/unarchive.py | 0 .../ansible/runner/action_plugins/win_copy.py | 0 .../runner/action_plugins/win_template.py | 0 {lib => v1}/ansible/runner/connection.py | 0 .../runner/connection_plugins}/__init__.py | 0 .../runner/connection_plugins/accelerate.py | 0 .../runner/connection_plugins/chroot.py | 0 .../runner/connection_plugins/fireball.py | 0 .../runner/connection_plugins/funcd.py | 0 .../ansible/runner/connection_plugins/jail.py | 0 .../runner/connection_plugins/libvirt_lxc.py | 0 .../runner/connection_plugins/local.py | 0 .../runner/connection_plugins/paramiko_ssh.py | 0 .../ansible/runner/connection_plugins/ssh.py | 0 .../runner/connection_plugins/winrm.py | 0 .../ansible/runner/connection_plugins/zone.py | 0 .../runner/filter_plugins}/__init__.py | 0 .../ansible/runner/filter_plugins/core.py | 0 .../ansible/runner/filter_plugins/ipaddr.py | 0 .../runner/filter_plugins/mathstuff.py | 0 .../runner/lookup_plugins}/__init__.py | 0 .../runner/lookup_plugins/cartesian.py | 0 .../runner/lookup_plugins/consul_kv.py | 0 .../ansible/runner/lookup_plugins/csvfile.py | 0 .../ansible/runner/lookup_plugins/dict.py | 0 .../ansible/runner/lookup_plugins/dig.py | 0 .../ansible/runner/lookup_plugins/dnstxt.py | 0 .../ansible/runner/lookup_plugins/env.py | 0 .../ansible/runner/lookup_plugins/etcd.py | 0 .../ansible/runner/lookup_plugins/file.py | 0 .../ansible/runner/lookup_plugins/fileglob.py | 0 .../runner/lookup_plugins/first_found.py | 0 .../runner/lookup_plugins/flattened.py | 0 .../runner/lookup_plugins/indexed_items.py | 0 .../lookup_plugins/inventory_hostnames.py | 0 .../ansible/runner/lookup_plugins/items.py | 0 .../ansible/runner/lookup_plugins/lines.py | 0 
.../ansible/runner/lookup_plugins/nested.py | 0 .../ansible/runner/lookup_plugins/password.py | 0 .../ansible/runner/lookup_plugins/pipe.py | 0 .../runner/lookup_plugins/random_choice.py | 0 .../ansible/runner/lookup_plugins/redis_kv.py | 0 .../ansible/runner/lookup_plugins/sequence.py | 0 .../runner/lookup_plugins/subelements.py | 0 .../ansible/runner/lookup_plugins/template.py | 0 .../ansible/runner/lookup_plugins/together.py | 0 .../ansible/runner/lookup_plugins/url.py | 0 {lib => v1}/ansible/runner/poller.py | 0 {lib => v1}/ansible/runner/return_data.py | 0 .../ansible/runner/shell_plugins}/__init__.py | 0 .../ansible/runner/shell_plugins/csh.py | 0 .../ansible/runner/shell_plugins/fish.py | 0 .../runner/shell_plugins/powershell.py | 0 .../ansible/runner/shell_plugins/sh.py | 0 v1/ansible/utils/__init__.py | 1660 +++++++++++++++++ {lib => v1}/ansible/utils/cmd_functions.py | 0 .../ansible/utils/display_functions.py | 0 {v2 => v1}/ansible/utils/hashing.py | 7 +- {v2 => v1}/ansible/utils/module_docs.py | 4 +- .../utils/module_docs_fragments/__init__.py | 0 .../utils/module_docs_fragments/aws.py | 0 .../utils/module_docs_fragments/cloudstack.py | 0 .../utils/module_docs_fragments/files.py | 0 .../utils/module_docs_fragments/openstack.py | 0 .../utils/module_docs_fragments/rackspace.py | 0 {lib => v1}/ansible/utils/plugins.py | 0 {lib => v1}/ansible/utils/string_functions.py | 0 {lib => v1}/ansible/utils/su_prompts.py | 0 {lib => v1}/ansible/utils/template.py | 0 {v2 => v1}/ansible/utils/unicode.py | 37 +- v1/ansible/utils/vault.py | 585 ++++++ v1/bin/ansible | 207 ++ v1/bin/ansible-doc | 337 ++++ v1/bin/ansible-galaxy | 957 ++++++++++ v1/bin/ansible-playbook | 330 ++++ v1/bin/ansible-pull | 257 +++ v1/bin/ansible-vault | 241 +++ {test/units => v1/tests}/README.md | 0 {test/units => v1/tests}/TestConstants.py | 0 {test/units => v1/tests}/TestFilters.py | 0 {test/units => v1/tests}/TestInventory.py | 0 .../tests}/TestModuleUtilsBasic.py | 0 
.../tests}/TestModuleUtilsDatabase.py | 0 {test/units => v1/tests}/TestModules.py | 0 {test/units => v1/tests}/TestPlayVarsFiles.py | 0 {test/units => v1/tests}/TestSynchronize.py | 0 {test/units => v1/tests}/TestUtils.py | 0 .../tests}/TestUtilsStringFunctions.py | 0 {test/units => v1/tests}/TestVault.py | 0 {test/units => v1/tests}/TestVaultEditor.py | 0 {test/units => v1/tests}/ansible.cfg | 0 .../tests}/inventory_test_data/ansible_hosts | 0 .../tests}/inventory_test_data/broken.yml | 0 .../inventory_test_data/common_vars.yml | 0 .../tests}/inventory_test_data/complex_hosts | 0 .../tests}/inventory_test_data/encrypted.yml | 0 .../tests}/inventory_test_data/hosts_list.yml | 0 .../inventory/test_alpha_end_before_beg | 0 .../inventory/test_combined_range | 0 .../inventory/test_incorrect_format | 0 .../inventory/test_incorrect_range | 0 .../inventory/test_leading_range | 0 .../inventory/test_missing_end | 0 .../inventory_test_data/inventory_api.py | 0 .../inventory_test_data/inventory_dir/0hosts | 0 .../inventory_dir/1mythology | 0 .../inventory_test_data/inventory_dir/2levels | 0 .../inventory_dir/3comments | 0 .../inventory_dir/4skip_extensions.ini | 0 .../tests}/inventory_test_data/large_range | 0 .../inventory_test_data/restrict_pattern | 0 .../tests}/inventory_test_data/simple_hosts | 0 .../tests}/module_tests/TestApt.py | 0 .../tests}/module_tests/TestDocker.py | 0 .../vault_test_data/foo-ansible-1.0.yml | 0 ...oo-ansible-1.1-ansible-newline-ansible.yml | 0 .../vault_test_data/foo-ansible-1.1.yml | 0 v2/README-tests.md | 33 - v2/ansible/__init__.py | 22 - v2/ansible/inventory/host.py | 130 -- v2/ansible/modules/core | 1 - v2/ansible/modules/extras | 1 - v2/ansible/playbook/__init__.py | 85 - v2/ansible/playbook/play.py | 263 --- v2/ansible/playbook/task.py | 310 --- v2/ansible/utils/vault.py | 56 - v2/bin/ansible | 79 - v2/bin/ansible-doc | 1 - v2/bin/ansible-galaxy | 1 - v2/bin/ansible-playbook | 1 - v2/bin/ansible-pull | 1 - v2/bin/ansible-vault | 1 - 
v2/hacking/README.md | 48 - v2/hacking/authors.sh | 14 - v2/hacking/env-setup | 78 - v2/hacking/env-setup.fish | 57 - v2/hacking/get_library.py | 29 - v2/hacking/module_formatter.py | 442 ----- v2/hacking/templates/rst.j2 | 153 -- v2/hacking/test-module | 192 -- v2/scripts/ansible | 20 - v2/setup.py | 36 - v2/test/mock/__init__.py | 20 - 486 files changed, 7996 insertions(+), 9118 deletions(-) mode change 100755 => 120000 bin/ansible-doc mode change 100755 => 120000 bin/ansible-galaxy mode change 100755 => 120000 bin/ansible-playbook mode change 100755 => 120000 bin/ansible-pull mode change 100755 => 120000 bin/ansible-vault rename {v2 => lib}/ansible/cli/__init__.py (100%) rename {v2 => lib}/ansible/cli/adhoc.py (100%) rename {v2 => lib}/ansible/cli/doc.py (100%) rename {v2 => lib}/ansible/cli/galaxy.py (100%) rename {v2 => lib}/ansible/cli/playbook.py (100%) rename {v2 => lib}/ansible/cli/pull.py (100%) rename {v2 => lib}/ansible/cli/vault.py (100%) rename {v2 => lib}/ansible/compat/__init__.py (100%) rename {v2 => lib}/ansible/compat/tests/__init__.py (100%) rename {v2 => lib}/ansible/compat/tests/mock.py (100%) rename {v2 => lib}/ansible/compat/tests/unittest.py (100%) rename {v2 => lib}/ansible/config/__init__.py (100%) rename {v2 => lib}/ansible/errors/__init__.py (100%) rename {v2 => lib}/ansible/errors/yaml_strings.py (100%) rename {v2 => lib}/ansible/executor/__init__.py (100%) rename {v2 => lib}/ansible/executor/connection_info.py (100%) rename {v2 => lib}/ansible/executor/module_common.py (100%) rename {v2 => lib}/ansible/executor/play_iterator.py (100%) rename {v2 => lib}/ansible/executor/playbook_executor.py (100%) rename {v2 => lib}/ansible/executor/process/__init__.py (100%) rename {v2 => lib}/ansible/executor/process/result.py (100%) rename {v2 => lib}/ansible/executor/process/worker.py (100%) rename {v2 => lib}/ansible/executor/stats.py (100%) rename {v2 => lib}/ansible/executor/task_executor.py (100%) rename {v2 => 
lib}/ansible/executor/task_queue_manager.py (100%) rename {v2 => lib}/ansible/executor/task_queue_manager.py: (100%) rename {v2 => lib}/ansible/executor/task_result.py (100%) rename {v2 => lib}/ansible/galaxy/__init__.py (100%) rename {v2 => lib}/ansible/galaxy/api.py (100%) rename {v2 => lib}/ansible/galaxy/data/metadata_template.j2 (100%) rename {v2 => lib}/ansible/galaxy/data/readme (100%) rename {v2 => lib}/ansible/galaxy/role.py (100%) delete mode 160000 lib/ansible/modules/core delete mode 160000 lib/ansible/modules/extras rename {v2 => lib}/ansible/new_inventory/__init__.py (100%) rename {v2 => lib}/ansible/new_inventory/group.py (100%) rename {v2 => lib}/ansible/new_inventory/host.py (100%) rename {v2 => lib}/ansible/parsing/__init__.py (100%) rename {v2 => lib}/ansible/parsing/mod_args.py (100%) rename {v2 => lib}/ansible/parsing/splitter.py (100%) rename {v2 => lib}/ansible/parsing/utils/__init__.py (100%) rename {v2 => lib}/ansible/parsing/utils/jsonify.py (100%) rename {v2 => lib}/ansible/parsing/vault/__init__.py (100%) rename {v2 => lib}/ansible/parsing/yaml/__init__.py (100%) rename {v2 => lib}/ansible/parsing/yaml/constructor.py (100%) rename {v2 => lib}/ansible/parsing/yaml/loader.py (100%) rename {v2 => lib}/ansible/parsing/yaml/objects.py (100%) rename {v2 => lib}/ansible/playbook/attribute.py (100%) rename {v2 => lib}/ansible/playbook/base.py (100%) rename {v2 => lib}/ansible/playbook/become.py (100%) rename {v2 => lib}/ansible/playbook/block.py (100%) rename {v2 => lib}/ansible/playbook/conditional.py (100%) rename {v2 => lib}/ansible/playbook/handler.py (100%) rename {v2 => lib}/ansible/playbook/helpers.py (100%) rename {v2 => lib}/ansible/playbook/playbook_include.py (100%) rename {v2 => lib}/ansible/playbook/role/__init__.py (100%) rename {v2 => lib}/ansible/playbook/role/definition.py (100%) rename {v2 => lib}/ansible/playbook/role/include.py (100%) rename {v2 => lib}/ansible/playbook/role/metadata.py (100%) rename {v2 => 
lib}/ansible/playbook/role/requirement.py (100%) rename {v2 => lib}/ansible/playbook/taggable.py (100%) rename {v2 => lib}/ansible/playbook/vars.py (100%) rename {v2 => lib}/ansible/playbook/vars_file.py (100%) rename {v2 => lib}/ansible/plugins/__init__.py (100%) rename {v2 => lib}/ansible/plugins/action/__init__.py (100%) rename {v2 => lib}/ansible/plugins/action/add_host.py (100%) rename {v2 => lib}/ansible/plugins/action/assemble.py (100%) rename {v2 => lib}/ansible/plugins/action/assert.py (100%) rename {v2 => lib}/ansible/plugins/action/async.py (100%) rename {v2 => lib}/ansible/plugins/action/copy.py (100%) rename {v2 => lib}/ansible/plugins/action/debug.py (100%) rename {v2 => lib}/ansible/plugins/action/fail.py (100%) rename {v2 => lib}/ansible/plugins/action/fetch.py (100%) rename {v2 => lib}/ansible/plugins/action/group_by.py (100%) rename {v2 => lib}/ansible/plugins/action/include_vars.py (100%) rename {v2 => lib}/ansible/plugins/action/normal.py (100%) rename {v2 => lib}/ansible/plugins/action/patch.py (100%) rename {v2 => lib}/ansible/plugins/action/pause.py (100%) rename {v2 => lib}/ansible/plugins/action/raw.py (100%) rename {v2 => lib}/ansible/plugins/action/script.py (100%) rename {v2 => lib}/ansible/plugins/action/set_fact.py (100%) rename {v2 => lib}/ansible/plugins/action/synchronize.py (100%) rename {v2 => lib}/ansible/plugins/action/template.py (100%) rename {v2 => lib}/ansible/plugins/action/unarchive.py (100%) rename {v2 => lib}/ansible/plugins/cache/__init__.py (100%) rename {v2 => lib}/ansible/plugins/cache/base.py (100%) rename {v2 => lib}/ansible/plugins/cache/memcached.py (100%) rename {v2 => lib}/ansible/plugins/cache/memory.py (100%) rename {v2 => lib}/ansible/plugins/cache/redis.py (100%) rename {v2 => lib}/ansible/plugins/callback/__init__.py (100%) rename {v2 => lib}/ansible/plugins/callback/default.py (100%) rename {v2 => lib}/ansible/plugins/callback/minimal.py (100%) rename {v2 => lib}/ansible/plugins/connections/__init__.py 
(100%) rename {v2 => lib}/ansible/plugins/connections/accelerate.py (100%) rename {v2 => lib}/ansible/plugins/connections/chroot.py (100%) rename {v2 => lib}/ansible/plugins/connections/funcd.py (100%) rename {v2 => lib}/ansible/plugins/connections/jail.py (100%) rename {v2 => lib}/ansible/plugins/connections/libvirt_lxc.py (100%) rename {v2 => lib}/ansible/plugins/connections/local.py (100%) rename {v2 => lib}/ansible/plugins/connections/paramiko_ssh.py (100%) rename {v2 => lib}/ansible/plugins/connections/ssh.py (100%) rename {v2 => lib}/ansible/plugins/connections/winrm.py (100%) rename {v2 => lib}/ansible/plugins/connections/zone.py (100%) rename {v2 => lib}/ansible/plugins/filter (100%) rename {v2 => lib}/ansible/plugins/inventory/__init__.py (100%) rename {v2 => lib}/ansible/plugins/inventory/aggregate.py (100%) rename {v2 => lib}/ansible/plugins/inventory/directory.py (100%) rename {v2 => lib}/ansible/plugins/inventory/ini.py (100%) rename {v2 => lib}/ansible/plugins/lookup/__init__.py (100%) rename {v2 => lib}/ansible/plugins/lookup/cartesian.py (100%) rename {v2 => lib}/ansible/plugins/lookup/csvfile.py (100%) rename {v2 => lib}/ansible/plugins/lookup/dict.py (100%) rename {v2 => lib}/ansible/plugins/lookup/dnstxt.py (100%) rename {v2 => lib}/ansible/plugins/lookup/env.py (100%) rename {v2 => lib}/ansible/plugins/lookup/etcd.py (100%) rename {v2 => lib}/ansible/plugins/lookup/file.py (100%) rename {v2 => lib}/ansible/plugins/lookup/fileglob.py (100%) rename {v2 => lib}/ansible/plugins/lookup/first_found.py (100%) rename {v2 => lib}/ansible/plugins/lookup/flattened.py (100%) rename {v2 => lib}/ansible/plugins/lookup/indexed_items.py (100%) rename {v2 => lib}/ansible/plugins/lookup/inventory_hostnames.py (100%) rename {v2 => lib}/ansible/plugins/lookup/items.py (100%) rename {v2 => lib}/ansible/plugins/lookup/lines.py (100%) rename {v2 => lib}/ansible/plugins/lookup/nested.py (100%) rename {v2 => lib}/ansible/plugins/lookup/password.py (100%) rename {v2 => 
lib}/ansible/plugins/lookup/pipe.py (100%) rename {v2 => lib}/ansible/plugins/lookup/random_choice.py (100%) rename {v2 => lib}/ansible/plugins/lookup/redis_kv.py (100%) rename {v2 => lib}/ansible/plugins/lookup/sequence.py (100%) rename {v2 => lib}/ansible/plugins/lookup/subelements.py (100%) rename {v2 => lib}/ansible/plugins/lookup/template.py (100%) rename {v2 => lib}/ansible/plugins/lookup/together.py (100%) rename {v2 => lib}/ansible/plugins/lookup/url.py (100%) rename {v2 => lib}/ansible/plugins/shell/__init__.py (100%) rename {v2 => lib}/ansible/plugins/shell/csh.py (100%) rename {v2 => lib}/ansible/plugins/shell/fish.py (100%) rename {v2 => lib}/ansible/plugins/shell/powershell.py (100%) rename {v2 => lib}/ansible/plugins/shell/sh.py (100%) rename {v2 => lib}/ansible/plugins/strategies/__init__.py (100%) rename {v2 => lib}/ansible/plugins/strategies/free.py (100%) rename {v2 => lib}/ansible/plugins/strategies/linear.py (100%) rename {v2 => lib}/ansible/plugins/vars/__init__.py (100%) rename {v2 => lib}/ansible/template/__init__.py (100%) rename {v2 => lib}/ansible/template/safe_eval.py (100%) rename {v2 => lib}/ansible/template/template.py (100%) rename {v2 => lib}/ansible/template/vars.py (100%) rename {v2 => lib/ansible}/test-requirements.txt (100%) rename {v2 => lib}/ansible/utils/boolean.py (100%) rename {v2 => lib}/ansible/utils/color.py (100%) rename {v2 => lib}/ansible/utils/debug.py (100%) rename {v2 => lib}/ansible/utils/display.py (100%) rename {v2 => lib}/ansible/utils/encrypt.py (100%) rename {v2 => lib}/ansible/utils/listify.py (100%) rename {v2 => lib}/ansible/utils/module_docs_fragments (100%) rename {v2 => lib}/ansible/utils/path.py (100%) rename {v2 => lib}/ansible/utils/vars.py (100%) rename {v2 => lib}/ansible/vars/__init__.py (100%) rename {v2 => lib}/ansible/vars/hostvars.py (100%) rename {v2/samples => samples}/README.md (100%) rename {v2/samples => samples}/common_include.yml (100%) rename {v2/samples => samples}/hosts (100%) rename 
{v2/samples => samples}/ignore_errors.yml (100%) rename {v2/samples => samples}/include.yml (100%) rename {v2/samples => samples}/inv_lg (100%) rename {v2/samples => samples}/inv_md (100%) rename {v2/samples => samples}/inv_sm (100%) rename {v2/samples => samples}/l1_include.yml (100%) rename {v2/samples => samples}/l2_include.yml (100%) rename {v2/samples => samples}/l3_include.yml (100%) rename {v2/samples => samples}/localhost_include.yml (100%) rename {v2/samples => samples}/localhosts (100%) rename {v2/samples => samples}/lookup_file.yml (100%) rename {v2/samples => samples}/lookup_password.yml (100%) rename {v2/samples => samples}/lookup_pipe.py (100%) rename {v2/samples => samples}/lookup_template.yml (100%) rename {v2/samples => samples}/multi.py (100%) rename {v2/samples => samples}/multi_queues.py (100%) rename {v2/samples => samples}/roles/common/meta/main.yml (100%) rename {v2/samples => samples}/roles/common/tasks/main.yml (100%) rename {v2/samples => samples}/roles/role_a/meta/main.yml (100%) rename {v2/samples => samples}/roles/role_a/tasks/main.yml (100%) rename {v2/samples => samples}/roles/role_b/meta/main.yml (100%) rename {v2/samples => samples}/roles/role_b/tasks/main.yml (100%) rename {v2/samples => samples}/roles/test_become_r1/meta/main.yml (100%) rename {v2/samples => samples}/roles/test_become_r1/tasks/main.yml (100%) rename {v2/samples => samples}/roles/test_become_r2/meta/main.yml (100%) rename {v2/samples => samples}/roles/test_become_r2/tasks/main.yml (100%) rename {v2/samples => samples}/roles/test_role/meta/main.yml (100%) rename {v2/samples => samples}/roles/test_role/tasks/main.yml (100%) rename {v2/samples => samples}/roles/test_role_dep/tasks/main.yml (100%) rename {v2/samples => samples}/src (100%) rename {v2/samples => samples}/template.j2 (100%) rename {v2/samples => samples}/test_become.yml (100%) rename {v2/samples => samples}/test_big_debug.yml (100%) rename {v2/samples => samples}/test_big_ping.yml (100%) rename 
{v2/samples => samples}/test_block.yml (100%) rename {v2/samples => samples}/test_blocks_of_blocks.yml (100%) rename {v2/samples => samples}/test_fact_gather.yml (100%) rename {v2/samples => samples}/test_free.yml (100%) rename {v2/samples => samples}/test_include.yml (100%) rename {v2/samples => samples}/test_pb.yml (100%) rename {v2/samples => samples}/test_role.yml (100%) rename {v2/samples => samples}/test_roles_complex.yml (100%) rename {v2/samples => samples}/test_run_once.yml (100%) rename {v2/samples => samples}/test_sudo.yml (100%) rename {v2/samples => samples}/test_tags.yml (100%) rename {v2/samples => samples}/testing/extra_vars.yml (100%) rename {v2/samples => samples}/testing/frag1 (100%) rename {v2/samples => samples}/testing/frag2 (100%) rename {v2/samples => samples}/testing/frag3 (100%) rename {v2/samples => samples}/testing/vars.yml (100%) rename {v2/samples => samples}/with_dict.yml (100%) rename {v2/samples => samples}/with_env.yml (100%) rename {v2/samples => samples}/with_fileglob.yml (100%) rename {v2/samples => samples}/with_first_found.yml (100%) rename {v2/samples => samples}/with_flattened.yml (100%) rename {v2/samples => samples}/with_indexed_items.yml (100%) rename {v2/samples => samples}/with_items.yml (100%) rename {v2/samples => samples}/with_lines.yml (100%) rename {v2/samples => samples}/with_nested.yml (100%) rename {v2/samples => samples}/with_random_choice.yml (100%) rename {v2/samples => samples}/with_sequence.yml (100%) rename {v2/samples => samples}/with_subelements.yml (100%) rename {v2/samples => samples}/with_together.yml (100%) rename {v2/test => test/units}/__init__.py (100%) rename {v2/test => test/units}/errors/__init__.py (100%) rename {v2/test => test/units}/errors/test_errors.py (100%) rename {v2/test => test/units}/executor/__init__.py (100%) rename {v2/test => test/units}/executor/test_play_iterator.py (100%) rename {v2/ansible/modules => test/units/mock}/__init__.py (100%) rename {v2/test => 
test/units}/mock/loader.py (100%) rename {v2/test => test/units}/parsing/__init__.py (100%) rename {v2/test => test/units}/parsing/test_data_loader.py (100%) rename {v2/test => test/units}/parsing/test_mod_args.py (100%) rename {v2/test => test/units}/parsing/test_splitter.py (100%) rename {v2/test => test/units}/parsing/vault/__init__.py (100%) rename {v2/test => test/units}/parsing/vault/test_vault.py (100%) rename {v2/test => test/units}/parsing/vault/test_vault_editor.py (100%) rename {lib/ansible/callback_plugins => test/units/parsing/yaml}/__init__.py (100%) rename {v2/test => test/units}/parsing/yaml/test_loader.py (100%) rename {v2/test => test/units}/playbook/__init__.py (100%) rename {v2/test => test/units}/playbook/test_block.py (100%) rename {v2/test => test/units}/playbook/test_play.py (100%) rename {v2/test => test/units}/playbook/test_playbook.py (100%) rename {v2/test => test/units}/playbook/test_role.py (100%) rename {v2/test => test/units}/playbook/test_task.py (100%) rename {v2/test => test/units}/plugins/__init__.py (100%) rename {v2/test => test/units}/plugins/test_cache.py (100%) rename {v2/test => test/units}/plugins/test_connection.py (100%) rename {v2/test => test/units}/plugins/test_plugins.py (100%) rename {v2/test => test/units}/vars/__init__.py (100%) rename {v2/test => test/units}/vars/test_variable_manager.py (100%) rename {v2/ansible/utils => v1/ansible}/__init__.py (85%) rename {lib => v1}/ansible/cache/__init__.py (100%) rename {lib => v1}/ansible/cache/base.py (100%) rename {lib => v1}/ansible/cache/jsonfile.py (100%) rename {lib => v1}/ansible/cache/memcached.py (100%) rename {lib => v1}/ansible/cache/memory.py (100%) rename {lib => v1}/ansible/cache/redis.py (100%) rename {lib/ansible/runner/action_plugins => v1/ansible/callback_plugins}/__init__.py (100%) rename {lib => v1}/ansible/callback_plugins/noop.py (100%) rename {lib => v1}/ansible/callbacks.py (100%) rename {lib => v1}/ansible/color.py (100%) rename {v2 => 
v1}/ansible/constants.py (89%) rename {lib => v1}/ansible/errors.py (100%) rename {v2 => v1}/ansible/inventory/__init__.py (88%) rename {v2 => v1}/ansible/inventory/dir.py (91%) rename {v2 => v1}/ansible/inventory/expand_hosts.py (97%) rename {v2 => v1}/ansible/inventory/group.py (69%) create mode 100644 v1/ansible/inventory/host.py rename {v2 => v1}/ansible/inventory/ini.py (82%) rename {v2 => v1}/ansible/inventory/script.py (82%) rename {lib/ansible/runner/connection_plugins => v1/ansible/inventory/vars_plugins}/__init__.py (100%) rename {v2 => v1}/ansible/inventory/vars_plugins/noop.py (94%) rename {lib => v1}/ansible/module_common.py (100%) rename {v2 => v1}/ansible/module_utils/__init__.py (100%) rename {v2 => v1}/ansible/module_utils/a10.py (100%) rename {v2 => v1}/ansible/module_utils/basic.py (97%) rename {v2 => v1}/ansible/module_utils/cloudstack.py (100%) rename {v2 => v1}/ansible/module_utils/database.py (100%) rename {v2 => v1}/ansible/module_utils/ec2.py (100%) rename {v2 => v1}/ansible/module_utils/facts.py (100%) rename {v2 => v1}/ansible/module_utils/gce.py (100%) rename {v2 => v1}/ansible/module_utils/known_hosts.py (100%) rename {v2 => v1}/ansible/module_utils/openstack.py (100%) rename {v2 => v1}/ansible/module_utils/powershell.ps1 (97%) rename {v2 => v1}/ansible/module_utils/rax.py (100%) rename {v2 => v1}/ansible/module_utils/redhat.py (100%) rename {v2 => v1}/ansible/module_utils/splitter.py (100%) rename {v2 => v1}/ansible/module_utils/urls.py (100%) rename {lib => v1}/ansible/module_utils/vmware.py (100%) rename {lib/ansible/runner/filter_plugins => v1/ansible/modules}/__init__.py (100%) create mode 100644 v1/ansible/playbook/__init__.py create mode 100644 v1/ansible/playbook/play.py create mode 100644 v1/ansible/playbook/task.py rename {lib => v1}/ansible/runner/__init__.py (100%) rename {lib/ansible/runner/lookup_plugins => v1/ansible/runner/action_plugins}/__init__.py (100%) rename {lib => v1}/ansible/runner/action_plugins/add_host.py 
(100%) rename {lib => v1}/ansible/runner/action_plugins/assemble.py (100%) rename {lib => v1}/ansible/runner/action_plugins/assert.py (100%) rename {lib => v1}/ansible/runner/action_plugins/async.py (100%) rename {lib => v1}/ansible/runner/action_plugins/copy.py (100%) rename {lib => v1}/ansible/runner/action_plugins/debug.py (100%) rename {lib => v1}/ansible/runner/action_plugins/fail.py (100%) rename {lib => v1}/ansible/runner/action_plugins/fetch.py (100%) rename {lib => v1}/ansible/runner/action_plugins/group_by.py (100%) rename {lib => v1}/ansible/runner/action_plugins/include_vars.py (100%) rename {lib => v1}/ansible/runner/action_plugins/normal.py (100%) rename {lib => v1}/ansible/runner/action_plugins/patch.py (100%) rename {lib => v1}/ansible/runner/action_plugins/pause.py (100%) rename {lib => v1}/ansible/runner/action_plugins/raw.py (100%) rename {lib => v1}/ansible/runner/action_plugins/script.py (100%) rename {lib => v1}/ansible/runner/action_plugins/set_fact.py (100%) rename {lib => v1}/ansible/runner/action_plugins/synchronize.py (100%) rename {lib => v1}/ansible/runner/action_plugins/template.py (100%) rename {lib => v1}/ansible/runner/action_plugins/unarchive.py (100%) rename {lib => v1}/ansible/runner/action_plugins/win_copy.py (100%) rename {lib => v1}/ansible/runner/action_plugins/win_template.py (100%) rename {lib => v1}/ansible/runner/connection.py (100%) rename {lib/ansible/runner/shell_plugins => v1/ansible/runner/connection_plugins}/__init__.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/accelerate.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/chroot.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/fireball.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/funcd.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/jail.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/libvirt_lxc.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/local.py (100%) 
rename {lib => v1}/ansible/runner/connection_plugins/paramiko_ssh.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/ssh.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/winrm.py (100%) rename {lib => v1}/ansible/runner/connection_plugins/zone.py (100%) rename {lib/ansible/utils/module_docs_fragments => v1/ansible/runner/filter_plugins}/__init__.py (100%) rename {lib => v1}/ansible/runner/filter_plugins/core.py (100%) rename {lib => v1}/ansible/runner/filter_plugins/ipaddr.py (100%) rename {lib => v1}/ansible/runner/filter_plugins/mathstuff.py (100%) rename {v2/ansible/inventory/vars_plugins => v1/ansible/runner/lookup_plugins}/__init__.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/cartesian.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/consul_kv.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/csvfile.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/dict.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/dig.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/dnstxt.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/env.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/etcd.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/file.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/fileglob.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/first_found.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/flattened.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/indexed_items.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/inventory_hostnames.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/items.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/lines.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/nested.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/password.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/pipe.py (100%) rename {lib => 
v1}/ansible/runner/lookup_plugins/random_choice.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/redis_kv.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/sequence.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/subelements.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/template.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/together.py (100%) rename {lib => v1}/ansible/runner/lookup_plugins/url.py (100%) rename {lib => v1}/ansible/runner/poller.py (100%) rename {lib => v1}/ansible/runner/return_data.py (100%) rename {v2/test/parsing/yaml => v1/ansible/runner/shell_plugins}/__init__.py (100%) rename {lib => v1}/ansible/runner/shell_plugins/csh.py (100%) rename {lib => v1}/ansible/runner/shell_plugins/fish.py (100%) rename {lib => v1}/ansible/runner/shell_plugins/powershell.py (100%) rename {lib => v1}/ansible/runner/shell_plugins/sh.py (100%) create mode 100644 v1/ansible/utils/__init__.py rename {lib => v1}/ansible/utils/cmd_functions.py (100%) rename {lib => v1}/ansible/utils/display_functions.py (100%) rename {v2 => v1}/ansible/utils/hashing.py (92%) rename {v2 => v1}/ansible/utils/module_docs.py (96%) create mode 100644 v1/ansible/utils/module_docs_fragments/__init__.py rename {lib => v1}/ansible/utils/module_docs_fragments/aws.py (100%) rename {lib => v1}/ansible/utils/module_docs_fragments/cloudstack.py (100%) rename {lib => v1}/ansible/utils/module_docs_fragments/files.py (100%) rename {lib => v1}/ansible/utils/module_docs_fragments/openstack.py (100%) rename {lib => v1}/ansible/utils/module_docs_fragments/rackspace.py (100%) rename {lib => v1}/ansible/utils/plugins.py (100%) rename {lib => v1}/ansible/utils/string_functions.py (100%) rename {lib => v1}/ansible/utils/su_prompts.py (100%) rename {lib => v1}/ansible/utils/template.py (100%) rename {v2 => v1}/ansible/utils/unicode.py (93%) create mode 100644 v1/ansible/utils/vault.py create mode 100755 v1/bin/ansible create mode 100755 v1/bin/ansible-doc 
create mode 100755 v1/bin/ansible-galaxy create mode 100755 v1/bin/ansible-playbook create mode 100755 v1/bin/ansible-pull create mode 100755 v1/bin/ansible-vault rename {test/units => v1/tests}/README.md (100%) rename {test/units => v1/tests}/TestConstants.py (100%) rename {test/units => v1/tests}/TestFilters.py (100%) rename {test/units => v1/tests}/TestInventory.py (100%) rename {test/units => v1/tests}/TestModuleUtilsBasic.py (100%) rename {test/units => v1/tests}/TestModuleUtilsDatabase.py (100%) rename {test/units => v1/tests}/TestModules.py (100%) rename {test/units => v1/tests}/TestPlayVarsFiles.py (100%) rename {test/units => v1/tests}/TestSynchronize.py (100%) rename {test/units => v1/tests}/TestUtils.py (100%) rename {test/units => v1/tests}/TestUtilsStringFunctions.py (100%) rename {test/units => v1/tests}/TestVault.py (100%) rename {test/units => v1/tests}/TestVaultEditor.py (100%) rename {test/units => v1/tests}/ansible.cfg (100%) rename {test/units => v1/tests}/inventory_test_data/ansible_hosts (100%) rename {test/units => v1/tests}/inventory_test_data/broken.yml (100%) rename {test/units => v1/tests}/inventory_test_data/common_vars.yml (100%) rename {test/units => v1/tests}/inventory_test_data/complex_hosts (100%) rename {test/units => v1/tests}/inventory_test_data/encrypted.yml (100%) rename {test/units => v1/tests}/inventory_test_data/hosts_list.yml (100%) rename {test/units => v1/tests}/inventory_test_data/inventory/test_alpha_end_before_beg (100%) rename {test/units => v1/tests}/inventory_test_data/inventory/test_combined_range (100%) rename {test/units => v1/tests}/inventory_test_data/inventory/test_incorrect_format (100%) rename {test/units => v1/tests}/inventory_test_data/inventory/test_incorrect_range (100%) rename {test/units => v1/tests}/inventory_test_data/inventory/test_leading_range (100%) rename {test/units => v1/tests}/inventory_test_data/inventory/test_missing_end (100%) rename {test/units => 
v1/tests}/inventory_test_data/inventory_api.py (100%) rename {test/units => v1/tests}/inventory_test_data/inventory_dir/0hosts (100%) rename {test/units => v1/tests}/inventory_test_data/inventory_dir/1mythology (100%) rename {test/units => v1/tests}/inventory_test_data/inventory_dir/2levels (100%) rename {test/units => v1/tests}/inventory_test_data/inventory_dir/3comments (100%) rename {test/units => v1/tests}/inventory_test_data/inventory_dir/4skip_extensions.ini (100%) rename {test/units => v1/tests}/inventory_test_data/large_range (100%) rename {test/units => v1/tests}/inventory_test_data/restrict_pattern (100%) rename {test/units => v1/tests}/inventory_test_data/simple_hosts (100%) rename {test/units => v1/tests}/module_tests/TestApt.py (100%) rename {test/units => v1/tests}/module_tests/TestDocker.py (100%) rename {test/units => v1/tests}/vault_test_data/foo-ansible-1.0.yml (100%) rename {test/units => v1/tests}/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml (100%) rename {test/units => v1/tests}/vault_test_data/foo-ansible-1.1.yml (100%) delete mode 100644 v2/README-tests.md delete mode 100644 v2/ansible/__init__.py delete mode 100644 v2/ansible/inventory/host.py delete mode 160000 v2/ansible/modules/core delete mode 160000 v2/ansible/modules/extras delete mode 100644 v2/ansible/playbook/__init__.py delete mode 100644 v2/ansible/playbook/play.py delete mode 100644 v2/ansible/playbook/task.py delete mode 100644 v2/ansible/utils/vault.py delete mode 100755 v2/bin/ansible delete mode 120000 v2/bin/ansible-doc delete mode 120000 v2/bin/ansible-galaxy delete mode 120000 v2/bin/ansible-playbook delete mode 120000 v2/bin/ansible-pull delete mode 120000 v2/bin/ansible-vault delete mode 100644 v2/hacking/README.md delete mode 100755 v2/hacking/authors.sh delete mode 100644 v2/hacking/env-setup delete mode 100644 v2/hacking/env-setup.fish delete mode 100755 v2/hacking/get_library.py delete mode 100755 v2/hacking/module_formatter.py delete mode 100644 
v2/hacking/templates/rst.j2 delete mode 100755 v2/hacking/test-module delete mode 100644 v2/scripts/ansible delete mode 100644 v2/setup.py delete mode 100644 v2/test/mock/__init__.py diff --git a/.gitmodules b/.gitmodules index 3f14953ec8f250..e69de29bb2d1d6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,16 +0,0 @@ -[submodule "lib/ansible/modules/core"] - path = lib/ansible/modules/core - url = https://github.com/ansible/ansible-modules-core.git - branch = devel -[submodule "lib/ansible/modules/extras"] - path = lib/ansible/modules/extras - url = https://github.com/ansible/ansible-modules-extras.git - branch = devel -[submodule "v2/ansible/modules/core"] - path = v2/ansible/modules/core - url = https://github.com/ansible/ansible-modules-core.git - branch = devel -[submodule "v2/ansible/modules/extras"] - path = v2/ansible/modules/extras - url = https://github.com/ansible/ansible-modules-extras.git - branch = devel diff --git a/bin/ansible b/bin/ansible index 7fec34ec81e9c6..467dd505a2e17a 100755 --- a/bin/ansible +++ b/bin/ansible @@ -18,6 +18,8 @@ # along with Ansible. If not, see . 
######################################################## +from __future__ import (absolute_import) +__metaclass__ = type __requires__ = ['ansible'] try: @@ -33,175 +35,45 @@ except Exception: import os import sys -from ansible.runner import Runner -import ansible.constants as C -from ansible import utils -from ansible import errors -from ansible import callbacks -from ansible import inventory -######################################################## - -class Cli(object): - ''' code behind bin/ansible ''' - - # ---------------------------------------------- - - def __init__(self): - self.stats = callbacks.AggregateStats() - self.callbacks = callbacks.CliRunnerCallbacks() - if C.DEFAULT_LOAD_CALLBACK_PLUGINS: - callbacks.load_callback_plugins() - - # ---------------------------------------------- - - def parse(self): - ''' create an options parser for bin/ansible ''' - - parser = utils.base_parser( - constants=C, - runas_opts=True, - subset_opts=True, - async_opts=True, - output_opts=True, - connect_opts=True, - check_opts=True, - diff_opts=False, - usage='%prog [options]' - ) - - parser.add_option('-a', '--args', dest='module_args', - help="module arguments", default=C.DEFAULT_MODULE_ARGS) - parser.add_option('-m', '--module-name', dest='module_name', - help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, - default=C.DEFAULT_MODULE_NAME) - - options, args = parser.parse_args() - self.callbacks.options = options - - if len(args) == 0 or len(args) > 1: - parser.print_help() - sys.exit(1) - - # privlege escalation command line arguments need to be mutually exclusive - utils.check_mutually_exclusive_privilege(options, parser) - - if (options.ask_vault_pass and options.vault_password_file): - parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") - - return (options, args) - - # ---------------------------------------------- - - def run(self, options, args): - ''' use Runner lib to do SSH things ''' - - pattern = args[0] - - 
sshpass = becomepass = vault_pass = become_method = None - - # Never ask for an SSH password when we run with local connection - if options.connection == "local": - options.ask_pass = False - else: - options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS - - options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS - - # become - utils.normalize_become_options(options) - prompt_method = utils.choose_pass_prompt(options) - (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method) - - # read vault_pass from a file - if not options.ask_vault_pass and options.vault_password_file: - vault_pass = utils.read_vault_file(options.vault_password_file) - - extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) - - inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass) - if options.subset: - inventory_manager.subset(options.subset) - hosts = inventory_manager.list_hosts(pattern) - - if len(hosts) == 0: - callbacks.display("No hosts matched", stderr=True) - sys.exit(0) - - if options.listhosts: - for host in hosts: - callbacks.display(' %s' % host) - sys.exit(0) - - if options.module_name in ['command','shell'] and not options.module_args: - callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True) - sys.exit(1) - - if options.tree: - utils.prepare_writeable_dir(options.tree) - - runner = Runner( - module_name=options.module_name, - module_path=options.module_path, - module_args=options.module_args, - remote_user=options.remote_user, - remote_pass=sshpass, - inventory=inventory_manager, - timeout=options.timeout, - private_key_file=options.private_key_file, - forks=options.forks, - pattern=pattern, - callbacks=self.callbacks, - transport=options.connection, - subset=options.subset, - check=options.check, - diff=options.check, - 
vault_pass=vault_pass, - become=options.become, - become_method=options.become_method, - become_pass=becomepass, - become_user=options.become_user, - extra_vars=extra_vars, - ) - - if options.seconds: - callbacks.display("background launch...\n\n", color='cyan') - results, poller = runner.run_async(options.seconds) - results = self.poll_while_needed(poller, options) - else: - results = runner.run() - - return (runner, results) - - # ---------------------------------------------- - - def poll_while_needed(self, poller, options): - ''' summarize results from Runner ''' - - # BACKGROUND POLL LOGIC when -B and -P are specified - if options.seconds and options.poll_interval > 0: - poller.wait(options.seconds, options.poll_interval) - - return poller.results - +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.utils.display import Display ######################################################## if __name__ == '__main__': - callbacks.display("", log_only=True) - callbacks.display(" ".join(sys.argv), log_only=True) - callbacks.display("", log_only=True) - cli = Cli() - (options, args) = cli.parse() + cli = None + display = Display() + me = os.path.basename(__file__) + try: - (runner, results) = cli.run(options, args) - for result in results['contacted'].values(): - if 'failed' in result or result.get('rc', 0) != 0: - sys.exit(2) - if results['dark']: - sys.exit(3) - except errors.AnsibleError, e: - # Generic handler for ansible specific errors - callbacks.display("ERROR: %s" % str(e), stderr=True, color='red') - sys.exit(1) + if me == 'ansible-playbook': + from ansible.cli.playbook import PlaybookCLI as mycli + elif me == 'ansible': + from ansible.cli.adhoc import AdHocCLI as mycli + elif me == 'ansible-pull': + from ansible.cli.pull import PullCLI as mycli + elif me == 'ansible-doc': + from ansible.cli.doc import DocCLI as mycli + elif me == 'ansible-vault': + from ansible.cli.vault import VaultCLI as mycli + elif me == 'ansible-galaxy': + from 
ansible.cli.galaxy import GalaxyCLI as mycli + + cli = mycli(sys.argv, display=display) + if cli: + cli.parse() + sys.exit(cli.run()) + else: + raise AnsibleError("Program not implemented: %s" % me) + except AnsibleOptionsError as e: + cli.parser.print_help() + display.display(str(e), stderr=True, color='red') + sys.exit(1) + except AnsibleError as e: + display.display(str(e), stderr=True, color='red') + sys.exit(2) + except KeyboardInterrupt: + display.error("interrupted") + sys.exit(4) diff --git a/bin/ansible-doc b/bin/ansible-doc deleted file mode 100755 index dff7cecce7903a..00000000000000 --- a/bin/ansible-doc +++ /dev/null @@ -1,337 +0,0 @@ -#!/usr/bin/env python - -# (c) 2012, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -import os -import sys -import textwrap -import re -import optparse -import datetime -import subprocess -import fcntl -import termios -import struct - -from ansible import utils -from ansible.utils import module_docs -import ansible.constants as C -from ansible.utils import version -import traceback - -MODULEDIR = C.DEFAULT_MODULE_PATH - -BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm') -IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"] - -_ITALIC = re.compile(r"I\(([^)]+)\)") -_BOLD = re.compile(r"B\(([^)]+)\)") -_MODULE = re.compile(r"M\(([^)]+)\)") -_URL = re.compile(r"U\(([^)]+)\)") -_CONST = re.compile(r"C\(([^)]+)\)") -PAGER = 'less' -LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) - # -S (chop long lines) -X (disable termcap init and de-init) - -def pager_print(text): - ''' just print text ''' - print text - -def pager_pipe(text, cmd): - ''' pipe text through a pager ''' - if 'LESS' not in os.environ: - os.environ['LESS'] = LESS_OPTS - try: - cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout) - cmd.communicate(input=text) - except IOError: - pass - except KeyboardInterrupt: - pass - -def pager(text): - ''' find reasonable way to display text ''' - # this is a much simpler form of what is in pydoc.py - if not sys.stdout.isatty(): - pager_print(text) - elif 'PAGER' in os.environ: - if sys.platform == 'win32': - pager_print(text) - else: - pager_pipe(text, os.environ['PAGER']) - elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0: - pager_pipe(text, 'less') - else: - pager_print(text) - -def tty_ify(text): - - t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word' - t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word* - t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] - t = _URL.sub(r"\1", t) # U(word) => word - t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word' - - return t - -def get_man_text(doc): - - opt_indent=" " 
- text = [] - text.append("> %s\n" % doc['module'].upper()) - - desc = " ".join(doc['description']) - - text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" ")) - - if 'option_keys' in doc and len(doc['option_keys']) > 0: - text.append("Options (= is mandatory):\n") - - for o in sorted(doc['option_keys']): - opt = doc['options'][o] - - if opt.get('required', False): - opt_leadin = "=" - else: - opt_leadin = "-" - - text.append("%s %s" % (opt_leadin, o)) - - desc = " ".join(opt['description']) - - if 'choices' in opt: - choices = ", ".join(str(i) for i in opt['choices']) - desc = desc + " (Choices: " + choices + ")" - if 'default' in opt: - default = str(opt['default']) - desc = desc + " [Default: " + default + "]" - text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent, - subsequent_indent=opt_indent)) - - if 'notes' in doc and len(doc['notes']) > 0: - notes = " ".join(doc['notes']) - text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ", - subsequent_indent=opt_indent)) - - - if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0: - req = ", ".join(doc['requirements']) - text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ", - subsequent_indent=opt_indent)) - - if 'examples' in doc and len(doc['examples']) > 0: - text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's')) - for ex in doc['examples']: - text.append("%s\n" % (ex['code'])) - - if 'plainexamples' in doc and doc['plainexamples'] is not None: - text.append("EXAMPLES:") - text.append(doc['plainexamples']) - if 'returndocs' in doc and doc['returndocs'] is not None: - text.append("RETURN VALUES:") - text.append(doc['returndocs']) - text.append('') - - return "\n".join(text) - - -def get_snippet_text(doc): - - text = [] - desc = tty_ify(" ".join(doc['short_description'])) - text.append("- name: %s" % (desc)) - text.append(" action: %s" % 
(doc['module'])) - - for o in sorted(doc['options'].keys()): - opt = doc['options'][o] - desc = tty_ify(" ".join(opt['description'])) - - if opt.get('required', False): - s = o + "=" - else: - s = o - - text.append(" %-20s # %s" % (s, desc)) - text.append('') - - return "\n".join(text) - -def get_module_list_text(module_list): - tty_size = 0 - if os.isatty(0): - tty_size = struct.unpack('HHHH', - fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1] - columns = max(60, tty_size) - displace = max(len(x) for x in module_list) - linelimit = columns - displace - 5 - text = [] - deprecated = [] - for module in sorted(set(module_list)): - - if module in module_docs.BLACKLIST_MODULES: - continue - - filename = utils.plugins.module_finder.find_plugin(module) - - if filename is None: - continue - if filename.endswith(".ps1"): - continue - if os.path.isdir(filename): - continue - - try: - doc, plainexamples, returndocs = module_docs.get_docstring(filename) - desc = tty_ify(doc.get('short_description', '?')).strip() - if len(desc) > linelimit: - desc = desc[:linelimit] + '...' 
- - if module.startswith('_'): # Handle deprecated - deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) - else: - text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) - except: - traceback.print_exc() - sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) - - if len(deprecated) > 0: - text.append("\nDEPRECATED:") - text.extend(deprecated) - return "\n".join(text) - -def find_modules(path, module_list): - - if os.path.isdir(path): - for module in os.listdir(path): - if module.startswith('.'): - continue - elif os.path.isdir(module): - find_modules(module, module_list) - elif any(module.endswith(x) for x in BLACKLIST_EXTS): - continue - elif module.startswith('__'): - continue - elif module in IGNORE_FILES: - continue - elif module.startswith('_'): - fullpath = '/'.join([path,module]) - if os.path.islink(fullpath): # avoids aliases - continue - - module = os.path.splitext(module)[0] # removes the extension - module_list.append(module) - -def main(): - - p = optparse.OptionParser( - version=version("%prog"), - usage='usage: %prog [options] [module...]', - description='Show Ansible module documentation', - ) - - p.add_option("-M", "--module-path", - action="store", - dest="module_path", - default=MODULEDIR, - help="Ansible modules/ directory") - p.add_option("-l", "--list", - action="store_true", - default=False, - dest='list_dir', - help='List available modules') - p.add_option("-s", "--snippet", - action="store_true", - default=False, - dest='show_snippet', - help='Show playbook snippet for specified module(s)') - p.add_option('-v', action='version', help='Show version number and exit') - - (options, args) = p.parse_args() - - if options.module_path is not None: - for i in options.module_path.split(os.pathsep): - utils.plugins.module_finder.add_directory(i) - - if options.list_dir: - # list modules - paths = utils.plugins.module_finder._get_paths() - 
module_list = [] - for path in paths: - find_modules(path, module_list) - - pager(get_module_list_text(module_list)) - sys.exit() - - if len(args) == 0: - p.print_help() - - def print_paths(finder): - ''' Returns a string suitable for printing of the search path ''' - - # Uses a list to get the order right - ret = [] - for i in finder._get_paths(): - if i not in ret: - ret.append(i) - return os.pathsep.join(ret) - - text = '' - for module in args: - - filename = utils.plugins.module_finder.find_plugin(module) - if filename is None: - sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder))) - continue - - if any(filename.endswith(x) for x in BLACKLIST_EXTS): - continue - - try: - doc, plainexamples, returndocs = module_docs.get_docstring(filename) - except: - traceback.print_exc() - sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) - continue - - if doc is not None: - - all_keys = [] - for (k,v) in doc['options'].iteritems(): - all_keys.append(k) - all_keys = sorted(all_keys) - doc['option_keys'] = all_keys - - doc['filename'] = filename - doc['docuri'] = doc['module'].replace('_', '-') - doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') - doc['plainexamples'] = plainexamples - doc['returndocs'] = returndocs - - if options.show_snippet: - text += get_snippet_text(doc) - else: - text += get_man_text(doc) - else: - # this typically means we couldn't even parse the docstring, not just that the YAML is busted, - # probably a quoting issue. 
- sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module) - pager(text) - -if __name__ == '__main__': - main() diff --git a/bin/ansible-doc b/bin/ansible-doc new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-doc @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy deleted file mode 100755 index a6d625671ec548..00000000000000 --- a/bin/ansible-galaxy +++ /dev/null @@ -1,957 +0,0 @@ -#!/usr/bin/env python - -######################################################################## -# -# (C) 2013, James Cammarata -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# -######################################################################## - -import datetime -import json -import os -import os.path -import shutil -import subprocess -import sys -import tarfile -import tempfile -import urllib -import urllib2 -import yaml - -from collections import defaultdict -from distutils.version import LooseVersion -from jinja2 import Environment -from optparse import OptionParser - -import ansible.constants as C -import ansible.utils -from ansible.errors import AnsibleError - -default_meta_template = """--- -galaxy_info: - author: {{ author }} - description: {{description}} - company: {{ company }} - # If the issue tracker for your role is not on github, uncomment the - # next line and provide a value - # issue_tracker_url: {{ issue_tracker_url }} - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: {{ license }} - min_ansible_version: {{ min_ansible_version }} - # - # Below are all platforms currently available. Just uncomment - # the ones that apply to your role. If you don't see your - # platform on this list, let us know and we'll get it added! - # - #platforms: - {%- for platform,versions in platforms.iteritems() %} - #- name: {{ platform }} - # versions: - # - all - {%- for version in versions %} - # - {{ version }} - {%- endfor %} - {%- endfor %} - # - # Below are all categories currently available. Just as with - # the platforms above, uncomment those that apply to your role. - # - #categories: - {%- for category in categories %} - #- {{ category.name }} - {%- endfor %} -dependencies: [] - # List your role dependencies here, one per line. - # Be sure to remove the '[]' above if you add dependencies - # to this list. - {% for dependency in dependencies %} - #- {{ dependency }} - {% endfor %} - -""" - -default_readme_template = """Role Name -========= - -A brief description of the role goes here. 
- -Requirements ------------- - -Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. - -Role Variables --------------- - -A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. - -Dependencies ------------- - -A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. - -Example Playbook ----------------- - -Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: - - - hosts: servers - roles: - - { role: username.rolename, x: 42 } - -License -------- - -BSD - -Author Information ------------------- - -An optional section for the role authors to include contact information, or a website (HTML is not allowed). -""" - -#------------------------------------------------------------------------------------- -# Utility functions for parsing actions/options -#------------------------------------------------------------------------------------- - -VALID_ACTIONS = ("init", "info", "install", "list", "remove") -SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - -def get_action(args): - """ - Get the action the user wants to execute from the - sys argv list. 
- """ - for i in range(0,len(args)): - arg = args[i] - if arg in VALID_ACTIONS: - del args[i] - return arg - return None - -def build_option_parser(action): - """ - Builds an option parser object based on the action - the user wants to execute. - """ - - usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS) - epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) - OptionParser.format_epilog = lambda self, formatter: self.epilog - parser = OptionParser(usage=usage, epilog=epilog) - - if not action: - parser.print_help() - sys.exit() - - # options for all actions - # - none yet - - # options specific to actions - if action == "info": - parser.set_usage("usage: %prog info [options] role_name[,version]") - elif action == "init": - parser.set_usage("usage: %prog init [options] role_name") - parser.add_option( - '-p', '--init-path', dest='init_path', default="./", - help='The path in which the skeleton role will be created. 
' - 'The default is the current working directory.') - parser.add_option( - '--offline', dest='offline', default=False, action='store_true', - help="Don't query the galaxy API when creating roles") - elif action == "install": - parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") - parser.add_option( - '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, - help='Ignore errors and continue with the next specified role.') - parser.add_option( - '-n', '--no-deps', dest='no_deps', action='store_true', default=False, - help='Don\'t download roles listed as dependencies') - parser.add_option( - '-r', '--role-file', dest='role_file', - help='A file containing a list of roles to be imported') - elif action == "remove": - parser.set_usage("usage: %prog remove role1 role2 ...") - elif action == "list": - parser.set_usage("usage: %prog list [role_name]") - - # options that apply to more than one action - if action != "init": - parser.add_option( - '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, - help='The path to the directory containing your roles. ' - 'The default is the roles_path configured in your ' - 'ansible.cfg file (/etc/ansible/roles if not configured)') - - if action in ("info","init","install"): - parser.add_option( - '-s', '--server', dest='api_server', default="galaxy.ansible.com", - help='The API server destination') - - if action in ("init","install"): - parser.add_option( - '-f', '--force', dest='force', action='store_true', default=False, - help='Force overwriting an existing role') - # done, return the parser - return parser - -def get_opt(options, k, defval=""): - """ - Returns an option from an Optparse values instance. 
- """ - try: - data = getattr(options, k) - except: - return defval - if k == "roles_path": - if os.pathsep in data: - data = data.split(os.pathsep)[0] - return data - -def exit_without_ignore(options, rc=1): - """ - Exits with the specified return code unless the - option --ignore-errors was specified - """ - - if not get_opt(options, "ignore_errors", False): - print '- you can use --ignore-errors to skip failed roles.' - sys.exit(rc) - - -#------------------------------------------------------------------------------------- -# Galaxy API functions -#------------------------------------------------------------------------------------- - -def api_get_config(api_server): - """ - Fetches the Galaxy API current version to ensure - the API server is up and reachable. - """ - - try: - url = 'https://%s/api/' % api_server - data = json.load(urllib2.urlopen(url)) - if not data.get("current_version",None): - return None - else: - return data - except: - return None - -def api_lookup_role_by_name(api_server, role_name, notify=True): - """ - Uses the Galaxy API to do a lookup on the role owner/name. - """ - - role_name = urllib.quote(role_name) - - try: - parts = role_name.split(".") - user_name = ".".join(parts[0:-1]) - role_name = parts[-1] - if notify: - print "- downloading role '%s', owned by %s" % (role_name, user_name) - except: - parser.print_help() - print "- invalid role name (%s). Specify role as format: username.rolename" % role_name - sys.exit(1) - - url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name) - try: - data = json.load(urllib2.urlopen(url)) - if len(data["results"]) == 0: - return None - else: - return data["results"][0] - except: - return None - -def api_fetch_role_related(api_server, related, role_id): - """ - Uses the Galaxy API to fetch the list of related items for - the given role. The url comes from the 'related' field of - the role. 
- """ - - try: - url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related) - data = json.load(urllib2.urlopen(url)) - results = data['results'] - done = (data.get('next', None) == None) - while not done: - url = 'https://%s%s' % (api_server, data['next']) - print url - data = json.load(urllib2.urlopen(url)) - results += data['results'] - done = (data.get('next', None) == None) - return results - except: - return None - -def api_get_list(api_server, what): - """ - Uses the Galaxy API to fetch the list of items specified. - """ - - try: - url = 'https://%s/api/v1/%s/?page_size' % (api_server, what) - data = json.load(urllib2.urlopen(url)) - if "results" in data: - results = data['results'] - else: - results = data - done = True - if "next" in data: - done = (data.get('next', None) == None) - while not done: - url = 'https://%s%s' % (api_server, data['next']) - print url - data = json.load(urllib2.urlopen(url)) - results += data['results'] - done = (data.get('next', None) == None) - return results - except: - print "- failed to download the %s list" % what - return None - -#------------------------------------------------------------------------------------- -# scm repo utility functions -#------------------------------------------------------------------------------------- - -def scm_archive_role(scm, role_url, role_version, role_name): - if scm not in ['hg', 'git']: - print "- scm %s is not currently supported" % scm - return False - tempdir = tempfile.mkdtemp() - clone_cmd = [scm, 'clone', role_url, role_name] - with open('/dev/null', 'w') as devnull: - try: - print "- executing: %s" % " ".join(clone_cmd) - popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull) - except: - raise AnsibleError("error executing: %s" % " ".join(clone_cmd)) - rc = popen.wait() - if rc != 0: - print "- command %s failed" % ' '.join(clone_cmd) - print " in directory %s" % tempdir - return False - - temp_file = 
tempfile.NamedTemporaryFile(delete=False, suffix='.tar') - if scm == 'hg': - archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name] - if role_version: - archive_cmd.extend(['-r', role_version]) - archive_cmd.append(temp_file.name) - if scm == 'git': - archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name] - if role_version: - archive_cmd.append(role_version) - else: - archive_cmd.append('HEAD') - - with open('/dev/null', 'w') as devnull: - print "- executing: %s" % " ".join(archive_cmd) - popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name), - stderr=devnull, stdout=devnull) - rc = popen.wait() - if rc != 0: - print "- command %s failed" % ' '.join(archive_cmd) - print " in directory %s" % tempdir - return False - - shutil.rmtree(tempdir, ignore_errors=True) - - return temp_file.name - - -#------------------------------------------------------------------------------------- -# Role utility functions -#------------------------------------------------------------------------------------- - -def get_role_path(role_name, options): - """ - Returns the role path based on the roles_path option - and the role name. - """ - roles_path = get_opt(options,'roles_path') - roles_path = os.path.join(roles_path, role_name) - roles_path = os.path.expanduser(roles_path) - return roles_path - -def get_role_metadata(role_name, options): - """ - Returns the metadata as YAML, if the file 'meta/main.yml' - exists in the specified role_path - """ - role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml') - try: - if os.path.isfile(role_path): - f = open(role_path, 'r') - meta_data = yaml.safe_load(f) - f.close() - return meta_data - else: - return None - except: - return None - -def get_galaxy_install_info(role_name, options): - """ - Returns the YAML data contained in 'meta/.galaxy_install_info', - if it exists. 
- """ - - try: - info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info') - if os.path.isfile(info_path): - f = open(info_path, 'r') - info_data = yaml.safe_load(f) - f.close() - return info_data - else: - return None - except: - return None - -def write_galaxy_install_info(role_name, role_version, options): - """ - Writes a YAML-formatted file to the role's meta/ directory - (named .galaxy_install_info) which contains some information - we can use later for commands like 'list' and 'info'. - """ - - info = dict( - version = role_version, - install_date = datetime.datetime.utcnow().strftime("%c"), - ) - try: - info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info') - f = open(info_path, 'w+') - info_data = yaml.safe_dump(info, f) - f.close() - except: - return False - return True - - -def remove_role(role_name, options): - """ - Removes the specified role from the roles path. There is a - sanity check to make sure there's a meta/main.yml file at this - path so the user doesn't blow away random directories - """ - if get_role_metadata(role_name, options): - role_path = get_role_path(role_name, options) - shutil.rmtree(role_path) - return True - else: - return False - -def fetch_role(role_name, target, role_data, options): - """ - Downloads the archived role from github to a temp location, extracts - it, and then copies the extracted role to the role library path. 
- """ - - # first grab the file and save it to a temp location - if '://' in role_name: - archive_url = role_name - else: - archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target) - print "- downloading role from %s" % archive_url - - try: - url_file = urllib2.urlopen(archive_url) - temp_file = tempfile.NamedTemporaryFile(delete=False) - data = url_file.read() - while data: - temp_file.write(data) - data = url_file.read() - temp_file.close() - return temp_file.name - except Exception, e: - # TODO: better urllib2 error handling for error - # messages that are more exact - print "- error: failed to download the file." - return False - -def install_role(role_name, role_version, role_filename, options): - # the file is a tar, so open it that way and extract it - # to the specified (or default) roles directory - - if not tarfile.is_tarfile(role_filename): - print "- error: the file downloaded was not a tar.gz" - return False - else: - if role_filename.endswith('.gz'): - role_tar_file = tarfile.open(role_filename, "r:gz") - else: - role_tar_file = tarfile.open(role_filename, "r") - # verify the role's meta file - meta_file = None - members = role_tar_file.getmembers() - # next find the metadata file - for member in members: - if "/meta/main.yml" in member.name: - meta_file = member - break - if not meta_file: - print "- error: this role does not appear to have a meta/main.yml file." - return False - else: - try: - meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file)) - except: - print "- error: this role does not appear to have a valid meta/main.yml file." 
- return False - - # we strip off the top-level directory for all of the files contained within - # the tar file here, since the default is 'github_repo-target', and change it - # to the specified role's name - role_path = os.path.join(get_opt(options, 'roles_path'), role_name) - role_path = os.path.expanduser(role_path) - print "- extracting %s to %s" % (role_name, role_path) - try: - if os.path.exists(role_path): - if not os.path.isdir(role_path): - print "- error: the specified roles path exists and is not a directory." - return False - elif not get_opt(options, "force", False): - print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name - return False - else: - # using --force, remove the old path - if not remove_role(role_name, options): - print "- error: %s doesn't appear to contain a role." % role_path - print " please remove this directory manually if you really want to put the role here." - return False - else: - os.makedirs(role_path) - - # now we do the actual extraction to the role_path - for member in members: - # we only extract files, and remove any relative path - # bits that might be in the file for security purposes - # and drop the leading directory, as mentioned above - if member.isreg() or member.issym(): - parts = member.name.split("/")[1:] - final_parts = [] - for part in parts: - if part != '..' 
and '~' not in part and '$' not in part: - final_parts.append(part) - member.name = os.path.join(*final_parts) - role_tar_file.extract(member, role_path) - - # write out the install info file for later use - write_galaxy_install_info(role_name, role_version, options) - except OSError, e: - print "- error: you do not have permission to modify files in %s" % role_path - return False - - # return the parsed yaml metadata - print "- %s was installed successfully" % role_name - return meta_file_data - -#------------------------------------------------------------------------------------- -# Action functions -#------------------------------------------------------------------------------------- - -def execute_init(args, options, parser): - """ - Executes the init action, which creates the skeleton framework - of a role that complies with the galaxy metadata format. - """ - - init_path = get_opt(options, 'init_path', './') - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - force = get_opt(options, 'force', False) - offline = get_opt(options, 'offline', False) - - if not offline: - api_config = api_get_config(api_server) - if not api_config: - print "- the API server (%s) is not responding, please try again later." % api_server - sys.exit(1) - - try: - role_name = args.pop(0).strip() - if role_name == "": - raise Exception("") - role_path = os.path.join(init_path, role_name) - if os.path.exists(role_path): - if os.path.isfile(role_path): - print "- the path %s already exists, but is a file - aborting" % role_path - sys.exit(1) - elif not force: - print "- the directory %s already exists." % role_path - print " you can use --force to re-initialize this directory,\n" + \ - " however it will reset any main.yml files that may have\n" + \ - " been modified there already." 
- sys.exit(1) - except Exception, e: - parser.print_help() - print "- no role name specified for init" - sys.exit(1) - - ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') - - # create the default README.md - if not os.path.exists(role_path): - os.makedirs(role_path) - readme_path = os.path.join(role_path, "README.md") - f = open(readme_path, "wb") - f.write(default_readme_template) - f.close - - for dir in ROLE_DIRS: - dir_path = os.path.join(init_path, role_name, dir) - main_yml_path = os.path.join(dir_path, 'main.yml') - # create the directory if it doesn't exist already - if not os.path.exists(dir_path): - os.makedirs(dir_path) - - # now create the main.yml file for that directory - if dir == "meta": - # create a skeleton meta/main.yml with a valid galaxy_info - # datastructure in place, plus with all of the available - # tags/platforms included (but commented out) and the - # dependencies section - platforms = [] - if not offline: - platforms = api_get_list(api_server, "platforms") or [] - categories = [] - if not offline: - categories = api_get_list(api_server, "categories") or [] - - # group the list of platforms from the api based - # on their names, with the release field being - # appended to a list of versions - platform_groups = defaultdict(list) - for platform in platforms: - platform_groups[platform['name']].append(platform['release']) - platform_groups[platform['name']].sort() - - inject = dict( - author = 'your name', - company = 'your company (optional)', - license = 'license (GPLv2, CC-BY, etc)', - issue_tracker_url = 'http://example.com/issue/tracker', - min_ansible_version = '1.2', - platforms = platform_groups, - categories = categories, - ) - rendered_meta = Environment().from_string(default_meta_template).render(inject) - f = open(main_yml_path, 'w') - f.write(rendered_meta) - f.close() - pass - elif dir not in ('files','templates'): - # just write a (mostly) empty YAML file for main.yml - f = open(main_yml_path, 
'w') - f.write('---\n# %s file for %s\n' % (dir,role_name)) - f.close() - print "- %s was created successfully" % role_name - -def execute_info(args, options, parser): - """ - Executes the info action. This action prints out detailed - information about an installed role as well as info available - from the galaxy API. - """ - - if len(args) == 0: - # the user needs to specify a role - parser.print_help() - print "- you must specify a user/role name" - sys.exit(1) - - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - api_config = api_get_config(api_server) - roles_path = get_opt(options, "roles_path") - - for role in args: - - role_info = {} - - install_info = get_galaxy_install_info(role, options) - if install_info: - if 'version' in install_info: - install_info['intalled_version'] = install_info['version'] - del install_info['version'] - role_info.update(install_info) - - remote_data = api_lookup_role_by_name(api_server, role, False) - if remote_data: - role_info.update(remote_data) - - metadata = get_role_metadata(role, options) - if metadata: - role_info.update(metadata) - - role_spec = ansible.utils.role_spec_parse(role) - if role_spec: - role_info.update(role_spec) - - if role_info: - print "- %s:" % (role) - for k in sorted(role_info.keys()): - - if k in SKIP_INFO_KEYS: - continue - - if isinstance(role_info[k], dict): - print "\t%s: " % (k) - for key in sorted(role_info[k].keys()): - if key in SKIP_INFO_KEYS: - continue - print "\t\t%s: %s" % (key, role_info[k][key]) - else: - print "\t%s: %s" % (k, role_info[k]) - else: - print "- the role %s was not found" % role - -def execute_install(args, options, parser): - """ - Executes the installation action. The args list contains the - roles to be installed, unless -f was specified. The list of roles - can be a name (which will be downloaded via the galaxy API and github), - or it can be a local .tar.gz file. 
- """ - - role_file = get_opt(options, "role_file", None) - - if len(args) == 0 and role_file is None: - # the user needs to specify one of either --role-file - # or specify a single user/role name - parser.print_help() - print "- you must specify a user/role name or a roles file" - sys.exit() - elif len(args) == 1 and not role_file is None: - # using a role file is mutually exclusive of specifying - # the role name on the command line - parser.print_help() - print "- please specify a user/role name, or a roles file, but not both" - sys.exit(1) - - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - no_deps = get_opt(options, "no_deps", False) - roles_path = get_opt(options, "roles_path") - - roles_done = [] - if role_file: - f = open(role_file, 'r') - if role_file.endswith('.yaml') or role_file.endswith('.yml'): - roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f)) - else: - # roles listed in a file, one per line - roles_left = map(ansible.utils.role_spec_parse, f.readlines()) - f.close() - else: - # roles were specified directly, so we'll just go out grab them - # (and their dependencies, unless the user doesn't want us to). 
- roles_left = map(ansible.utils.role_spec_parse, args) - - while len(roles_left) > 0: - # query the galaxy API for the role data - role_data = None - role = roles_left.pop(0) - role_src = role.get("src") - role_scm = role.get("scm") - role_path = role.get("path") - - if role_path: - options.roles_path = role_path - else: - options.roles_path = roles_path - - if os.path.isfile(role_src): - # installing a local tar.gz - tmp_file = role_src - else: - if role_scm: - # create tar file from scm url - tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name")) - elif '://' in role_src: - # just download a URL - version will probably be in the URL - tmp_file = fetch_role(role_src, None, None, options) - else: - # installing from galaxy - api_config = api_get_config(api_server) - if not api_config: - print "- the API server (%s) is not responding, please try again later." % api_server - sys.exit(1) - - role_data = api_lookup_role_by_name(api_server, role_src) - if not role_data: - print "- sorry, %s was not found on %s." % (role_src, api_server) - exit_without_ignore(options) - continue - - role_versions = api_fetch_role_related(api_server, 'versions', role_data['id']) - if "version" not in role or role['version'] == '': - # convert the version names to LooseVersion objects - # and sort them to get the latest version. If there - # are no versions in the list, we'll grab the head - # of the master branch - if len(role_versions) > 0: - loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] - loose_versions.sort() - role["version"] = str(loose_versions[-1]) - else: - role["version"] = 'master' - elif role['version'] != 'master': - if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]: - print 'role is %s' % role - print "- the specified version (%s) was not found in the list of available versions (%s)." 
% (role['version'], role_versions) - exit_without_ignore(options) - continue - - # download the role. if --no-deps was specified, we stop here, - # otherwise we recursively grab roles and all of their deps. - tmp_file = fetch_role(role_src, role["version"], role_data, options) - installed = False - if tmp_file: - installed = install_role(role.get("name"), role.get("version"), tmp_file, options) - # we're done with the temp file, clean it up - if tmp_file != role_src: - os.unlink(tmp_file) - # install dependencies, if we want them - if not no_deps and installed: - if not role_data: - role_data = get_role_metadata(role.get("name"), options) - role_dependencies = role_data['dependencies'] - else: - role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id']) - for dep in role_dependencies: - if isinstance(dep, basestring): - dep = ansible.utils.role_spec_parse(dep) - else: - dep = ansible.utils.role_yaml_parse(dep) - if not get_role_metadata(dep["name"], options): - if dep not in roles_left: - print '- adding dependency: %s' % dep["name"] - roles_left.append(dep) - else: - print '- dependency %s already pending installation.' % dep["name"] - else: - print '- dependency %s is already installed, skipping.' % dep["name"] - if not tmp_file or not installed: - print "- %s was NOT installed successfully." % role.get("name") - exit_without_ignore(options) - sys.exit(0) - -def execute_remove(args, options, parser): - """ - Executes the remove action. The args list contains the list - of roles to be removed. This list can contain more than one role. - """ - - if len(args) == 0: - parser.print_help() - print '- you must specify at least one role to remove.' - sys.exit() - - for role in args: - if get_role_metadata(role, options): - if remove_role(role, options): - print '- successfully removed %s' % role - else: - print "- failed to remove role: %s" % role - else: - print '- %s is not installed, skipping.' 
% role - sys.exit(0) - -def execute_list(args, options, parser): - """ - Executes the list action. The args list can contain zero - or one role. If one is specified, only that role will be - shown, otherwise all roles in the specified directory will - be shown. - """ - - if len(args) > 1: - print "- please specify only one role to list, or specify no roles to see a full list" - sys.exit(1) - - if len(args) == 1: - # show only the request role, if it exists - role_name = args[0] - metadata = get_role_metadata(role_name, options) - if metadata: - install_info = get_galaxy_install_info(role_name, options) - version = None - if install_info: - version = install_info.get("version", None) - if not version: - version = "(unknown version)" - # show some more info about single roles here - print "- %s, %s" % (role_name, version) - else: - print "- the role %s was not found" % role_name - else: - # show all valid roles in the roles_path directory - roles_path = get_opt(options, 'roles_path') - roles_path = os.path.expanduser(roles_path) - if not os.path.exists(roles_path): - parser.print_help() - print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path - sys.exit(1) - elif not os.path.isdir(roles_path): - print "- %s exists, but it is not a directory. 
Please specify a valid path with --roles-path" % roles_path - parser.print_help() - sys.exit(1) - path_files = os.listdir(roles_path) - for path_file in path_files: - if get_role_metadata(path_file, options): - install_info = get_galaxy_install_info(path_file, options) - version = None - if install_info: - version = install_info.get("version", None) - if not version: - version = "(unknown version)" - print "- %s, %s" % (path_file, version) - sys.exit(0) - -#------------------------------------------------------------------------------------- -# The main entry point -#------------------------------------------------------------------------------------- - -def main(): - # parse the CLI options - action = get_action(sys.argv) - parser = build_option_parser(action) - (options, args) = parser.parse_args() - - # execute the desired action - if 1: #try: - fn = globals()["execute_%s" % action] - fn(args, options, parser) - #except KeyError, e: - # print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS)) - # sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-galaxy @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-playbook b/bin/ansible-playbook deleted file mode 100755 index 3d6e1f9f4029de..00000000000000 --- a/bin/ansible-playbook +++ /dev/null @@ -1,330 +0,0 @@ -#!/usr/bin/env python -# (C) 2012, Michael DeHaan, - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -####################################################### - -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. - pass - -import sys -import os -import stat - -# Augment PYTHONPATH to find Python modules relative to this file path -# This is so that we can find the modules when running from a local checkout -# installed as editable with `pip install -e ...` or `python setup.py develop` -local_module_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', 'lib') -) -sys.path.append(local_module_path) - -import ansible.playbook -import ansible.constants as C -import ansible.utils.template -from ansible import errors -from ansible import callbacks -from ansible import utils -from ansible.color import ANSIBLE_COLOR, stringc -from ansible.callbacks import display - -def colorize(lead, num, color): - """ Print 'lead' = 'num' in 'color' """ - if num != 0 and ANSIBLE_COLOR and color is not None: - return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color)) - else: - return "%s=%-4s" % (lead, str(num)) - -def hostcolor(host, stats, color=True): - if ANSIBLE_COLOR and color: - if stats['failures'] != 0 or stats['unreachable'] != 0: - return "%-37s" % stringc(host, 'red') - elif stats['changed'] != 0: - return "%-37s" % stringc(host, 'yellow') - 
else: - return "%-37s" % stringc(host, 'green') - return "%-26s" % host - - -def main(args): - ''' run ansible-playbook operations ''' - - # create parser for CLI options - parser = utils.base_parser( - constants=C, - usage = "%prog playbook.yml", - connect_opts=True, - runas_opts=True, - subset_opts=True, - check_opts=True, - diff_opts=True - ) - #parser.add_option('--vault-password', dest="vault_password", - # help="password for vault encrypted files") - parser.add_option('-t', '--tags', dest='tags', default='all', - help="only run plays and tasks tagged with these values") - parser.add_option('--skip-tags', dest='skip_tags', - help="only run plays and tasks whose tags do not match these values") - parser.add_option('--syntax-check', dest='syntax', action='store_true', - help="perform a syntax check on the playbook, but do not execute it") - parser.add_option('--list-tasks', dest='listtasks', action='store_true', - help="list all tasks that would be executed") - parser.add_option('--list-tags', dest='listtags', action='store_true', - help="list all available tags") - parser.add_option('--step', dest='step', action='store_true', - help="one-step-at-a-time: confirm each task before running") - parser.add_option('--start-at-task', dest='start_at', - help="start the playbook at the task matching this name") - parser.add_option('--force-handlers', dest='force_handlers', - default=C.DEFAULT_FORCE_HANDLERS, action='store_true', - help="run handlers even if a task fails") - parser.add_option('--flush-cache', dest='flush_cache', action='store_true', - help="clear the fact cache") - - options, args = parser.parse_args(args) - - if len(args) == 0: - parser.print_help(file=sys.stderr) - return 1 - - # privlege escalation command line arguments need to be mutually exclusive - utils.check_mutually_exclusive_privilege(options, parser) - - if (options.ask_vault_pass and options.vault_password_file): - parser.error("--ask-vault-pass and --vault-password-file are mutually 
exclusive") - - sshpass = None - becomepass = None - vault_pass = None - - options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS - - if options.listhosts or options.syntax or options.listtasks or options.listtags: - (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass) - else: - options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS - # Never ask for an SSH password when we run with local connection - if options.connection == "local": - options.ask_pass = False - - # set pe options - utils.normalize_become_options(options) - prompt_method = utils.choose_pass_prompt(options) - (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, - become_ask_pass=options.become_ask_pass, - ask_vault_pass=options.ask_vault_pass, - become_method=prompt_method) - - # read vault_pass from a file - if not options.ask_vault_pass and options.vault_password_file: - vault_pass = utils.read_vault_file(options.vault_password_file) - - extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) - - only_tags = options.tags.split(",") - skip_tags = options.skip_tags - if options.skip_tags is not None: - skip_tags = options.skip_tags.split(",") - - for playbook in args: - if not os.path.exists(playbook): - raise errors.AnsibleError("the playbook: %s could not be found" % playbook) - if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)): - raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook) - - inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass) - - # Note: slightly wrong, this is written so that implicit localhost - # (which is not returned in list_hosts()) is taken into account for - # warning if inventory is empty. But it can't be taken into account for - # checking if limit doesn't match any hosts. Instead we don't worry about - # limit if only implicit localhost was in inventory to start with. 
- # - # Fix this in v2 - no_hosts = False - if len(inventory.list_hosts()) == 0: - # Empty inventory - utils.warning("provided hosts list is empty, only localhost is available") - no_hosts = True - inventory.subset(options.subset) - if len(inventory.list_hosts()) == 0 and no_hosts is False: - # Invalid limit - raise errors.AnsibleError("Specified --limit does not match any hosts") - - # run all playbooks specified on the command line - for playbook in args: - - stats = callbacks.AggregateStats() - playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY) - if options.step: - playbook_cb.step = options.step - if options.start_at: - playbook_cb.start_at = options.start_at - runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY) - - pb = ansible.playbook.PlayBook( - playbook=playbook, - module_path=options.module_path, - inventory=inventory, - forks=options.forks, - remote_user=options.remote_user, - remote_pass=sshpass, - callbacks=playbook_cb, - runner_callbacks=runner_cb, - stats=stats, - timeout=options.timeout, - transport=options.connection, - become=options.become, - become_method=options.become_method, - become_user=options.become_user, - become_pass=becomepass, - extra_vars=extra_vars, - private_key_file=options.private_key_file, - only_tags=only_tags, - skip_tags=skip_tags, - check=options.check, - diff=options.diff, - vault_password=vault_pass, - force_handlers=options.force_handlers, - ) - - if options.flush_cache: - display(callbacks.banner("FLUSHING FACT CACHE")) - pb.SETUP_CACHE.flush() - - if options.listhosts or options.listtasks or options.syntax or options.listtags: - print '' - print 'playbook: %s' % playbook - print '' - playnum = 0 - for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs): - playnum += 1 - play = ansible.playbook.Play(pb, play_ds, play_basedir, - vault_password=pb.vault_password) - label = play.name - hosts = pb.inventory.list_hosts(play.hosts) - - if options.listhosts: - print ' play #%d 
(%s): host count=%d' % (playnum, label, len(hosts)) - for host in hosts: - print ' %s' % host - - if options.listtags or options.listtasks: - print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags)))) - - if options.listtags: - tags = [] - for task in pb.tasks_to_run_in_play(play): - tags.extend(task.tags) - print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged'])))) - - if options.listtasks: - - for task in pb.tasks_to_run_in_play(play): - if getattr(task, 'name', None) is not None: - # meta tasks have no names - print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged'])))) - - if options.listhosts or options.listtasks or options.listtags: - print '' - continue - - if options.syntax: - # if we've not exited by now then we are fine. - print 'Playbook Syntax is fine' - return 0 - - failed_hosts = [] - unreachable_hosts = [] - - try: - - pb.run() - - hosts = sorted(pb.stats.processed.keys()) - display(callbacks.banner("PLAY RECAP")) - playbook_cb.on_stats(pb.stats) - - for h in hosts: - t = pb.stats.summarize(h) - if t['failures'] > 0: - failed_hosts.append(h) - if t['unreachable'] > 0: - unreachable_hosts.append(h) - - retries = failed_hosts + unreachable_hosts - - if C.RETRY_FILES_ENABLED and len(retries) > 0: - filename = pb.generate_retry_inventory(retries) - if filename: - display(" to retry, use: --limit @%s\n" % filename) - - for h in hosts: - t = pb.stats.summarize(h) - - display("%s : %s %s %s %s" % ( - hostcolor(h, t), - colorize('ok', t['ok'], 'green'), - colorize('changed', t['changed'], 'yellow'), - colorize('unreachable', t['unreachable'], 'red'), - colorize('failed', t['failures'], 'red')), - screen_only=True - ) - - display("%s : %s %s %s %s" % ( - hostcolor(h, t, False), - colorize('ok', t['ok'], None), - colorize('changed', t['changed'], None), - colorize('unreachable', t['unreachable'], None), - colorize('failed', t['failures'], None)), - log_only=True - ) - - - 
print "" - if len(failed_hosts) > 0: - return 2 - if len(unreachable_hosts) > 0: - return 3 - - except errors.AnsibleError, e: - display("ERROR: %s" % e, color='red') - return 1 - - return 0 - - -if __name__ == "__main__": - display(" ", log_only=True) - display(" ".join(sys.argv), log_only=True) - display(" ", log_only=True) - try: - sys.exit(main(sys.argv[1:])) - except errors.AnsibleError, e: - display("ERROR: %s" % e, color='red', stderr=True) - sys.exit(1) - except KeyboardInterrupt, ke: - display("ERROR: interrupted", color='red', stderr=True) - sys.exit(1) diff --git a/bin/ansible-playbook b/bin/ansible-playbook new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-playbook @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-pull b/bin/ansible-pull deleted file mode 100755 index d4887631e0fdfb..00000000000000 --- a/bin/ansible-pull +++ /dev/null @@ -1,257 +0,0 @@ -#!/usr/bin/env python - -# (c) 2012, Stephen Fromm -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# ansible-pull is a script that runs ansible in local mode -# after checking out a playbooks directory from source repo. There is an -# example playbook to bootstrap this script in the examples/ dir which -# installs ansible and sets it up to run on cron. 
- -# usage: -# ansible-pull -d /var/lib/ansible \ -# -U http://example.net/content.git [-C production] \ -# [path/playbook.yml] -# -# the -d and -U arguments are required; the -C argument is optional. -# -# ansible-pull accepts an optional argument to specify a playbook -# location underneath the workdir and then searches the source repo -# for playbooks in the following order, stopping at the first match: -# -# 1. $workdir/path/playbook.yml, if specified -# 2. $workdir/$fqdn.yml -# 3. $workdir/$hostname.yml -# 4. $workdir/local.yml -# -# the source repo must contain at least one of these playbooks. - -import os -import shutil -import sys -import datetime -import socket -import random -import time -from ansible import utils -from ansible.utils import cmd_functions -from ansible import errors -from ansible import inventory - -DEFAULT_REPO_TYPE = 'git' -DEFAULT_PLAYBOOK = 'local.yml' -PLAYBOOK_ERRORS = {1: 'File does not exist', - 2: 'File is not readable'} - -VERBOSITY=0 - -def increment_debug(option, opt, value, parser): - global VERBOSITY - VERBOSITY += 1 - -def try_playbook(path): - if not os.path.exists(path): - return 1 - if not os.access(path, os.R_OK): - return 2 - return 0 - - -def select_playbook(path, args): - playbook = None - if len(args) > 0 and args[0] is not None: - playbook = "%s/%s" % (path, args[0]) - rc = try_playbook(playbook) - if rc != 0: - print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc]) - return None - return playbook - else: - fqdn = socket.getfqdn() - hostpb = "%s/%s.yml" % (path, fqdn) - shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0]) - localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK) - errors = [] - for pb in [hostpb, shorthostpb, localpb]: - rc = try_playbook(pb) - if rc == 0: - playbook = pb - break - else: - errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc])) - if playbook is None: - print >>sys.stderr, "\n".join(errors) - return playbook - - -def main(args): - """ Set up and run a local playbook """ - usage = "%prog 
[options] [playbook.yml]" - parser = utils.SortedOptParser(usage=usage) - parser.add_option('--purge', default=False, action='store_true', - help='purge checkout after playbook run') - parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', - help='only run the playbook if the repository has been updated') - parser.add_option('-s', '--sleep', dest='sleep', default=None, - help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests') - parser.add_option('-f', '--force', dest='force', default=False, - action='store_true', - help='run the playbook even if the repository could ' - 'not be updated') - parser.add_option('-d', '--directory', dest='dest', default=None, - help='directory to checkout repository to') - #parser.add_option('-l', '--live', default=True, action='store_live', - # help='Print the ansible-playbook output while running') - parser.add_option('-U', '--url', dest='url', default=None, - help='URL of the playbook repository') - parser.add_option('-C', '--checkout', dest='checkout', - help='branch/tag/commit to checkout. ' - 'Defaults to behavior of repository module.') - parser.add_option('-i', '--inventory-file', dest='inventory', - help="location of the inventory host file") - parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", - help="set additional variables as key=value or YAML/JSON", default=[]) - parser.add_option('-v', '--verbose', default=False, action="callback", - callback=increment_debug, - help='Pass -vvvv to ansible-playbook') - parser.add_option('-m', '--module-name', dest='module_name', - default=DEFAULT_REPO_TYPE, - help='Module name used to check out repository. ' - 'Default is %s.' 
% DEFAULT_REPO_TYPE) - parser.add_option('--vault-password-file', dest='vault_password_file', - help="vault password file") - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', - help='ask for sudo password') - parser.add_option('-t', '--tags', dest='tags', default=False, - help='only run plays and tasks tagged with these values') - parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', - help='adds the hostkey for the repo url if not already added') - parser.add_option('--key-file', dest='key_file', - help="Pass '-i ' to the SSH arguments used by git.") - options, args = parser.parse_args(args) - - hostname = socket.getfqdn() - if not options.dest: - # use a hostname dependent directory, in case of $HOME on nfs - options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname) - - options.dest = os.path.abspath(options.dest) - - if not options.url: - parser.error("URL for repository not specified, use -h for help") - return 1 - - now = datetime.datetime.now() - print now.strftime("Starting ansible-pull at %F %T") - - # Attempt to use the inventory passed in as an argument - # It might not yet have been downloaded so use localhost if note - if not options.inventory or not os.path.exists(options.inventory): - inv_opts = 'localhost,' - else: - inv_opts = options.inventory - limit_opts = 'localhost:%s:127.0.0.1' % hostname - repo_opts = "name=%s dest=%s" % (options.url, options.dest) - - if VERBOSITY == 0: - base_opts = '-c local --limit "%s"' % limit_opts - elif VERBOSITY > 0: - debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ]) - base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts) - - if options.checkout: - repo_opts += ' version=%s' % options.checkout - - # Only git module is supported - if options.module_name == DEFAULT_REPO_TYPE: - if options.accept_host_key: - repo_opts += ' accept_hostkey=yes' - - if options.key_file: - repo_opts += 
' key_file=%s' % options.key_file - - path = utils.plugins.module_finder.find_plugin(options.module_name) - if path is None: - sys.stderr.write("module '%s' not found.\n" % options.module_name) - return 1 - - bin_path = os.path.dirname(os.path.abspath(__file__)) - cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % ( - bin_path, inv_opts, base_opts, options.module_name, repo_opts - ) - - for ev in options.extra_vars: - cmd += ' -e "%s"' % ev - - if options.sleep: - try: - secs = random.randint(0,int(options.sleep)); - except ValueError: - parser.error("%s is not a number." % options.sleep) - return 1 - - print >>sys.stderr, "Sleeping for %d seconds..." % secs - time.sleep(secs); - - - # RUN THe CHECKOUT COMMAND - rc, out, err = cmd_functions.run_cmd(cmd, live=True) - - if rc != 0: - if options.force: - print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook." - else: - return rc - elif options.ifchanged and '"changed": true' not in out: - print "Repository has not changed, quitting." - return 0 - - playbook = select_playbook(options.dest, args) - - if playbook is None: - print >>sys.stderr, "Could not find a playbook to run." 
- return 1 - - cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook) - if options.vault_password_file: - cmd += " --vault-password-file=%s" % options.vault_password_file - if options.inventory: - cmd += ' -i "%s"' % options.inventory - for ev in options.extra_vars: - cmd += ' -e "%s"' % ev - if options.ask_sudo_pass: - cmd += ' -K' - if options.tags: - cmd += ' -t "%s"' % options.tags - os.chdir(options.dest) - - # RUN THE PLAYBOOK COMMAND - rc, out, err = cmd_functions.run_cmd(cmd, live=True) - - if options.purge: - os.chdir('/') - try: - shutil.rmtree(options.dest) - except Exception, e: - print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e)) - - return rc - -if __name__ == '__main__': - try: - sys.exit(main(sys.argv[1:])) - except KeyboardInterrupt, e: - print >>sys.stderr, "Exit on user request.\n" - sys.exit(1) diff --git a/bin/ansible-pull b/bin/ansible-pull new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-pull @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-vault b/bin/ansible-vault deleted file mode 100755 index 22cfc0e14877af..00000000000000 --- a/bin/ansible-vault +++ /dev/null @@ -1,241 +0,0 @@ -#!/usr/bin/env python - -# (c) 2014, James Tanner -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# ansible-vault is a script that encrypts/decrypts YAML files. 
See -# http://docs.ansible.com/playbooks_vault.html for more details. - -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. - pass - -import os -import sys -import traceback - -import ansible.constants as C - -from ansible import utils -from ansible import errors -from ansible.utils.vault import VaultEditor - -from optparse import OptionParser - -#------------------------------------------------------------------------------------- -# Utility functions for parsing actions/options -#------------------------------------------------------------------------------------- - -VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view") - -def build_option_parser(action): - """ - Builds an option parser object based on the action - the user wants to execute. 
- """ - - usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS) - epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) - OptionParser.format_epilog = lambda self, formatter: self.epilog - parser = OptionParser(usage=usage, epilog=epilog) - - if not action: - parser.print_help() - sys.exit() - - # options for all actions - #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use") - parser.add_option('--debug', dest='debug', action="store_true", help="debug") - parser.add_option('--vault-password-file', dest='password_file', - help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE) - - # options specific to actions - if action == "create": - parser.set_usage("usage: %prog create [options] file_name") - elif action == "decrypt": - parser.set_usage("usage: %prog decrypt [options] file_name") - elif action == "edit": - parser.set_usage("usage: %prog edit [options] file_name") - elif action == "view": - parser.set_usage("usage: %prog view [options] file_name") - elif action == "encrypt": - parser.set_usage("usage: %prog encrypt [options] file_name") - elif action == "rekey": - parser.set_usage("usage: %prog rekey [options] file_name") - - # done, return the parser - return parser - -def get_action(args): - """ - Get the action the user wants to execute from the - sys argv list. - """ - for i in range(0,len(args)): - arg = args[i] - if arg in VALID_ACTIONS: - del args[i] - return arg - return None - -def get_opt(options, k, defval=""): - """ - Returns an option from an Optparse values instance. 
- """ - try: - data = getattr(options, k) - except: - return defval - if k == "roles_path": - if os.pathsep in data: - data = data.split(os.pathsep)[0] - return data - -#------------------------------------------------------------------------------------- -# Command functions -#------------------------------------------------------------------------------------- - -def execute_create(args, options, parser): - if len(args) > 1: - raise errors.AnsibleError("'create' does not accept more than one filename") - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - this_editor = VaultEditor(cipher, password, args[0]) - this_editor.create_file() - -def execute_decrypt(args, options, parser): - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.decrypt_file() - - print "Decryption successful" - -def execute_edit(args, options, parser): - - if len(args) > 1: - raise errors.AnsibleError("edit does not accept more than one filename") - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = None - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.edit_file() - -def execute_view(args, options, parser): - - if len(args) > 1: - raise errors.AnsibleError("view does not accept more than one filename") - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = 
utils.read_vault_file(options.password_file) - - cipher = None - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.view_file() - -def execute_encrypt(args, options, parser): - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.encrypt_file() - - print "Encryption successful" - -def execute_rekey(args, options, parser): - - if not options.password_file: - password, __ = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True) - - cipher = None - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.rekey_file(new_password) - - print "Rekey successful" - -#------------------------------------------------------------------------------------- -# MAIN -#------------------------------------------------------------------------------------- - -def main(): - - action = get_action(sys.argv) - parser = build_option_parser(action) - (options, args) = parser.parse_args() - - if not len(args): - raise errors.AnsibleError( - "The '%s' command requires a filename as the first argument" % action - ) - - # execute the desired action - try: - fn = globals()["execute_%s" % action] - fn(args, options, parser) - except Exception, err: - if options.debug: - print traceback.format_exc() - print "ERROR:",err - sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/bin/ansible-vault b/bin/ansible-vault new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-vault @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git 
a/lib/ansible/__init__.py b/lib/ansible/__init__.py index ba5ca83b7231d1..8637adb54d6c16 100644 --- a/lib/ansible/__init__.py +++ b/lib/ansible/__init__.py @@ -14,5 +14,9 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -__version__ = '2.0.0' -__author__ = 'Michael DeHaan' + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +__version__ = '2.0' diff --git a/v2/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py similarity index 100% rename from v2/ansible/cli/__init__.py rename to lib/ansible/cli/__init__.py diff --git a/v2/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py similarity index 100% rename from v2/ansible/cli/adhoc.py rename to lib/ansible/cli/adhoc.py diff --git a/v2/ansible/cli/doc.py b/lib/ansible/cli/doc.py similarity index 100% rename from v2/ansible/cli/doc.py rename to lib/ansible/cli/doc.py diff --git a/v2/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py similarity index 100% rename from v2/ansible/cli/galaxy.py rename to lib/ansible/cli/galaxy.py diff --git a/v2/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py similarity index 100% rename from v2/ansible/cli/playbook.py rename to lib/ansible/cli/playbook.py diff --git a/v2/ansible/cli/pull.py b/lib/ansible/cli/pull.py similarity index 100% rename from v2/ansible/cli/pull.py rename to lib/ansible/cli/pull.py diff --git a/v2/ansible/cli/vault.py b/lib/ansible/cli/vault.py similarity index 100% rename from v2/ansible/cli/vault.py rename to lib/ansible/cli/vault.py diff --git a/v2/ansible/compat/__init__.py b/lib/ansible/compat/__init__.py similarity index 100% rename from v2/ansible/compat/__init__.py rename to lib/ansible/compat/__init__.py diff --git a/v2/ansible/compat/tests/__init__.py b/lib/ansible/compat/tests/__init__.py similarity index 100% rename from v2/ansible/compat/tests/__init__.py rename to lib/ansible/compat/tests/__init__.py diff --git 
a/v2/ansible/compat/tests/mock.py b/lib/ansible/compat/tests/mock.py similarity index 100% rename from v2/ansible/compat/tests/mock.py rename to lib/ansible/compat/tests/mock.py diff --git a/v2/ansible/compat/tests/unittest.py b/lib/ansible/compat/tests/unittest.py similarity index 100% rename from v2/ansible/compat/tests/unittest.py rename to lib/ansible/compat/tests/unittest.py diff --git a/v2/ansible/config/__init__.py b/lib/ansible/config/__init__.py similarity index 100% rename from v2/ansible/config/__init__.py rename to lib/ansible/config/__init__.py diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 089de5b7c5bf15..456beb8bbc40f4 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -15,10 +15,15 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import os import pwd import sys -import ConfigParser + +from six.moves import configparser from string import ascii_letters, digits # copied from utils, avoid circular reference fun :) @@ -35,13 +40,15 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False, ''' return a configuration variable with casting ''' value = _get_config(p, section, key, env_var, default) if boolean: - return mk_boolean(value) - if value and integer: - return int(value) - if value and floating: - return float(value) - if value and islist: - return [x.strip() for x in value.split(',')] + value = mk_boolean(value) + if value: + if integer: + value = int(value) + elif floating: + value = float(value) + elif islist: + if isinstance(value, basestring): + value = [x.strip() for x in value.split(',')] return value def _get_config(p, section, key, env_var, default): @@ -60,7 +67,7 @@ def _get_config(p, section, key, env_var, default): def load_config_file(): ''' Load Config File order(first found is used): 
ENV, CWD, HOME, /etc/ansible ''' - p = ConfigParser.ConfigParser() + p = configparser.ConfigParser() path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: @@ -73,8 +80,8 @@ def load_config_file(): if path is not None and os.path.exists(path): try: p.read(path) - except ConfigParser.Error as e: - print "Error reading config file: \n%s" % e + except configparser.Error as e: + print("Error reading config file: \n{0}".format(e)) sys.exit(1) return p return None @@ -98,7 +105,8 @@ def shell_expand_path(path): DEFAULTS='defaults' # configurable things -DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts'))) +DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) +DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts'))) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') @@ -112,6 +120,7 @@ def shell_expand_path(path): DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) +DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) DEFAULT_ASK_VAULT_PASS = 
get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) @@ -122,7 +131,6 @@ def shell_expand_path(path): DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) -DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') @@ -141,7 +149,7 @@ def shell_expand_path(path): BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() -DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None) +DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root') DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) # need to rethink impementing these 2 DEFAULT_BECOME_EXE = None @@ -156,6 +164,7 @@ def shell_expand_path(path): DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', 
'~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') +DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default') CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) @@ -173,8 +182,8 @@ def shell_expand_path(path): DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) -DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) - +RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) +RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') @@ -196,10 +205,16 @@ def shell_expand_path(path): ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True) PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) +# galaxy related +DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') +# this can be 
configured to blacklist SCMS but cannot add new ones unless the code is also updated +GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True) + # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" # non-configurable things +MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] DEFAULT_BECOME_PASS = None DEFAULT_SUDO_PASS = None DEFAULT_REMOTE_PASS = None diff --git a/v2/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py similarity index 100% rename from v2/ansible/errors/__init__.py rename to lib/ansible/errors/__init__.py diff --git a/v2/ansible/errors/yaml_strings.py b/lib/ansible/errors/yaml_strings.py similarity index 100% rename from v2/ansible/errors/yaml_strings.py rename to lib/ansible/errors/yaml_strings.py diff --git a/v2/ansible/executor/__init__.py b/lib/ansible/executor/__init__.py similarity index 100% rename from v2/ansible/executor/__init__.py rename to lib/ansible/executor/__init__.py diff --git a/v2/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py similarity index 100% rename from v2/ansible/executor/connection_info.py rename to lib/ansible/executor/connection_info.py diff --git a/v2/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py similarity index 100% rename from v2/ansible/executor/module_common.py rename to lib/ansible/executor/module_common.py diff --git a/v2/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py similarity index 100% rename from v2/ansible/executor/play_iterator.py rename to lib/ansible/executor/play_iterator.py diff --git a/v2/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py similarity index 100% rename from v2/ansible/executor/playbook_executor.py rename to lib/ansible/executor/playbook_executor.py diff --git a/v2/ansible/executor/process/__init__.py b/lib/ansible/executor/process/__init__.py similarity index 
100% rename from v2/ansible/executor/process/__init__.py rename to lib/ansible/executor/process/__init__.py diff --git a/v2/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py similarity index 100% rename from v2/ansible/executor/process/result.py rename to lib/ansible/executor/process/result.py diff --git a/v2/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py similarity index 100% rename from v2/ansible/executor/process/worker.py rename to lib/ansible/executor/process/worker.py diff --git a/v2/ansible/executor/stats.py b/lib/ansible/executor/stats.py similarity index 100% rename from v2/ansible/executor/stats.py rename to lib/ansible/executor/stats.py diff --git a/v2/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py similarity index 100% rename from v2/ansible/executor/task_executor.py rename to lib/ansible/executor/task_executor.py diff --git a/v2/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py similarity index 100% rename from v2/ansible/executor/task_queue_manager.py rename to lib/ansible/executor/task_queue_manager.py diff --git a/v2/ansible/executor/task_queue_manager.py: b/lib/ansible/executor/task_queue_manager.py: similarity index 100% rename from v2/ansible/executor/task_queue_manager.py: rename to lib/ansible/executor/task_queue_manager.py: diff --git a/v2/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py similarity index 100% rename from v2/ansible/executor/task_result.py rename to lib/ansible/executor/task_result.py diff --git a/v2/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py similarity index 100% rename from v2/ansible/galaxy/__init__.py rename to lib/ansible/galaxy/__init__.py diff --git a/v2/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py similarity index 100% rename from v2/ansible/galaxy/api.py rename to lib/ansible/galaxy/api.py diff --git a/v2/ansible/galaxy/data/metadata_template.j2 
b/lib/ansible/galaxy/data/metadata_template.j2 similarity index 100% rename from v2/ansible/galaxy/data/metadata_template.j2 rename to lib/ansible/galaxy/data/metadata_template.j2 diff --git a/v2/ansible/galaxy/data/readme b/lib/ansible/galaxy/data/readme similarity index 100% rename from v2/ansible/galaxy/data/readme rename to lib/ansible/galaxy/data/readme diff --git a/v2/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py similarity index 100% rename from v2/ansible/galaxy/role.py rename to lib/ansible/galaxy/role.py diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 2048046d3c1f21..063398f17f9cdf 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -16,36 +16,44 @@ # along with Ansible. If not, see . ############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import fnmatch import os import sys import re +import stat import subprocess -import ansible.constants as C +from ansible import constants as C +from ansible.errors import * + from ansible.inventory.ini import InventoryParser from ansible.inventory.script import InventoryScript from ansible.inventory.dir import InventoryDirectory from ansible.inventory.group import Group from ansible.inventory.host import Host -from ansible import errors -from ansible import utils +from ansible.plugins import vars_loader +from ansible.utils.path import is_executable +from ansible.utils.vars import combine_vars class Inventory(object): """ Host inventory for ansible. 
""" - __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', - 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', - '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] + #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', + # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', + # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] - def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): + def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): # the host file file, or script path, or list of hosts # if a list, inventory data will NOT be loaded self.host_list = host_list - self._vault_password=vault_password + self._loader = loader + self._variable_manager = variable_manager # caching to avoid repeated calculations, particularly with # external inventory scripts. @@ -97,7 +105,7 @@ def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): if os.path.isdir(host_list): # Ensure basedir is inside the directory self.host_list = os.path.join(self.host_list, "") - self.parser = InventoryDirectory(filename=host_list) + self.parser = InventoryDirectory(loader=self._loader, filename=host_list) self.groups = self.parser.groups.values() else: # check to see if the specified file starts with a @@ -113,9 +121,9 @@ def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): except: pass - if utils.is_executable(host_list): + if is_executable(host_list): try: - self.parser = InventoryScript(filename=host_list) + self.parser = InventoryScript(loader=self._loader, filename=host_list) self.groups = self.parser.groups.values() except: if not shebang_present: @@ -134,19 +142,23 @@ def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): else: raise - utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True) + 
vars_loader.add_directory(self.basedir(), with_subdir=True) else: raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?") - self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ] + self._vars_plugins = [ x for x in vars_loader.all(self) ] + # FIXME: shouldn't be required, since the group/host vars file + # management will be done in VariableManager # get group vars from group_vars/ files and vars plugins for group in self.groups: - group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password)) + # FIXME: combine_vars + group.vars = combine_vars(group.vars, self.get_group_variables(group.name)) # get host vars from host_vars/ files and vars plugins for host in self.get_hosts(): - host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password)) + # FIXME: combine_vars + host.vars = combine_vars(host.vars, self.get_host_variables(host.name)) def _match(self, str, pattern_str): @@ -192,9 +204,9 @@ def get_hosts(self, pattern="all"): # exclude hosts mentioned in any restriction (ex: failed hosts) if self._restriction is not None: - hosts = [ h for h in hosts if h.name in self._restriction ] + hosts = [ h for h in hosts if h in self._restriction ] if self._also_restriction is not None: - hosts = [ h for h in hosts if h.name in self._also_restriction ] + hosts = [ h for h in hosts if h in self._also_restriction ] return hosts @@ -320,6 +332,8 @@ def _create_implicit_localhost(self, pattern): new_host = Host(pattern) new_host.set_variable("ansible_python_interpreter", sys.executable) new_host.set_variable("ansible_connection", "local") + new_host.ipv4_address = '127.0.0.1' + ungrouped = self.get_group("ungrouped") if ungrouped is None: self.add_group(Group('ungrouped')) @@ -420,7 +434,7 @@ def _get_group_variables(self, groupname, vault_password=None): group = self.get_group(groupname) if group is None: - raise 
errors.AnsibleError("group not found: %s" % groupname) + raise Exception("group not found: %s" % groupname) vars = {} @@ -428,19 +442,21 @@ def _get_group_variables(self, groupname, vault_password=None): vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')] for updated in vars_results: if updated is not None: - vars = utils.combine_vars(vars, updated) + # FIXME: combine_vars + vars = combine_vars(vars, updated) # Read group_vars/ files - vars = utils.combine_vars(vars, self.get_group_vars(group)) + # FIXME: combine_vars + vars = combine_vars(vars, self.get_group_vars(group)) return vars - def get_variables(self, hostname, update_cached=False, vault_password=None): + def get_vars(self, hostname, update_cached=False, vault_password=None): host = self.get_host(hostname) if not host: - raise errors.AnsibleError("host not found: %s" % hostname) - return host.get_variables() + raise Exception("host not found: %s" % hostname) + return host.get_vars() def get_host_variables(self, hostname, update_cached=False, vault_password=None): @@ -460,22 +476,26 @@ def _get_host_variables(self, hostname, vault_password=None): vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')] for updated in vars_results: if updated is not None: - vars = utils.combine_vars(vars, updated) + # FIXME: combine_vars + vars = combine_vars(vars, updated) # plugin.get_host_vars retrieves just vars for specific host vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')] for updated in vars_results: if updated is not None: - vars = utils.combine_vars(vars, updated) + # FIXME: combine_vars + vars = combine_vars(vars, updated) # still need to check InventoryParser per host vars # which actually means InventoryScript per host, # which is not performant if 
self.parser is not None: - vars = utils.combine_vars(vars, self.parser.get_host_variables(host)) + # FIXME: combine_vars + vars = combine_vars(vars, self.parser.get_host_variables(host)) # Read host_vars/ files - vars = utils.combine_vars(vars, self.get_host_vars(host)) + # FIXME: combine_vars + vars = combine_vars(vars, self.get_host_vars(host)) return vars @@ -490,7 +510,7 @@ def list_hosts(self, pattern="all"): """ return a list of hostnames for a pattern """ - result = [ h.name for h in self.get_hosts(pattern) ] + result = [ h for h in self.get_hosts(pattern) ] if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]: result = [pattern] return result @@ -498,11 +518,7 @@ def list_hosts(self, pattern="all"): def list_groups(self): return sorted([ g.name for g in self.groups ], key=lambda x: x) - # TODO: remove this function - def get_restriction(self): - return self._restriction - - def restrict_to(self, restriction): + def restrict_to_hosts(self, restriction): """ Restrict list operations to the hosts given in restriction. 
This is used to exclude failed hosts in main playbook code, don't use this for other @@ -544,7 +560,7 @@ def subset(self, subset_pattern): results.append(x) self._subset = results - def lift_restriction(self): + def remove_restriction(self): """ Do not restrict list operations """ self._restriction = None @@ -588,10 +604,12 @@ def set_playbook_basedir(self, dir): self._playbook_basedir = dir # get group vars from group_vars/ files for group in self.groups: - group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) + # FIXME: combine_vars + group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) # get host vars from host_vars/ files for host in self.get_hosts(): - host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) + # FIXME: combine_vars + host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) # invalidate cache self._vars_per_host = {} self._vars_per_group = {} @@ -639,15 +657,15 @@ def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False): if _basedir == self._playbook_basedir and scan_pass != 1: continue + # FIXME: these should go to VariableManager if group and host is None: # load vars in dir/group_vars/name_of_group base_path = os.path.join(basedir, "group_vars/%s" % group.name) - results = utils.load_vars(base_path, results, vault_password=self._vault_password) - + self._variable_manager.add_group_vars_file(base_path, self._loader) elif host and group is None: # same for hostvars in dir/host_vars/name_of_host base_path = os.path.join(basedir, "host_vars/%s" % host.name) - results = utils.load_vars(base_path, results, vault_password=self._vault_password) + self._variable_manager.add_host_vars_file(base_path, self._loader) # all done, results is a dictionary of variables for this particular host. 
return results diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py index 9ac23fff89911f..735f32d62c35a6 100644 --- a/lib/ansible/inventory/dir.py +++ b/lib/ansible/inventory/dir.py @@ -17,20 +17,25 @@ # along with Ansible. If not, see . ############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os -import ansible.constants as C + +from ansible import constants as C +from ansible.errors import AnsibleError + from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.ini import InventoryParser from ansible.inventory.script import InventoryScript -from ansible import utils -from ansible import errors +from ansible.utils.path import is_executable +from ansible.utils.vars import combine_vars class InventoryDirectory(object): ''' Host inventory parser for ansible using a directory of inventories. ''' - def __init__(self, filename=C.DEFAULT_HOST_LIST): + def __init__(self, loader, filename=C.DEFAULT_HOST_LIST): self.names = os.listdir(filename) self.names.sort() self.directory = filename @@ -38,10 +43,12 @@ def __init__(self, filename=C.DEFAULT_HOST_LIST): self.hosts = {} self.groups = {} + self._loader = loader + for i in self.names: # Skip files that end with certain extensions or characters - if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")): + if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo")): continue # Skip hidden files if i.startswith('.') and not i.startswith('./'): @@ -51,9 +58,9 @@ def __init__(self, filename=C.DEFAULT_HOST_LIST): continue fullpath = os.path.join(self.directory, i) if os.path.isdir(fullpath): - parser = InventoryDirectory(filename=fullpath) - elif utils.is_executable(fullpath): - parser = InventoryScript(filename=fullpath) + parser = InventoryDirectory(loader=loader, filename=fullpath) + elif 
is_executable(fullpath): + parser = InventoryScript(loader=loader, filename=fullpath) else: parser = InventoryParser(filename=fullpath) self.parsers.append(parser) @@ -153,7 +160,7 @@ def _merge_groups(self, group, newgroup): # name if group.name != newgroup.name: - raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name)) + raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name)) # depth group.depth = max([group.depth, newgroup.depth]) @@ -196,14 +203,14 @@ def _merge_groups(self, group, newgroup): self.groups[newparent.name].add_child_group(group) # variables - group.vars = utils.combine_vars(group.vars, newgroup.vars) + group.vars = combine_vars(group.vars, newgroup.vars) def _merge_hosts(self,host, newhost): """ Merge all of instance newhost into host """ # name if host.name != newhost.name: - raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name)) + raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name)) # group membership relation for newgroup in newhost.groups: @@ -218,7 +225,7 @@ def _merge_hosts(self,host, newhost): self.groups[newgroup.name].add_host(host) # variables - host.vars = utils.combine_vars(host.vars, newhost.vars) + host.vars = combine_vars(host.vars, newhost.vars) def get_host_variables(self, host): """ Gets additional host variables from all inventories """ diff --git a/lib/ansible/inventory/expand_hosts.py b/lib/ansible/inventory/expand_hosts.py index f1297409355c22..b5a957c53fe89b 100644 --- a/lib/ansible/inventory/expand_hosts.py +++ b/lib/ansible/inventory/expand_hosts.py @@ -30,6 +30,9 @@ Note that when beg is specified with left zero padding, then the length of end must be the same as that of beg, else an exception is raised. 
''' +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + import string from ansible import errors diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py index 262558e69c87e8..6525e69b466bd1 100644 --- a/lib/ansible/inventory/group.py +++ b/lib/ansible/inventory/group.py @@ -14,11 +14,15 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type -class Group(object): +from ansible.utils.debug import debug + +class Group: ''' a group of ansible hosts ''' - __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ] + #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ] def __init__(self, name=None): @@ -29,9 +33,49 @@ def __init__(self, name=None): self.child_groups = [] self.parent_groups = [] self._hosts_cache = None + #self.clear_hosts_cache() - if self.name is None: - raise Exception("group name is required") + #if self.name is None: + # raise Exception("group name is required") + + def __repr__(self): + return self.get_name() + + def __getstate__(self): + return self.serialize() + + def __setstate__(self, data): + return self.deserialize(data) + + def serialize(self): + parent_groups = [] + for parent in self.parent_groups: + parent_groups.append(parent.serialize()) + + result = dict( + name=self.name, + vars=self.vars.copy(), + parent_groups=parent_groups, + depth=self.depth, + ) + + debug("serializing group, result is: %s" % result) + return result + + def deserialize(self, data): + debug("deserializing group, data is: %s" % data) + self.__init__() + self.name = data.get('name') + self.vars = data.get('vars', dict()) + + parent_groups = data.get('parent_groups', []) + for parent_data in parent_groups: + g = Group() + g.deserialize(parent_data) + self.parent_groups.append(g) + + 
def get_name(self): + return self.name def add_child_group(self, group): @@ -100,7 +144,7 @@ def _get_hosts(self): hosts.append(mine) return hosts - def get_variables(self): + def get_vars(self): return self.vars.copy() def _get_ancestors(self): diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index d4dc20fa462588..29d6afd991208a 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -15,24 +15,88 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import ansible.constants as C -from ansible import utils +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type -class Host(object): +from ansible import constants as C +from ansible.inventory.group import Group +from ansible.utils.vars import combine_vars + +__all__ = ['Host'] + +class Host: ''' a single ansible host ''' - __slots__ = [ 'name', 'vars', 'groups' ] + #__slots__ = [ 'name', 'vars', 'groups' ] + + def __getstate__(self): + return self.serialize() + + def __setstate__(self, data): + return self.deserialize(data) + + def __eq__(self, other): + return self.name == other.name + + def serialize(self): + groups = [] + for group in self.groups: + groups.append(group.serialize()) + + return dict( + name=self.name, + vars=self.vars.copy(), + ipv4_address=self.ipv4_address, + ipv6_address=self.ipv6_address, + port=self.port, + gathered_facts=self._gathered_facts, + groups=groups, + ) + + def deserialize(self, data): + self.__init__() + + self.name = data.get('name') + self.vars = data.get('vars', dict()) + self.ipv4_address = data.get('ipv4_address', '') + self.ipv6_address = data.get('ipv6_address', '') + self.port = data.get('port') + + groups = data.get('groups', []) + for group_data in groups: + g = Group() + g.deserialize(group_data) + self.groups.append(g) def __init__(self, name=None, port=None): self.name = name self.vars = {} 
self.groups = [] + + self.ipv4_address = name + self.ipv6_address = name + if port and port != C.DEFAULT_REMOTE_PORT: - self.set_variable('ansible_ssh_port', int(port)) + self.port = int(port) + else: + self.port = C.DEFAULT_REMOTE_PORT + + self._gathered_facts = False - if self.name is None: - raise Exception("host name is required") + def __repr__(self): + return self.get_name() + + def get_name(self): + return self.name + + @property + def gathered_facts(self): + return self._gathered_facts + + def set_gathered_facts(self, gathered): + self._gathered_facts = gathered def add_group(self, group): @@ -52,16 +116,15 @@ def get_groups(self): groups[a.name] = a return groups.values() - def get_variables(self): + def get_vars(self): results = {} groups = self.get_groups() for group in sorted(groups, key=lambda g: g.depth): - results = utils.combine_vars(results, group.get_variables()) - results = utils.combine_vars(results, self.vars) + results = combine_vars(results, group.get_vars()) + results = combine_vars(results, self.vars) results['inventory_hostname'] = self.name results['inventory_hostname_short'] = self.name.split('.')[0] results['group_names'] = sorted([ g.name for g in groups if g.name != 'all']) return results - diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index bd9a98e7f86249..e004ee8bb7584d 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -16,17 +16,20 @@ # along with Ansible. If not, see . 
############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type -import ansible.constants as C +import ast +import shlex +import re + +from ansible import constants as C +from ansible.errors import * from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import expand_hostname_range -from ansible import errors -from ansible import utils -import shlex -import re -import ast +from ansible.utils.unicode import to_unicode class InventoryParser(object): """ @@ -34,9 +37,8 @@ class InventoryParser(object): """ def __init__(self, filename=C.DEFAULT_HOST_LIST): - + self.filename = filename with open(filename) as fh: - self.filename = filename self.lines = fh.readlines() self.groups = {} self.hosts = {} @@ -54,10 +56,7 @@ def _parse(self): def _parse_value(v): if "#" not in v: try: - ret = ast.literal_eval(v) - if not isinstance(ret, float): - # Do not trim floats. Eg: "1.20" to 1.2 - return ret + v = ast.literal_eval(v) # Using explicit exceptions. # Likely a string that literal_eval does not like. We wil then just set it. except ValueError: @@ -66,7 +65,7 @@ def _parse_value(v): except SyntaxError: # Is this a hash with an equals at the end? 
pass - return v + return to_unicode(v, nonstring='passthru', errors='strict') # [webservers] # alpha @@ -91,8 +90,8 @@ def _parse_base_groups(self): self.groups = dict(all=all, ungrouped=ungrouped) active_group_name = 'ungrouped' - for lineno in range(len(self.lines)): - line = utils.before_comment(self.lines[lineno]).strip() + for line in self.lines: + line = self._before_comment(line).strip() if line.startswith("[") and line.endswith("]"): active_group_name = line.replace("[","").replace("]","") if ":vars" in line or ":children" in line: @@ -146,8 +145,11 @@ def _parse_base_groups(self): try: (k,v) = t.split("=", 1) except ValueError, e: - raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e))) - host.set_variable(k, self._parse_value(v)) + raise AnsibleError("Invalid ini entry in %s: %s - %s" % (self.filename, t, str(e))) + if k == 'ansible_ssh_host': + host.ipv4_address = self._parse_value(v) + else: + host.set_variable(k, self._parse_value(v)) self.groups[active_group_name].add_host(host) # [southeast:children] @@ -157,8 +159,8 @@ def _parse_base_groups(self): def _parse_group_children(self): group = None - for lineno in range(len(self.lines)): - line = self.lines[lineno].strip() + for line in self.lines: + line = line.strip() if line is None or line == '': continue if line.startswith("[") and ":children]" in line: @@ -173,7 +175,7 @@ def _parse_group_children(self): elif group: kid_group = self.groups.get(line, None) if kid_group is None: - raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line)) + raise AnsibleError("child group is not defined: (%s)" % line) else: group.add_child_group(kid_group) @@ -184,13 +186,13 @@ def _parse_group_children(self): def _parse_group_variables(self): group = None - for lineno in range(len(self.lines)): - line = self.lines[lineno].strip() + for line in self.lines: + line = line.strip() if line.startswith("[") and ":vars]" in line: 
line = line.replace("[","").replace(":vars]","") group = self.groups.get(line, None) if group is None: - raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line)) + raise AnsibleError("can't add vars to undefined group: %s" % line) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): @@ -199,10 +201,18 @@ def _parse_group_variables(self): pass elif group: if "=" not in line: - raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1)) + raise AnsibleError("variables assigned to group must be in key=value form") else: (k, v) = [e.strip() for e in line.split("=", 1)] group.set_variable(k, self._parse_value(v)) def get_host_variables(self, host): return {} + + def _before_comment(self, msg): + ''' what's the part of a string before a comment? ''' + msg = msg.replace("\#","**NOT_A_COMMENT**") + msg = msg.split("#")[0] + msg = msg.replace("**NOT_A_COMMENT**","#") + return msg + diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index b83cb9bcc7a732..9675d70f690910 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -16,22 +16,26 @@ # along with Ansible. If not, see . ############################################# +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import os import subprocess -import ansible.constants as C +import sys + +from ansible import constants as C +from ansible.errors import * from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.module_utils.basic import json_dict_bytes_to_unicode -from ansible import utils -from ansible import errors -import sys -class InventoryScript(object): +class InventoryScript: ''' Host inventory parser for ansible using external inventory scripts. 
''' - def __init__(self, filename=C.DEFAULT_HOST_LIST): + def __init__(self, loader, filename=C.DEFAULT_HOST_LIST): + + self._loader = loader # Support inventory scripts that are not prefixed with some # path information but happen to be in the current working @@ -41,11 +45,11 @@ def __init__(self, filename=C.DEFAULT_HOST_LIST): try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError, e: - raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) + raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (stdout, stderr) = sp.communicate() if sp.returncode != 0: - raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) + raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) self.data = stdout # see comment about _meta below @@ -58,7 +62,7 @@ def _parse(self, err): all_hosts = {} # not passing from_remote because data from CMDB is trusted - self.raw = utils.parse_json(self.data) + self.raw = self._loader.load(self.data) self.raw = json_dict_bytes_to_unicode(self.raw) all = Group('all') @@ -68,7 +72,7 @@ def _parse(self, err): if 'failed' in self.raw: sys.stderr.write(err + "\n") - raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw) + raise AnsibleError("failed to parse executable inventory script results: %s" % self.raw) for (group_name, data) in self.raw.items(): @@ -92,12 +96,12 @@ def _parse(self, err): if not isinstance(data, dict): data = {'hosts': data} # is not those subkeys, then simplified syntax, host with vars - elif not any(k in data for k in ('hosts','vars','children')): + elif not any(k in data for k in ('hosts','vars')): data = {'hosts': [group_name], 'vars': data} if 'hosts' in data: if not isinstance(data['hosts'], list): - raise errors.AnsibleError("You defined a group \"%s\" with bad " + raise AnsibleError("You defined a group \"%s\" with bad " "data for the 
host list:\n %s" % (group_name, data)) for hostname in data['hosts']: @@ -108,7 +112,7 @@ def _parse(self, err): if 'vars' in data: if not isinstance(data['vars'], dict): - raise errors.AnsibleError("You defined a group \"%s\" with bad " + raise AnsibleError("You defined a group \"%s\" with bad " "data for variables:\n %s" % (group_name, data)) for k, v in data['vars'].iteritems(): @@ -143,12 +147,12 @@ def get_host_variables(self, host): try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError, e: - raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) + raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (out, err) = sp.communicate() if out.strip() == '': return dict() try: - return json_dict_bytes_to_unicode(utils.parse_json(out)) + return json_dict_bytes_to_unicode(self._loader.load(out)) except ValueError: - raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out)) + raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out)) diff --git a/lib/ansible/inventory/vars_plugins/noop.py b/lib/ansible/inventory/vars_plugins/noop.py index 5d4b4b6658c985..8f0c98cad56d35 100644 --- a/lib/ansible/inventory/vars_plugins/noop.py +++ b/lib/ansible/inventory/vars_plugins/noop.py @@ -15,6 +15,8 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type class VarsModule(object): diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 54a1a9cfff7f88..8f9b03f882d1a2 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -43,7 +43,7 @@ # can be inserted in any module source automatically by including # #<> on a blank line by itself inside # of an ansible module. 
The source of this common code lives -# in lib/ansible/module_common.py +# in ansible/executor/module_common.py import locale import os @@ -65,6 +65,7 @@ import platform import errno import tempfile +from itertools import imap, repeat try: import json @@ -234,7 +235,7 @@ def load_platform_subclass(cls, *args, **kwargs): return super(cls, subclass).__new__(subclass) -def json_dict_unicode_to_bytes(d): +def json_dict_unicode_to_bytes(d, encoding='utf-8'): ''' Recursively convert dict keys and values to byte str Specialized for json return because this only handles, lists, tuples, @@ -242,17 +243,17 @@ def json_dict_unicode_to_bytes(d): ''' if isinstance(d, unicode): - return d.encode('utf-8') + return d.encode(encoding) elif isinstance(d, dict): - return dict(map(json_dict_unicode_to_bytes, d.iteritems())) + return dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding))) elif isinstance(d, list): - return list(map(json_dict_unicode_to_bytes, d)) + return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding))) elif isinstance(d, tuple): - return tuple(map(json_dict_unicode_to_bytes, d)) + return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding))) else: return d -def json_dict_bytes_to_unicode(d): +def json_dict_bytes_to_unicode(d, encoding='utf-8'): ''' Recursively convert dict keys and values to byte str Specialized for json return because this only handles, lists, tuples, @@ -260,13 +261,13 @@ def json_dict_bytes_to_unicode(d): ''' if isinstance(d, str): - return unicode(d, 'utf-8') + return unicode(d, encoding) elif isinstance(d, dict): - return dict(map(json_dict_bytes_to_unicode, d.iteritems())) + return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding))) elif isinstance(d, list): - return list(map(json_dict_bytes_to_unicode, d)) + return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding))) elif isinstance(d, tuple): - return tuple(map(json_dict_bytes_to_unicode, d)) + return 
tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding))) else: return d @@ -359,9 +360,9 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, # reset to LANG=C if it's an invalid/unavailable locale self._check_locale() - (self.params, self.args) = self._load_params() + self.params = self._load_params() - self._legal_inputs = ['CHECKMODE', 'NO_LOG'] + self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log'] self.aliases = self._handle_aliases() @@ -888,7 +889,7 @@ def _handle_aliases(self): def _check_for_check_mode(self): for (k,v) in self.params.iteritems(): - if k == 'CHECKMODE': + if k == '_ansible_check_mode': if not self.supports_check_mode: self.exit_json(skipped=True, msg="remote module does not support check mode") if self.supports_check_mode: @@ -896,13 +897,13 @@ def _check_for_check_mode(self): def _check_for_no_log(self): for (k,v) in self.params.iteritems(): - if k == 'NO_LOG': + if k == '_ansible_no_log': self.no_log = self.boolean(v) def _check_invalid_arguments(self): for (k,v) in self.params.iteritems(): # these should be in legal inputs already - #if k in ('CHECKMODE', 'NO_LOG'): + #if k in ('_ansible_check_mode', '_ansible_no_log'): # continue if k not in self._legal_inputs: self.fail_json(msg="unsupported parameter for module: %s" % k) @@ -1075,20 +1076,11 @@ def _set_defaults(self, pre=True): def _load_params(self): ''' read the input and return a dictionary and the arguments string ''' - args = MODULE_ARGS - items = shlex.split(args) - params = {} - for x in items: - try: - (k, v) = x.split("=",1) - except Exception, e: - self.fail_json(msg="this module requires key=value arguments (%s)" % (items)) - if k in params: - self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v)) - params[k] = v - params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - params2.update(params) - return (params2, args) + params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + if params is None: + 
params = dict() + return params + def _log_invocation(self): ''' log that ansible ran the module ''' @@ -1209,13 +1201,17 @@ def boolean(self, arg): self.fail_json(msg='Boolean %s not in either boolean list' % arg) def jsonify(self, data): - for encoding in ("utf-8", "latin-1", "unicode_escape"): + for encoding in ("utf-8", "latin-1"): try: return json.dumps(data, encoding=encoding) - # Old systems using simplejson module does not support encoding keyword. - except TypeError, e: - return json.dumps(data) - except UnicodeDecodeError, e: + # Old systems using old simplejson module does not support encoding keyword. + except TypeError: + try: + new_data = json_dict_bytes_to_unicode(data, encoding=encoding) + except UnicodeDecodeError: + continue + return json.dumps(new_data) + except UnicodeDecodeError: continue self.fail_json(msg='Invalid unicode encoding encountered') @@ -1452,7 +1448,7 @@ def run_command(self, args, check_rc=False, close_fds=True, executable=None, dat msg = None st_in = None - # Set a temporart env path if a prefix is passed + # Set a temporary env path if a prefix is passed env=os.environ if path_prefix: env['PATH']="%s:%s" % (path_prefix, env['PATH']) diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1 index ee7d3ddeca4ba8..57d2c1b101caa7 100644 --- a/lib/ansible/module_utils/powershell.ps1 +++ b/lib/ansible/module_utils/powershell.ps1 @@ -142,14 +142,14 @@ Function ConvertTo-Bool return } -# Helper function to calculate a hash of a file in a way which powershell 3 +# Helper function to calculate md5 of a file in a way which powershell 3 # and above can handle: -Function Get-FileChecksum($path) +Function Get-FileMd5($path) { $hash = "" If (Test-Path -PathType Leaf $path) { - $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; + $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, 
[System.IO.FileAccess]::Read); [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); $fp.Dispose(); diff --git a/lib/ansible/modules/__init__.py b/lib/ansible/modules/__init__.py index e69de29bb2d1d6..ae8ccff5952585 100644 --- a/lib/ansible/modules/__init__.py +++ b/lib/ansible/modules/__init__.py @@ -0,0 +1,20 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core deleted file mode 160000 index 9028e9d4be8a3d..00000000000000 --- a/lib/ansible/modules/core +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 9028e9d4be8a3dbb96c81a799e18f3adf63d9fd0 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras deleted file mode 160000 index dd80fa221ce0ad..00000000000000 --- a/lib/ansible/modules/extras +++ /dev/null @@ -1 +0,0 @@ -Subproject commit dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6dc diff --git a/v2/ansible/new_inventory/__init__.py b/lib/ansible/new_inventory/__init__.py similarity index 100% rename from v2/ansible/new_inventory/__init__.py rename to lib/ansible/new_inventory/__init__.py diff --git a/v2/ansible/new_inventory/group.py b/lib/ansible/new_inventory/group.py similarity index 100% rename from v2/ansible/new_inventory/group.py rename to lib/ansible/new_inventory/group.py diff --git a/v2/ansible/new_inventory/host.py b/lib/ansible/new_inventory/host.py similarity index 100% rename from v2/ansible/new_inventory/host.py rename to lib/ansible/new_inventory/host.py diff --git a/v2/ansible/parsing/__init__.py b/lib/ansible/parsing/__init__.py similarity index 100% rename from v2/ansible/parsing/__init__.py rename to lib/ansible/parsing/__init__.py diff --git a/v2/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py similarity index 100% rename from v2/ansible/parsing/mod_args.py rename to lib/ansible/parsing/mod_args.py diff --git a/v2/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py similarity index 100% rename from v2/ansible/parsing/splitter.py rename to lib/ansible/parsing/splitter.py diff --git a/v2/ansible/parsing/utils/__init__.py b/lib/ansible/parsing/utils/__init__.py similarity index 100% rename from v2/ansible/parsing/utils/__init__.py rename to lib/ansible/parsing/utils/__init__.py diff --git 
a/v2/ansible/parsing/utils/jsonify.py b/lib/ansible/parsing/utils/jsonify.py similarity index 100% rename from v2/ansible/parsing/utils/jsonify.py rename to lib/ansible/parsing/utils/jsonify.py diff --git a/v2/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py similarity index 100% rename from v2/ansible/parsing/vault/__init__.py rename to lib/ansible/parsing/vault/__init__.py diff --git a/v2/ansible/parsing/yaml/__init__.py b/lib/ansible/parsing/yaml/__init__.py similarity index 100% rename from v2/ansible/parsing/yaml/__init__.py rename to lib/ansible/parsing/yaml/__init__.py diff --git a/v2/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py similarity index 100% rename from v2/ansible/parsing/yaml/constructor.py rename to lib/ansible/parsing/yaml/constructor.py diff --git a/v2/ansible/parsing/yaml/loader.py b/lib/ansible/parsing/yaml/loader.py similarity index 100% rename from v2/ansible/parsing/yaml/loader.py rename to lib/ansible/parsing/yaml/loader.py diff --git a/v2/ansible/parsing/yaml/objects.py b/lib/ansible/parsing/yaml/objects.py similarity index 100% rename from v2/ansible/parsing/yaml/objects.py rename to lib/ansible/parsing/yaml/objects.py diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 24ba2d3c6e0c06..40e6638f23921e 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -15,860 +15,71 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-import ansible.inventory -import ansible.constants as C -import ansible.runner -from ansible.utils.template import template -from ansible import utils -from ansible import errors -from ansible.module_utils.splitter import split_args, unquote -import ansible.callbacks -import ansible.cache -import os -import shlex -import collections -from play import Play -import StringIO -import pipes - -# the setup cache stores all variables about a host -# gathered during the setup step, while the vars cache -# holds all other variables about a host -SETUP_CACHE = ansible.cache.FactCache() -VARS_CACHE = collections.defaultdict(dict) -RESERVED_TAGS = ['all','tagged','untagged','always'] - - -class PlayBook(object): - ''' - runs an ansible playbook, given as a datastructure or YAML filename. - A playbook is a deployment, config management, or automation based - set of commands to run in series. - - multiple plays/tasks do not execute simultaneously, but tasks in each - pattern do execute in parallel (according to the number of forks - requested) among the hosts they address - ''' - - # ***************************************************** - - def __init__(self, - playbook = None, - host_list = C.DEFAULT_HOST_LIST, - module_path = None, - forks = C.DEFAULT_FORKS, - timeout = C.DEFAULT_TIMEOUT, - remote_user = C.DEFAULT_REMOTE_USER, - remote_pass = C.DEFAULT_REMOTE_PASS, - remote_port = None, - transport = C.DEFAULT_TRANSPORT, - private_key_file = C.DEFAULT_PRIVATE_KEY_FILE, - callbacks = None, - runner_callbacks = None, - stats = None, - extra_vars = None, - only_tags = None, - skip_tags = None, - subset = C.DEFAULT_SUBSET, - inventory = None, - check = False, - diff = False, - any_errors_fatal = False, - vault_password = False, - force_handlers = False, - # privilege escalation - become = C.DEFAULT_BECOME, - become_method = C.DEFAULT_BECOME_METHOD, - become_user = C.DEFAULT_BECOME_USER, - become_pass = None, - ): - - """ - playbook: path to a playbook file - host_list: path to a 
file like /etc/ansible/hosts - module_path: path to ansible modules, like /usr/share/ansible/ - forks: desired level of parallelism - timeout: connection timeout - remote_user: run as this user if not specified in a particular play - remote_pass: use this remote password (for all plays) vs using SSH keys - remote_port: default remote port to use if not specified with the host or play - transport: how to connect to hosts that don't specify a transport (local, paramiko, etc) - callbacks output callbacks for the playbook - runner_callbacks: more callbacks, this time for the runner API - stats: holds aggregrate data about events occurring to each host - inventory: can be specified instead of host_list to use a pre-existing inventory object - check: don't change anything, just try to detect some potential changes - any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed - force_handlers: continue to notify and run handlers even if a task fails - """ - - self.SETUP_CACHE = SETUP_CACHE - self.VARS_CACHE = VARS_CACHE - - arguments = [] - if playbook is None: - arguments.append('playbook') - if callbacks is None: - arguments.append('callbacks') - if runner_callbacks is None: - arguments.append('runner_callbacks') - if stats is None: - arguments.append('stats') - if arguments: - raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments)) - - if extra_vars is None: - extra_vars = {} - if only_tags is None: - only_tags = [ 'all' ] - if skip_tags is None: - skip_tags = [] - - self.check = check - self.diff = diff - self.module_path = module_path - self.forks = forks - self.timeout = timeout - self.remote_user = remote_user - self.remote_pass = remote_pass - self.remote_port = remote_port - self.transport = transport - self.callbacks = callbacks - self.runner_callbacks = runner_callbacks - self.stats = stats - self.extra_vars = extra_vars - self.global_vars = {} - self.private_key_file = private_key_file - 
self.only_tags = only_tags - self.skip_tags = skip_tags - self.any_errors_fatal = any_errors_fatal - self.vault_password = vault_password - self.force_handlers = force_handlers - - self.become = become - self.become_method = become_method - self.become_user = become_user - self.become_pass = become_pass - - self.callbacks.playbook = self - self.runner_callbacks.playbook = self - - if inventory is None: - self.inventory = ansible.inventory.Inventory(host_list) - self.inventory.subset(subset) - else: - self.inventory = inventory - - if self.module_path is not None: - utils.plugins.module_finder.add_directory(self.module_path) - - self.basedir = os.path.dirname(playbook) or '.' - utils.plugins.push_basedir(self.basedir) - - # let inventory know the playbook basedir so it can load more vars - self.inventory.set_playbook_basedir(self.basedir) - - vars = extra_vars.copy() - vars['playbook_dir'] = os.path.abspath(self.basedir) - if self.inventory.basedir() is not None: - vars['inventory_dir'] = self.inventory.basedir() - - if self.inventory.src() is not None: - vars['inventory_file'] = self.inventory.src() - - self.filename = playbook - (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars) - ansible.callbacks.load_callback_plugins() - ansible.callbacks.set_playbook(self.callbacks, self) - - self._ansible_version = utils.version_info(gitinfo=True) - - # ***************************************************** - - def _get_playbook_vars(self, play_ds, existing_vars): - ''' - Gets the vars specified with the play and blends them - with any existing vars that have already been read in - ''' - new_vars = existing_vars.copy() - if 'vars' in play_ds: - if isinstance(play_ds['vars'], dict): - new_vars.update(play_ds['vars']) - elif isinstance(play_ds['vars'], list): - for v in play_ds['vars']: - new_vars.update(v) - return new_vars - - # ***************************************************** - - def _get_include_info(self, play_ds, basedir, 
existing_vars={}): - ''' - Gets any key=value pairs specified with the included file - name and returns the merged vars along with the path - ''' - new_vars = existing_vars.copy() - tokens = split_args(play_ds.get('include', '')) - for t in tokens[1:]: - try: - (k,v) = unquote(t).split("=", 1) - new_vars[k] = template(basedir, v, new_vars) - except ValueError, e: - raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t) - - return (new_vars, unquote(tokens[0])) - - # ***************************************************** - - def _get_playbook_vars_files(self, play_ds, existing_vars_files): - new_vars_files = list(existing_vars_files) - if 'vars_files' in play_ds: - new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files']) - return new_vars_files - - # ***************************************************** - - def _extend_play_vars(self, play, vars={}): - ''' - Extends the given play's variables with the additional specified vars. - ''' - - if 'vars' not in play or not play['vars']: - # someone left out or put an empty "vars:" entry in their playbook - return vars.copy() - - play_vars = None - if isinstance(play['vars'], dict): - play_vars = play['vars'].copy() - play_vars.update(vars) - elif isinstance(play['vars'], list): - # nobody should really do this, but handle vars: a=1 b=2 - play_vars = play['vars'][:] - play_vars.extend([{k:v} for k,v in vars.iteritems()]) - - return play_vars - - # ***************************************************** - - def _load_playbook_from_file(self, path, vars={}, vars_files=[]): - ''' - run top level error checking on playbooks and allow them to include other playbooks. 
- ''' - - playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password) - accumulated_plays = [] - play_basedirs = [] - - if type(playbook_data) != list: - raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data)) - - basedir = os.path.dirname(path) or '.' - utils.plugins.push_basedir(basedir) - for play in playbook_data: - if type(play) != dict: - raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play) - - if 'include' in play: - # a playbook (list of plays) decided to include some other list of plays - # from another file. The result is a flat list of plays in the end. - - play_vars = self._get_playbook_vars(play, vars) - play_vars_files = self._get_playbook_vars_files(play, vars_files) - inc_vars, inc_path = self._get_include_info(play, basedir, play_vars) - play_vars.update(inc_vars) - - included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars)) - (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files) - for p in plays: - # support for parameterized play includes works by passing - # those variables along to the subservient play - p['vars'] = self._extend_play_vars(p, play_vars) - # now add in the vars_files - p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files) - - accumulated_plays.extend(plays) - play_basedirs.extend(basedirs) - - else: - - # this is a normal (non-included play) - accumulated_plays.append(play) - play_basedirs.append(basedir) - - return (accumulated_plays, play_basedirs) - - # ***************************************************** - - def run(self): - ''' run all patterns in the playbook ''' - plays = [] - matched_tags_all = set() - unmatched_tags_all = set() - - # loop through all patterns and run them - self.callbacks.on_start() - for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs): - 
play = Play(self, play_ds, play_basedir, vault_password=self.vault_password) - assert play is not None - - matched_tags, unmatched_tags = play.compare_tags(self.only_tags) - - matched_tags_all = matched_tags_all | matched_tags - unmatched_tags_all = unmatched_tags_all | unmatched_tags - - # Remove tasks we wish to skip - matched_tags = matched_tags - set(self.skip_tags) - - # if we have matched_tags, the play must be run. - # if the play contains no tasks, assume we just want to gather facts - # in this case there are actually 3 meta tasks (handler flushes) not 0 - # tasks, so that's why there's a check against 3 - if (len(matched_tags) > 0 or len(play.tasks()) == 3): - plays.append(play) - - # if the playbook is invoked with --tags or --skip-tags that don't - # exist at all in the playbooks then we need to raise an error so that - # the user can correct the arguments. - unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) - - (matched_tags_all | unmatched_tags_all)) - - for t in RESERVED_TAGS: - unknown_tags.discard(t) - - if len(unknown_tags) > 0: - for t in RESERVED_TAGS: - unmatched_tags_all.discard(t) - msg = 'tag(s) not found in playbook: %s. 
possible values: %s' - unknown = ','.join(sorted(unknown_tags)) - unmatched = ','.join(sorted(unmatched_tags_all)) - raise errors.AnsibleError(msg % (unknown, unmatched)) - - for play in plays: - ansible.callbacks.set_play(self.callbacks, play) - ansible.callbacks.set_play(self.runner_callbacks, play) - if not self._run_play(play): - break - - ansible.callbacks.set_play(self.callbacks, None) - ansible.callbacks.set_play(self.runner_callbacks, None) - - # summarize the results - results = {} - for host in self.stats.processed.keys(): - results[host] = self.stats.summarize(host) - return results - - # ***************************************************** - - def _async_poll(self, poller, async_seconds, async_poll_interval): - ''' launch an async job, if poll_interval is set, wait for completion ''' - - results = poller.wait(async_seconds, async_poll_interval) - - # mark any hosts that are still listed as started as failed - # since these likely got killed by async_wrapper - for host in poller.hosts_to_poll: - reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' } - self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id']) - results['contacted'][host] = reason - - return results - - # ***************************************************** - - def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False): - ''' returns a list of hosts that haven't failed and aren't dark ''' - - return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)] - - # ***************************************************** - - def _run_task_internal(self, task, include_failed=False): - ''' run a particular module step in a playbook ''' - - hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed) - self.inventory.restrict_to(hosts) +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) 
+__metaclass__ = type - runner = ansible.runner.Runner( - pattern=task.play.hosts, - inventory=self.inventory, - module_name=task.module_name, - module_args=task.module_args, - forks=self.forks, - remote_pass=self.remote_pass, - module_path=self.module_path, - timeout=self.timeout, - remote_user=task.remote_user, - remote_port=task.play.remote_port, - module_vars=task.module_vars, - play_vars=task.play_vars, - play_file_vars=task.play_file_vars, - role_vars=task.role_vars, - role_params=task.role_params, - default_vars=task.default_vars, - extra_vars=self.extra_vars, - private_key_file=self.private_key_file, - setup_cache=self.SETUP_CACHE, - vars_cache=self.VARS_CACHE, - basedir=task.play.basedir, - conditional=task.when, - callbacks=self.runner_callbacks, - transport=task.transport, - is_playbook=True, - check=self.check, - diff=self.diff, - environment=task.environment, - complex_args=task.args, - accelerate=task.play.accelerate, - accelerate_port=task.play.accelerate_port, - accelerate_ipv6=task.play.accelerate_ipv6, - error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, - vault_pass = self.vault_password, - run_hosts=hosts, - no_log=task.no_log, - run_once=task.run_once, - become=task.become, - become_method=task.become_method, - become_user=task.become_user, - become_pass=task.become_pass, - ) - - runner.module_vars.update({'play_hosts': hosts}) - runner.module_vars.update({'ansible_version': self._ansible_version}) - - if task.async_seconds == 0: - results = runner.run() - else: - results, poller = runner.run_async(task.async_seconds) - self.stats.compute(results) - if task.async_poll_interval > 0: - # if not polling, playbook requested fire and forget, so don't poll - results = self._async_poll(poller, task.async_seconds, task.async_poll_interval) - else: - for (host, res) in results.get('contacted', {}).iteritems(): - self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id']) - - contacted = 
results.get('contacted',{}) - dark = results.get('dark', {}) - - self.inventory.lift_restriction() - - if len(contacted.keys()) == 0 and len(dark.keys()) == 0: - return None - - return results - - # ***************************************************** - - def _run_task(self, play, task, is_handler): - ''' run a single task in the playbook and recursively run any subtasks. ''' - - ansible.callbacks.set_task(self.callbacks, task) - ansible.callbacks.set_task(self.runner_callbacks, task) - - if task.role_name: - name = '%s | %s' % (task.role_name, task.name) - else: - name = task.name - - try: - # v1 HACK: we don't have enough information to template many names - # at this point. Rather than making this work for all cases in - # v1, just make this degrade gracefully. Will fix in v2 - name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False) - except: - pass - - self.callbacks.on_task_start(name, is_handler) - if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task: - ansible.callbacks.set_task(self.callbacks, None) - ansible.callbacks.set_task(self.runner_callbacks, None) - return True - - # template ignore_errors - # TODO: Is this needed here? cond is templated again in - # check_conditional after some more manipulations. 
- # TODO: we don't have enough information here to template cond either - # (see note on templating name above) - cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False) - task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) - - # load up an appropriate ansible runner to run the task in parallel - include_failed = is_handler and play.force_handlers - results = self._run_task_internal(task, include_failed=include_failed) - - # if no hosts are matched, carry on - hosts_remaining = True - if results is None: - hosts_remaining = False - results = {} - - contacted = results.get('contacted', {}) - self.stats.compute(results, ignore_errors=task.ignore_errors) - - def _register_play_vars(host, result): - # when 'register' is used, persist the result in the vars cache - # rather than the setup cache - vars should be transient between - # playbook executions - if 'stdout' in result and 'stdout_lines' not in result: - result['stdout_lines'] = result['stdout'].splitlines() - utils.update_hash(self.VARS_CACHE, host, {task.register: result}) - - def _save_play_facts(host, facts): - # saves play facts in SETUP_CACHE, unless the module executed was - # set_fact, in which case we add them to the VARS_CACHE - if task.module_name in ('set_fact', 'include_vars'): - utils.update_hash(self.VARS_CACHE, host, facts) - else: - utils.update_hash(self.SETUP_CACHE, host, facts) - - # add facts to the global setup cache - for host, result in contacted.iteritems(): - if 'results' in result: - # task ran with_ lookup plugin, so facts are encapsulated in - # multiple list items in the results key - for res in result['results']: - if type(res) == dict: - facts = res.get('ansible_facts', {}) - _save_play_facts(host, facts) - else: - # when facts are returned, persist them in the setup cache - facts = result.get('ansible_facts', {}) - _save_play_facts(host, facts) - - # if requested, 
save the result into the registered variable name - if task.register: - _register_play_vars(host, result) - - # also have to register some failed, but ignored, tasks - if task.ignore_errors and task.register: - failed = results.get('failed', {}) - for host, result in failed.iteritems(): - _register_play_vars(host, result) - - # flag which notify handlers need to be run - if len(task.notify) > 0: - for host, results in results.get('contacted',{}).iteritems(): - if results.get('changed', False): - for handler_name in task.notify: - self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host) - - ansible.callbacks.set_task(self.callbacks, None) - ansible.callbacks.set_task(self.runner_callbacks, None) - return hosts_remaining - - # ***************************************************** - - def _flag_handler(self, play, handler_name, host): - ''' - if a task has any notify elements, flag handlers for run - at end of execution cycle for hosts that have indicated - changes have been made - ''' - - found = False - for x in play.handlers(): - if handler_name == template(play.basedir, x.name, x.module_vars): - found = True - self.callbacks.on_notify(host, x.name) - x.notified_by.append(host) - if not found: - raise errors.AnsibleError("change handler (%s) is not defined" % handler_name) - - # ***************************************************** - - def _do_setup_step(self, play): - ''' get facts from the remote system ''' - - host_list = self._trim_unavailable_hosts(play._play_hosts) - - if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart': - host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]] - if len(host_list) == 0: - return {} - elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'): - return {} - - self.callbacks.on_setup() - self.inventory.restrict_to(host_list) - - ansible.callbacks.set_task(self.callbacks, None) - 
ansible.callbacks.set_task(self.runner_callbacks, None) - - # push any variables down to the system - setup_results = ansible.runner.Runner( - basedir=self.basedir, - pattern=play.hosts, - module_name='setup', - module_args={}, - inventory=self.inventory, - forks=self.forks, - module_path=self.module_path, - timeout=self.timeout, - remote_user=play.remote_user, - remote_pass=self.remote_pass, - remote_port=play.remote_port, - private_key_file=self.private_key_file, - setup_cache=self.SETUP_CACHE, - vars_cache=self.VARS_CACHE, - callbacks=self.runner_callbacks, - become=play.become, - become_method=play.become_method, - become_user=play.become_user, - become_pass=self.become_pass, - vault_pass=self.vault_password, - transport=play.transport, - is_playbook=True, - module_vars=play.vars, - play_vars=play.vars, - play_file_vars=play.vars_file_vars, - role_vars=play.role_vars, - default_vars=play.default_vars, - check=self.check, - diff=self.diff, - accelerate=play.accelerate, - accelerate_port=play.accelerate_port, - ).run() - self.stats.compute(setup_results, setup=True) - - self.inventory.lift_restriction() - - # now for each result, load into the setup cache so we can - # let runner template out future commands - setup_ok = setup_results.get('contacted', {}) - for (host, result) in setup_ok.iteritems(): - utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True}) - utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {})) - return setup_results - - # ***************************************************** - - - def generate_retry_inventory(self, replay_hosts): - ''' - called by /usr/bin/ansible when a playbook run fails. It generates an inventory - that allows re-running on ONLY the failed hosts. This may duplicate some - variable information in group_vars/host_vars but that is ok, and expected. 
- ''' - - buf = StringIO.StringIO() - for x in replay_hosts: - buf.write("%s\n" % x) - basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH) - filename = "%s.retry" % os.path.basename(self.filename) - filename = filename.replace(".yml","") - filename = os.path.join(basedir, filename) - - try: - if not os.path.exists(basedir): - os.makedirs(basedir) - - fd = open(filename, 'w') - fd.write(buf.getvalue()) - fd.close() - except: - ansible.callbacks.display( - "\nERROR: could not create retry file. Check the value of \n" - + "the configuration variable 'retry_files_save_path' or set \n" - + "'retry_files_enabled' to False to avoid this message.\n", - color='red' - ) - return None - - return filename - - # ***************************************************** - def tasks_to_run_in_play(self, play): - - tasks = [] - - for task in play.tasks(): - # only run the task if the requested tags match or has 'always' tag - u = set(['untagged']) - task_set = set(task.tags) - - if 'always' in task.tags: - should_run = True - else: - if 'all' in self.only_tags: - should_run = True - else: - should_run = False - if 'tagged' in self.only_tags: - if task_set != u: - should_run = True - elif 'untagged' in self.only_tags: - if task_set == u: - should_run = True - else: - if task_set.intersection(self.only_tags): - should_run = True - - # Check for tags that we need to skip - if 'all' in self.skip_tags: - should_run = False - else: - if 'tagged' in self.skip_tags: - if task_set != u: - should_run = False - elif 'untagged' in self.skip_tags: - if task_set == u: - should_run = False - else: - if should_run: - if task_set.intersection(self.skip_tags): - should_run = False - - if should_run: - tasks.append(task) +import os - return tasks +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.parsing import DataLoader +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.play import Play +from ansible.playbook.playbook_include import 
PlaybookInclude +from ansible.plugins import push_basedir - # ***************************************************** - def _run_play(self, play): - ''' run a list of tasks for a given pattern, in order ''' - self.callbacks.on_play_start(play.name) - # Get the hosts for this play - play._play_hosts = self.inventory.list_hosts(play.hosts) - # if no hosts matches this play, drop out - if not play._play_hosts: - self.callbacks.on_no_hosts_matched() - return True +__all__ = ['Playbook'] - # get facts from system - self._do_setup_step(play) - # now with that data, handle contentional variable file imports! - all_hosts = self._trim_unavailable_hosts(play._play_hosts) - play.update_vars_files(all_hosts, vault_password=self.vault_password) - hosts_count = len(all_hosts) +class Playbook: - if play.serial.endswith("%"): + def __init__(self, loader): + # Entries in the datastructure of a playbook may + # be either a play or an include statement + self._entries = [] + self._basedir = os.getcwd() + self._loader = loader - # This is a percentage, so calculate it based on the - # number of hosts - serial_pct = int(play.serial.replace("%","")) - serial = int((serial_pct/100.0) * len(all_hosts)) + @staticmethod + def load(file_name, variable_manager=None, loader=None): + pb = Playbook(loader=loader) + pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager) + return pb - # Ensure that no matter how small the percentage, serial - # can never fall below 1, so that things actually happen - serial = max(serial, 1) - else: - serial = int(play.serial) + def _load_playbook_data(self, file_name, variable_manager): - serialized_batch = [] - if serial <= 0: - serialized_batch = [all_hosts] + if os.path.isabs(file_name): + self._basedir = os.path.dirname(file_name) else: - # do N forks all the way through before moving to next - while len(all_hosts) > 0: - play_hosts = [] - for x in range(serial): - if len(all_hosts) > 0: - play_hosts.append(all_hosts.pop(0)) - 
serialized_batch.append(play_hosts) - - task_errors = False - for on_hosts in serialized_batch: - - # restrict the play to just the hosts we have in our on_hosts block that are - # available. - play._play_hosts = self._trim_unavailable_hosts(on_hosts) - self.inventory.also_restrict_to(on_hosts) - - for task in self.tasks_to_run_in_play(play): - - if task.meta is not None: - # meta tasks can force handlers to run mid-play - if task.meta == 'flush_handlers': - self.run_handlers(play) - - # skip calling the handler till the play is finished - continue - - if not self._run_task(play, task, False): - # whether no hosts matched is fatal or not depends if it was on the initial step. - # if we got exactly no hosts on the first step (setup!) then the host group - # just didn't match anything and that's ok - return False + self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name))) - # Get a new list of what hosts are left as available, the ones that - # did not go fail/dark during the task - host_list = self._trim_unavailable_hosts(play._play_hosts) + # set the loaders basedir + self._loader.set_basedir(self._basedir) - # Set max_fail_pct to 0, So if any hosts fails, bail out - if task.any_errors_fatal and len(host_list) < hosts_count: - play.max_fail_pct = 0 + # also add the basedir to the list of module directories + push_basedir(self._basedir) - # If threshold for max nodes failed is exceeded, bail out. 
- if play.serial > 0: - # if serial is set, we need to shorten the size of host_count - play_count = len(play._play_hosts) - if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count): - host_list = None - else: - if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count): - host_list = None + ds = self._loader.load_from_file(os.path.basename(file_name)) + if not isinstance(ds, list): + raise AnsibleParserError("playbooks must be a list of plays", obj=ds) - # if no hosts remain, drop out - if not host_list: - if play.force_handlers: - task_errors = True - break - else: - self.callbacks.on_no_hosts_remaining() - return False + # Parse the playbook entries. For plays, we simply parse them + # using the Play() object, and includes are parsed using the + # PlaybookInclude() object + for entry in ds: + if not isinstance(entry, dict): + raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry) - # lift restrictions after each play finishes - self.inventory.lift_also_restriction() - - if task_errors and not play.force_handlers: - # if there were failed tasks and handler execution - # is not forced, quit the play with an error - return False + if 'include' in entry: + pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader) + self._entries.extend(pb._entries) else: - # no errors, go ahead and execute all handlers - if not self.run_handlers(play): - return False - - return True - - - def run_handlers(self, play): - on_hosts = play._play_hosts - hosts_count = len(on_hosts) - for task in play.tasks(): - if task.meta is not None: - - fired_names = {} - for handler in play.handlers(): - if len(handler.notified_by) > 0: - self.inventory.restrict_to(handler.notified_by) - - # Resolve the variables first - handler_name = template(play.basedir, handler.name, handler.module_vars) - if handler_name not in fired_names: - 
self._run_task(play, handler, True) - # prevent duplicate handler includes from running more than once - fired_names[handler_name] = 1 - - host_list = self._trim_unavailable_hosts(play._play_hosts) - if handler.any_errors_fatal and len(host_list) < hosts_count: - play.max_fail_pct = 0 - if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count): - host_list = None - if not host_list and not play.force_handlers: - self.callbacks.on_no_hosts_remaining() - return False - - self.inventory.lift_restriction() - new_list = handler.notified_by[:] - for host in handler.notified_by: - if host in on_hosts: - while host in new_list: - new_list.remove(host) - handler.notified_by = new_list + entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader) + self._entries.append(entry_obj) - continue + def get_loader(self): + return self._loader - return True + def get_plays(self): + return self._entries[:] diff --git a/v2/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py similarity index 100% rename from v2/ansible/playbook/attribute.py rename to lib/ansible/playbook/attribute.py diff --git a/v2/ansible/playbook/base.py b/lib/ansible/playbook/base.py similarity index 100% rename from v2/ansible/playbook/base.py rename to lib/ansible/playbook/base.py diff --git a/v2/ansible/playbook/become.py b/lib/ansible/playbook/become.py similarity index 100% rename from v2/ansible/playbook/become.py rename to lib/ansible/playbook/become.py diff --git a/v2/ansible/playbook/block.py b/lib/ansible/playbook/block.py similarity index 100% rename from v2/ansible/playbook/block.py rename to lib/ansible/playbook/block.py diff --git a/v2/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py similarity index 100% rename from v2/ansible/playbook/conditional.py rename to lib/ansible/playbook/conditional.py diff --git a/v2/ansible/playbook/handler.py b/lib/ansible/playbook/handler.py similarity index 100% rename from 
v2/ansible/playbook/handler.py rename to lib/ansible/playbook/handler.py diff --git a/v2/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py similarity index 100% rename from v2/ansible/playbook/helpers.py rename to lib/ansible/playbook/helpers.py diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 6ee85e0bf48939..b99c01fdf74e63 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -15,935 +15,249 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -############################################# - -from ansible.utils.template import template -from ansible import utils -from ansible import errors -from ansible.playbook.task import Task -from ansible.module_utils.splitter import split_args, unquote -import ansible.constants as C -import pipes -import shlex -import os -import sys -import uuid - - -class Play(object): - - _pb_common = [ - 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become', - 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts', - 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su', - 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt', - 'vault_password', - ] - - __slots__ = _pb_common + [ - '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir', - 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port', - 'role_vars', 'transport', 'vars_file_vars', - ] - - # to catch typos and so forth -- these are userland names - # and don't line up 1:1 with how they are stored - VALID_KEYS = frozenset(_pb_common + [ - 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks', - 'pre_tasks', 'role_names', 'tasks', 'user', - ]) - - # ************************************************* - - def __init__(self, playbook, ds, basedir, vault_password=None): - ''' constructor loads from a play 
datastructure ''' - - for x in ds.keys(): - if not x in Play.VALID_KEYS: - raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x) - - # allow all playbook keys to be set by --extra-vars - self.vars = ds.get('vars', {}) - self.vars_prompt = ds.get('vars_prompt', {}) - self.playbook = playbook - self.vars = self._get_vars() - self.vars_file_vars = dict() # these are vars read in from vars_files: - self.role_vars = dict() # these are vars read in from vars/main.yml files in roles - self.basedir = basedir - self.roles = ds.get('roles', None) - self.tags = ds.get('tags', None) - self.vault_password = vault_password - self.environment = ds.get('environment', {}) - - if self.tags is None: - self.tags = [] - elif type(self.tags) in [ str, unicode ]: - self.tags = self.tags.split(",") - elif type(self.tags) != list: - self.tags = [] - - # make sure we have some special internal variables set, which - # we use later when loading tasks and handlers - load_vars = dict() - load_vars['playbook_dir'] = os.path.abspath(self.basedir) - if self.playbook.inventory.basedir() is not None: - load_vars['inventory_dir'] = self.playbook.inventory.basedir() - if self.playbook.inventory.src() is not None: - load_vars['inventory_file'] = self.playbook.inventory.src() - - # We first load the vars files from the datastructure - # so we have the default variables to pass into the roles - self.vars_files = ds.get('vars_files', []) - if not isinstance(self.vars_files, list): - raise errors.AnsibleError('vars_files must be a list') - processed_vars_files = self._update_vars_files_for_host(None) - - # now we load the roles into the datastructure - self.included_roles = [] - ds = self._load_roles(self.roles, ds) - - # and finally re-process the vars files as they may have been updated - # by the included roles, but exclude any which have been processed - self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files) - if not 
isinstance(self.vars_files, list): - raise errors.AnsibleError('vars_files must be a list') - - self._update_vars_files_for_host(None) - - # template everything to be efficient, but do not pre-mature template - # tasks/handlers as they may have inventory scope overrides. We also - # create a set of temporary variables for templating, so we don't - # trample on the existing vars structures - _tasks = ds.pop('tasks', []) - _handlers = ds.pop('handlers', []) - - temp_vars = utils.combine_vars(self.vars, self.vars_file_vars) - temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars) +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type - try: - ds = template(basedir, ds, temp_vars) - except errors.AnsibleError, e: - utils.warning("non fatal error while trying to template play variables: %s" % (str(e))) - - ds['tasks'] = _tasks - ds['handlers'] = _handlers - - self._ds = ds - - hosts = ds.get('hosts') - if hosts is None: - raise errors.AnsibleError('hosts declaration is required') - elif isinstance(hosts, list): - try: - hosts = ';'.join(hosts) - except TypeError,e: - raise errors.AnsibleError('improper host declaration: %s' % str(e)) - - self.serial = str(ds.get('serial', 0)) - self.hosts = hosts - self.name = ds.get('name', self.hosts) - self._tasks = ds.get('tasks', []) - self._handlers = ds.get('handlers', []) - self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user)) - self.remote_port = ds.get('port', self.playbook.remote_port) - self.transport = ds.get('connection', self.playbook.transport) - self.remote_port = self.remote_port - self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false')) - self.accelerate = utils.boolean(ds.get('accelerate', 'false')) - self.accelerate_port = ds.get('accelerate_port', None) - self.accelerate_ipv6 = ds.get('accelerate_ipv6', False) - self.max_fail_pct = int(ds.get('max_fail_percentage', 100)) - self.no_log = 
utils.boolean(ds.get('no_log', 'false')) - self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers)) - - # Fail out if user specifies conflicting privilege escalations - if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')): - raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("sudo", "sudo_user") cannot be used together') - if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')): - raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("su", "su_user") cannot be used together') - if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')): - raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together') - - # become settings are inherited and updated normally - self.become = ds.get('become', self.playbook.become) - self.become_method = ds.get('become_method', self.playbook.become_method) - self.become_user = ds.get('become_user', self.playbook.become_user) - - # Make sure current play settings are reflected in become fields - if 'sudo' in ds: - self.become=ds['sudo'] - self.become_method='sudo' - if 'sudo_user' in ds: - self.become_user=ds['sudo_user'] - elif 'su' in ds: - self.become=True - self.become=ds['su'] - self.become_method='su' - if 'su_user' in ds: - self.become_user=ds['su_user'] - - # gather_facts is not a simple boolean, as None means that a 'smart' - # fact gathering mode will be used, so we need to be careful here as - # calling utils.boolean(None) returns False - self.gather_facts = ds.get('gather_facts', None) - if self.gather_facts is not None: - self.gather_facts = utils.boolean(self.gather_facts) - - load_vars['role_names'] = ds.get('role_names', []) - - self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars) - self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars) - - # apply any 
missing tags to role tasks - self._late_merge_role_tags() - - # place holder for the discovered hosts to be used in this play - self._play_hosts = None - - # ************************************************* - - def _get_role_path(self, role): - """ - Returns the path on disk to the directory containing - the role directories like tasks, templates, etc. Also - returns any variables that were included with the role - """ - orig_path = template(self.basedir,role,self.vars) - - role_vars = {} - if type(orig_path) == dict: - # what, not a path? - role_name = orig_path.get('role', None) - if role_name is None: - raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path) - role_vars = orig_path - else: - role_name = utils.role_spec_parse(orig_path)["name"] - - role_path = None - - possible_paths = [ - utils.path_dwim(self.basedir, os.path.join('roles', role_name)), - utils.path_dwim(self.basedir, role_name) - ] - - if C.DEFAULT_ROLES_PATH: - search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep) - for loc in search_locations: - loc = os.path.expanduser(loc) - possible_paths.append(utils.path_dwim(loc, role_name)) - - for path_option in possible_paths: - if os.path.isdir(path_option): - role_path = path_option - break - - if role_path is None: - raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths)) - - return (role_path, role_vars) - - def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0): - # this number is arbitrary, but it seems sane - if level > 20: - raise errors.AnsibleError("too many levels of recursion while resolving role dependencies") - for role in roles: - role_path,role_vars = self._get_role_path(role) - - # save just the role params for this role, which exclude the special - # keywords 'role', 'tags', and 'when'. 
- role_params = role_vars.copy() - for item in ('role', 'tags', 'when'): - if item in role_params: - del role_params[item] - - role_vars = utils.combine_vars(passed_vars, role_vars) - - vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))) - vars_data = {} - if os.path.isfile(vars): - vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password) - if vars_data: - if not isinstance(vars_data, dict): - raise errors.AnsibleError("vars from '%s' are not a dict" % vars) - role_vars = utils.combine_vars(vars_data, role_vars) - - defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))) - defaults_data = {} - if os.path.isfile(defaults): - defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password) - - # the meta directory contains the yaml that should - # hold the list of dependencies (if any) - meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))) - if os.path.isfile(meta): - data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password) - if data: - dependencies = data.get('dependencies',[]) - if dependencies is None: - dependencies = [] - for dep in dependencies: - allow_dupes = False - (dep_path,dep_vars) = self._get_role_path(dep) - - # save the dep params, just as we did above - dep_params = dep_vars.copy() - for item in ('role', 'tags', 'when'): - if item in dep_params: - del dep_params[item] - - meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta'))) - if os.path.isfile(meta): - meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password) - if meta_data: - allow_dupes = utils.boolean(meta_data.get('allow_duplicates','')) - - # if any tags were specified as role/dep variables, merge - # them into the current dep_vars so they're passed on to any - # further dependencies too, and so we only have one place - # (dep_vars) to look for tags going 
forward - def __merge_tags(var_obj): - old_tags = dep_vars.get('tags', []) - if isinstance(old_tags, basestring): - old_tags = [old_tags, ] - if isinstance(var_obj, dict): - new_tags = var_obj.get('tags', []) - if isinstance(new_tags, basestring): - new_tags = [new_tags, ] - else: - new_tags = [] - return list(set(old_tags).union(set(new_tags))) - - dep_vars['tags'] = __merge_tags(role_vars) - dep_vars['tags'] = __merge_tags(passed_vars) - - # if tags are set from this role, merge them - # into the tags list for the dependent role - if "tags" in passed_vars: - for included_role_dep in dep_stack: - included_dep_name = included_role_dep[0] - included_dep_vars = included_role_dep[2] - if included_dep_name == dep: - if "tags" in included_dep_vars: - included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"]))) - else: - included_dep_vars["tags"] = passed_vars["tags"][:] - - dep_vars = utils.combine_vars(passed_vars, dep_vars) - dep_vars = utils.combine_vars(role_vars, dep_vars) - - vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars'))) - vars_data = {} - if os.path.isfile(vars): - vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password) - if vars_data: - dep_vars = utils.combine_vars(dep_vars, vars_data) - pass - - defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults'))) - dep_defaults_data = {} - if os.path.isfile(defaults): - dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password) - if 'role' in dep_vars: - del dep_vars['role'] - - if not allow_dupes: - if dep in self.included_roles: - # skip back to the top, since we don't want to - # do anything else with this role - continue - else: - self.included_roles.append(dep) - - def _merge_conditional(cur_conditionals, new_conditionals): - if isinstance(new_conditionals, (basestring, bool)): - cur_conditionals.append(new_conditionals) - elif 
isinstance(new_conditionals, list): - cur_conditionals.extend(new_conditionals) - - # pass along conditionals from roles to dep roles - passed_when = passed_vars.get('when') - role_when = role_vars.get('when') - dep_when = dep_vars.get('when') - - tmpcond = [] - _merge_conditional(tmpcond, passed_when) - _merge_conditional(tmpcond, role_when) - _merge_conditional(tmpcond, dep_when) - - if len(tmpcond) > 0: - dep_vars['when'] = tmpcond - - self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1) - dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data]) - - # only add the current role when we're at the top level, - # otherwise we'll end up in a recursive loop - if level == 0: - self.included_roles.append(role) - dep_stack.append([role, role_path, role_vars, role_params, defaults_data]) - return dep_stack - - def _load_role_vars_files(self, vars_files): - # process variables stored in vars/main.yml files - role_vars = {} - for filename in vars_files: - if os.path.exists(filename): - new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password) - if new_vars: - if type(new_vars) != dict: - raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars))) - role_vars = utils.combine_vars(role_vars, new_vars) - - return role_vars - - def _load_role_defaults(self, defaults_files): - # process default variables - default_vars = {} - for filename in defaults_files: - if os.path.exists(filename): - new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password) - if new_default_vars: - if type(new_default_vars) != dict: - raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_default_vars))) - default_vars = utils.combine_vars(default_vars, new_default_vars) - - return default_vars - - def _load_roles(self, roles, ds): - # a role is a name that auto-includes the following if they exist - # /tasks/main.yml - # 
/handlers/main.yml - # /vars/main.yml - # /library - # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found - - if roles is None: - roles = [] - if type(roles) != list: - raise errors.AnsibleError("value of 'roles:' must be a list") - - new_tasks = [] - new_handlers = [] - role_vars_files = [] - defaults_files = [] - - pre_tasks = ds.get('pre_tasks', None) - if type(pre_tasks) != list: - pre_tasks = [] - for x in pre_tasks: - new_tasks.append(x) - - # flush handlers after pre_tasks - new_tasks.append(dict(meta='flush_handlers')) - - roles = self._build_role_dependencies(roles, [], {}) - - # give each role an uuid and - # make role_path available as variable to the task - for idx, val in enumerate(roles): - this_uuid = str(uuid.uuid4()) - roles[idx][-3]['role_uuid'] = this_uuid - roles[idx][-3]['role_path'] = roles[idx][1] - - role_names = [] - - for (role, role_path, role_vars, role_params, default_vars) in roles: - # special vars must be extracted from the dict to the included tasks - special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ] - special_vars = {} - for k in special_keys: - if k in role_vars: - special_vars[k] = role_vars[k] - - task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks')) - handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers')) - vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')) - meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')) - defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')) - - task = self._resolve_main(task_basepath) - handler = self._resolve_main(handler_basepath) - vars_file = self._resolve_main(vars_basepath) - meta_file = self._resolve_main(meta_basepath) - defaults_file = self._resolve_main(defaults_basepath) - - library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library')) - - missing = 
lambda f: not os.path.isfile(f) - if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library): - raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library)) - - if isinstance(role, dict): - role_name = role['role'] - else: - role_name = utils.role_spec_parse(role)["name"] - - role_names.append(role_name) - if os.path.isfile(task): - nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name) - for k in special_keys: - if k in special_vars: - nt[k] = special_vars[k] - new_tasks.append(nt) - if os.path.isfile(handler): - nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name) - for k in special_keys: - if k in special_vars: - nt[k] = special_vars[k] - new_handlers.append(nt) - if os.path.isfile(vars_file): - role_vars_files.append(vars_file) - if os.path.isfile(defaults_file): - defaults_files.append(defaults_file) - if os.path.isdir(library): - utils.plugins.module_finder.add_directory(library) - - tasks = ds.get('tasks', None) - post_tasks = ds.get('post_tasks', None) - handlers = ds.get('handlers', None) - vars_files = ds.get('vars_files', None) - - if type(tasks) != list: - tasks = [] - if type(handlers) != list: - handlers = [] - if type(vars_files) != list: - vars_files = [] - if type(post_tasks) != list: - post_tasks = [] - - new_tasks.extend(tasks) - # flush handlers after tasks + role tasks - new_tasks.append(dict(meta='flush_handlers')) - new_tasks.extend(post_tasks) - # flush handlers after post tasks - new_tasks.append(dict(meta='flush_handlers')) - - new_handlers.extend(handlers) - - ds['tasks'] = new_tasks - ds['handlers'] = new_handlers - ds['role_names'] = role_names - - self.role_vars = self._load_role_vars_files(role_vars_files) - self.default_vars = 
self._load_role_defaults(defaults_files) - - return ds - - # ************************************************* - - def _resolve_main(self, basepath): - ''' flexibly handle variations in main filenames ''' - # these filenames are acceptable: - mains = ( - os.path.join(basepath, 'main'), - os.path.join(basepath, 'main.yml'), - os.path.join(basepath, 'main.yaml'), - os.path.join(basepath, 'main.json'), - ) - if sum([os.path.isfile(x) for x in mains]) > 1: - raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath)) - else: - for m in mains: - if os.path.isfile(m): - return m # exactly one main file - return mains[0] # zero mains (we still need to return something) - - # ************************************************* - - def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None, - additional_conditions=None, original_file=None, role_name=None): - ''' handle task and handler include statements ''' - - results = [] - if tasks is None: - # support empty handler files, and the like. 
- tasks = [] - if additional_conditions is None: - additional_conditions = [] - if vars is None: - vars = {} - if role_params is None: - role_params = {} - if default_vars is None: - default_vars = {} - if become_vars is None: - become_vars = {} - - old_conditions = list(additional_conditions) - - for x in tasks: - - # prevent assigning the same conditions to each task on an include - included_additional_conditions = list(old_conditions) - - if not isinstance(x, dict): - raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file)) - - # evaluate privilege escalation vars for current and child tasks - included_become_vars = {} - for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]: - if k in x: - included_become_vars[k] = x[k] - elif k in become_vars: - included_become_vars[k] = become_vars[k] - x[k] = become_vars[k] - - task_vars = vars.copy() - if original_file: - task_vars['_original_file'] = original_file - - if 'meta' in x: - if x['meta'] == 'flush_handlers': - if role_name and 'role_name' not in x: - x['role_name'] = role_name - results.append(Task(self, x, module_vars=task_vars, role_name=role_name)) - continue - - if 'include' in x: - tokens = split_args(str(x['include'])) - included_additional_conditions = list(additional_conditions) - include_vars = {} - for k in x: - if k.startswith("with_"): - if original_file: - offender = " (in %s)" % original_file - else: - offender = "" - utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True) - elif k.startswith("when_"): - utils.deprecated("\"when_:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True) - elif k == 'when': - if isinstance(x[k], (basestring, bool)): - included_additional_conditions.append(x[k]) - elif type(x[k]) is list: - included_additional_conditions.extend(x[k]) - elif k in ("include", "vars", "role_params", 
"default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"): - continue - else: - include_vars[k] = x[k] - - # get any role parameters specified - role_params = x.get('role_params', {}) - - # get any role default variables specified - default_vars = x.get('default_vars', {}) - if not default_vars: - default_vars = self.default_vars - else: - default_vars = utils.combine_vars(self.default_vars, default_vars) - - # append the vars defined with the include (from above) - # as well as the old-style 'vars' element. The old-style - # vars are given higher precedence here (just in case) - task_vars = utils.combine_vars(task_vars, include_vars) - if 'vars' in x: - task_vars = utils.combine_vars(task_vars, x['vars']) - - new_role = None - if 'role_name' in x: - new_role = x['role_name'] - - mv = task_vars.copy() - for t in tokens[1:]: - (k,v) = t.split("=", 1) - v = unquote(v) - mv[k] = template(self.basedir, v, mv) - dirname = self.basedir - if original_file: - dirname = os.path.dirname(original_file) - - # temp vars are used here to avoid trampling on the existing vars structures - temp_vars = utils.combine_vars(self.vars, self.vars_file_vars) - temp_vars = utils.combine_vars(temp_vars, mv) - temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars) - include_file = template(dirname, tokens[0], temp_vars) - include_filename = utils.path_dwim(dirname, include_file) - - data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password) - if 'role_name' in x and data is not None: - for y in data: - if isinstance(y, dict) and 'include' in y: - y['role_name'] = new_role - loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role) - results += loaded - elif type(x) == dict: - task = Task( - self, x, - module_vars=task_vars, - play_vars=self.vars, - play_file_vars=self.vars_file_vars, - 
role_vars=self.role_vars, - role_params=role_params, - default_vars=default_vars, - additional_conditions=list(additional_conditions), - role_name=role_name - ) - results.append(task) - else: - raise Exception("unexpected task type") +from ansible.errors import AnsibleError, AnsibleParserError + +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.base import Base +from ansible.playbook.become import Become +from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles +from ansible.playbook.role import Role +from ansible.playbook.taggable import Taggable +from ansible.playbook.block import Block - for x in results: - if self.tags is not None: - x.tags.extend(self.tags) +from ansible.utils.vars import combine_vars - return results - # ************************************************* +__all__ = ['Play'] - def tasks(self): - ''' return task objects for this play ''' - return self._tasks - def handlers(self): - ''' return handler objects for this play ''' - return self._handlers +class Play(Base, Taggable, Become): - # ************************************************* + """ + A play is a language feature that represents a list of roles and/or + task/handler blocks to execute on a given set of hosts. - def _get_vars(self): - ''' load the vars section from a play, accounting for all sorts of variable features - including loading from yaml files, prompting, and conditional includes of the first - file found in a list. ''' + Usage: - if self.vars is None: - self.vars = {} + Play.load(datastructure) -> Play + Play.something(...) 
+ """ - if type(self.vars) not in [dict, list]: - raise errors.AnsibleError("'vars' section must contain only key/value pairs") + # ================================================================================= + # Connection-Related Attributes - vars = {} + # TODO: generalize connection + _accelerate = FieldAttribute(isa='bool', default=False) + _accelerate_ipv6 = FieldAttribute(isa='bool', default=False) + _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port - # translate a list of vars into a dict - if type(self.vars) == list: - for item in self.vars: - if getattr(item, 'items', None) is None: - raise errors.AnsibleError("expecting a key-value pair in 'vars' section") - k, v = item.items()[0] - vars[k] = v - else: - vars.update(self.vars) + # Connection + _gather_facts = FieldAttribute(isa='string', default='smart') + _hosts = FieldAttribute(isa='list', default=[], required=True) + _name = FieldAttribute(isa='string', default='') - if type(self.vars_prompt) == list: - for var in self.vars_prompt: - if not 'name' in var: - raise errors.AnsibleError("'vars_prompt' item is missing 'name:'") + # Variable Attributes + _vars_files = FieldAttribute(isa='list', default=[]) + _vars_prompt = FieldAttribute(isa='dict', default=dict()) + _vault_password = FieldAttribute(isa='string') - vname = var['name'] - prompt = var.get("prompt", vname) - default = var.get("default", None) - private = var.get("private", True) + # Block (Task) Lists Attributes + _handlers = FieldAttribute(isa='list', default=[]) + _pre_tasks = FieldAttribute(isa='list', default=[]) + _post_tasks = FieldAttribute(isa='list', default=[]) + _tasks = FieldAttribute(isa='list', default=[]) - confirm = var.get("confirm", False) - encrypt = var.get("encrypt", None) - salt_size = var.get("salt_size", None) - salt = var.get("salt", None) - - if vname not in self.playbook.extra_vars: - vars[vname] = self.playbook.callbacks.on_vars_prompt( - vname, private, prompt, encrypt, 
confirm, salt_size, salt, default - ) + # Role Attributes + _roles = FieldAttribute(isa='list', default=[]) - elif type(self.vars_prompt) == dict: - for (vname, prompt) in self.vars_prompt.iteritems(): - prompt_msg = "%s: " % prompt - if vname not in self.playbook.extra_vars: - vars[vname] = self.playbook.callbacks.on_vars_prompt( - varname=vname, private=False, prompt=prompt_msg, default=None - ) + # Flag/Setting Attributes + _any_errors_fatal = FieldAttribute(isa='bool', default=False) + _max_fail_percentage = FieldAttribute(isa='string', default='0') + _serial = FieldAttribute(isa='int', default=0) + _strategy = FieldAttribute(isa='string', default='linear') - else: - raise errors.AnsibleError("'vars_prompt' section is malformed, see docs") + # ================================================================================= - if type(self.playbook.extra_vars) == dict: - vars = utils.combine_vars(vars, self.playbook.extra_vars) + def __init__(self): + super(Play, self).__init__() - return vars + def __repr__(self): + return self.get_name() - # ************************************************* + def get_name(self): + ''' return the name of the Play ''' + return "PLAY: %s" % self._attributes.get('name') - def update_vars_files(self, hosts, vault_password=None): - ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in ''' - - # now loop through all the hosts... - for h in hosts: - self._update_vars_files_for_host(h, vault_password=vault_password) - - # ************************************************* - - def compare_tags(self, tags): - ''' given a list of tags that the user has specified, return two lists: - matched_tags: tags were found within the current play and match those given - by the user - unmatched_tags: tags that were found within the current play but do not match - any provided by the user ''' - - # gather all the tags in all the tasks and handlers into one list - # FIXME: isn't this in self.tags already? 
- - all_tags = [] - for task in self._tasks: - if not task.meta: - all_tags.extend(task.tags) - for handler in self._handlers: - all_tags.extend(handler.tags) - - # compare the lists of tags using sets and return the matched and unmatched - all_tags_set = set(all_tags) - tags_set = set(tags) - - matched_tags = all_tags_set.intersection(tags_set) - unmatched_tags = all_tags_set.difference(tags_set) - - a = set(['always']) - u = set(['untagged']) - if 'always' in all_tags_set: - matched_tags = matched_tags.union(a) - unmatched_tags = all_tags_set.difference(a) - - if 'all' in tags_set: - matched_tags = matched_tags.union(all_tags_set) - unmatched_tags = set() - - if 'tagged' in tags_set: - matched_tags = all_tags_set.difference(u) - unmatched_tags = u - - if 'untagged' in tags_set and 'untagged' in all_tags_set: - matched_tags = matched_tags.union(u) - unmatched_tags = unmatched_tags.difference(u) - - return matched_tags, unmatched_tags - - # ************************************************* - - def _late_merge_role_tags(self): - # build a local dict of tags for roles - role_tags = {} - for task in self._ds['tasks']: - if 'role_name' in task: - this_role = task['role_name'] + "-" + task['vars']['role_uuid'] - - if this_role not in role_tags: - role_tags[this_role] = [] - - if 'tags' in task['vars']: - if isinstance(task['vars']['tags'], basestring): - role_tags[this_role] += shlex.split(task['vars']['tags']) - else: - role_tags[this_role] += task['vars']['tags'] - - # apply each role's tags to its tasks - for idx, val in enumerate(self._tasks): - if getattr(val, 'role_name', None) is not None: - this_role = val.role_name + "-" + val.module_vars['role_uuid'] - if this_role in role_tags: - self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role])) - - # ************************************************* - - def _update_vars_files_for_host(self, host, vault_password=None): + @staticmethod + def load(data, variable_manager=None, loader=None): + p = 
Play() + return p.load_data(data, variable_manager=variable_manager, loader=loader) - def generate_filenames(host, inject, filename): - - """ Render the raw filename into 3 forms """ + def preprocess_data(self, ds): + ''' + Adjusts play datastructure to cleanup old/legacy items + ''' - # filename2 is the templated version of the filename, which will - # be fully rendered if any variables contained within it are - # non-inventory related - filename2 = template(self.basedir, filename, self.vars) + assert isinstance(ds, dict) - # filename3 is the same as filename2, but when the host object is - # available, inventory variables will be expanded as well since the - # name is templated with the injected variables - filename3 = filename2 - if host is not None: - filename3 = template(self.basedir, filename2, inject) + # The use of 'user' in the Play datastructure was deprecated to + # line up with the same change for Tasks, due to the fact that + # 'user' conflicted with the user module. + if 'user' in ds: + # this should never happen, but error out with a helpful message + # to the user if it does... + if 'remote_user' in ds: + raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds) - # filename4 is the dwim'd path, but may also be mixed-scope, so we use - # both play scoped vars and host scoped vars to template the filepath - if utils.contains_vars(filename3) and host is not None: - inject.update(self.vars) - filename4 = template(self.basedir, filename3, inject) - filename4 = utils.path_dwim(self.basedir, filename4) + ds['remote_user'] = ds['user'] + del ds['user'] + + return super(Play, self).preprocess_data(ds) + + def _load_vars(self, attr, ds): + ''' + Vars in a play can be specified either as a dictionary directly, or + as a list of dictionaries. If the later, this method will turn the + list into a single dictionary. 
+ ''' + + try: + if isinstance(ds, dict): + return ds + elif isinstance(ds, list): + all_vars = dict() + for item in ds: + if not isinstance(item, dict): + raise ValueError + all_vars = combine_vars(all_vars, item) + return all_vars else: - filename4 = utils.path_dwim(self.basedir, filename3) - - return filename2, filename3, filename4 - - - def update_vars_cache(host, data, target_filename=None): - - """ update a host's varscache with new var data """ - - self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data) - if target_filename: - self.playbook.callbacks.on_import_for_host(host, target_filename) - - def process_files(filename, filename2, filename3, filename4, host=None): - - """ pseudo-algorithm for deciding where new vars should go """ - - data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password) - if data: - if type(data) != dict: - raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4) - if host is not None: - target_filename = None - if utils.contains_vars(filename2): - if not utils.contains_vars(filename3): - target_filename = filename3 - else: - target_filename = filename4 - update_vars_cache(host, data, target_filename=target_filename) - else: - self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data) - # we did process this file - return True - # we did not process this file - return False - - # Enforce that vars_files is always a list - if type(self.vars_files) != list: - self.vars_files = [ self.vars_files ] - - # Build an inject if this is a host run started by self.update_vars_files - if host is not None: - inject = {} - inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password)) - inject.update(self.playbook.SETUP_CACHE.get(host, {})) - inject.update(self.playbook.VARS_CACHE.get(host, {})) - else: - inject = None - - processed = [] - for filename in self.vars_files: - if type(filename) == list: - # loop over all 
filenames, loading the first one, and failing if none found - found = False - sequence = [] - for real_filename in filename: - filename2, filename3, filename4 = generate_filenames(host, inject, real_filename) - sequence.append(filename4) - if os.path.exists(filename4): - found = True - if process_files(filename, filename2, filename3, filename4, host=host): - processed.append(filename) - elif host is not None: - self.playbook.callbacks.on_not_import_for_host(host, filename4) - if found: - break - if not found and host is not None: - raise errors.AnsibleError( - "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence) - ) + raise ValueError + except ValueError: + raise AnsibleParserError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds) + + def _load_tasks(self, attr, ds): + ''' + Loads a list of blocks from a list which may be mixed tasks/blocks. + Bare tasks outside of a block are given an implicit block. + ''' + return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) + + def _load_pre_tasks(self, attr, ds): + ''' + Loads a list of blocks from a list which may be mixed tasks/blocks. + Bare tasks outside of a block are given an implicit block. + ''' + return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) + + def _load_post_tasks(self, attr, ds): + ''' + Loads a list of blocks from a list which may be mixed tasks/blocks. + Bare tasks outside of a block are given an implicit block. + ''' + return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) + + def _load_handlers(self, attr, ds): + ''' + Loads a list of blocks from a list which may be mixed handlers/blocks. + Bare handlers outside of a block are given an implicit block. 
+ ''' + return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader) + + def _load_roles(self, attr, ds): + ''' + Loads and returns a list of RoleInclude objects from the datastructure + list of role definitions and creates the Role from those objects + ''' + + role_includes = load_list_of_roles(ds, variable_manager=self._variable_manager, loader=self._loader) + + roles = [] + for ri in role_includes: + roles.append(Role.load(ri)) + return roles + + # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set + + def _compile_roles(self): + ''' + Handles the role compilation step, returning a flat list of tasks + with the lowest level dependencies first. For example, if a role R + has a dependency D1, which also has a dependency D2, the tasks from + D2 are merged first, followed by D1, and lastly by the tasks from + the parent role R last. This is done for all roles in the Play. + ''' + + block_list = [] + + if len(self.roles) > 0: + for r in self.roles: + block_list.extend(r.compile(play=self)) + + return block_list + + def compile(self): + ''' + Compiles and returns the task list for this play, compiled from the + roles (which are themselves compiled recursively) and/or the list of + tasks specified in the play. + ''' + + block_list = [] + + block_list.extend(self.pre_tasks) + block_list.extend(self._compile_roles()) + block_list.extend(self.tasks) + block_list.extend(self.post_tasks) + + return block_list + + def get_vars(self): + return self.vars.copy() + + def get_vars_files(self): + return self.vars_files + + def get_handlers(self): + return self.handlers[:] + + def get_roles(self): + return self.roles[:] + + def get_tasks(self): + tasklist = [] + for task in self.pre_tasks + self.tasks + self.post_tasks: + if isinstance(task, Block): + tasklist.append(task.block + task.rescue + task.always) else: - # just one filename supplied, load it! 
- filename2, filename3, filename4 = generate_filenames(host, inject, filename) - if utils.contains_vars(filename4): - continue - if process_files(filename, filename2, filename3, filename4, host=host): - processed.append(filename) - - return processed + tasklist.append(task) + return tasklist + + def serialize(self): + data = super(Play, self).serialize() + + roles = [] + for role in self.get_roles(): + roles.append(role.serialize()) + data['roles'] = roles + + return data + + def deserialize(self, data): + super(Play, self).deserialize(data) + + if 'roles' in data: + role_data = data.get('roles', []) + roles = [] + for role in role_data: + r = Role() + r.deserialize(role) + roles.append(r) + + setattr(self, 'roles', roles) + del data['roles'] + diff --git a/v2/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py similarity index 100% rename from v2/ansible/playbook/playbook_include.py rename to lib/ansible/playbook/playbook_include.py diff --git a/v2/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py similarity index 100% rename from v2/ansible/playbook/role/__init__.py rename to lib/ansible/playbook/role/__init__.py diff --git a/v2/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py similarity index 100% rename from v2/ansible/playbook/role/definition.py rename to lib/ansible/playbook/role/definition.py diff --git a/v2/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py similarity index 100% rename from v2/ansible/playbook/role/include.py rename to lib/ansible/playbook/role/include.py diff --git a/v2/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py similarity index 100% rename from v2/ansible/playbook/role/metadata.py rename to lib/ansible/playbook/role/metadata.py diff --git a/v2/ansible/playbook/role/requirement.py b/lib/ansible/playbook/role/requirement.py similarity index 100% rename from v2/ansible/playbook/role/requirement.py rename to 
lib/ansible/playbook/role/requirement.py diff --git a/v2/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py similarity index 100% rename from v2/ansible/playbook/taggable.py rename to lib/ansible/playbook/taggable.py diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 70c1bc8df6bb00..060602579851d3 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -15,332 +15,296 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from ansible import errors -from ansible import utils -from ansible.module_utils.splitter import split_args -import os -import ansible.utils.template as template -import sys - -class Task(object): - - _t_common = [ - 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass', - 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when', - 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log', - 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user', - 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when', - ] - - __slots__ = [ - 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file', - 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars', - 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars', - ] + _t_common - - # to prevent typos and such - VALID_KEYS = frozenset([ - 'async', 'connection', 'include', 'poll', - ] + _t_common) - - def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None): - ''' constructor loads from a task or handler datastructure ''' - - # meta directives are used to tell things like ansible/playbook to run - # operations like handler execution. 
Meta tasks are not executed - # normally. - if 'meta' in ds: - self.meta = ds['meta'] - self.tags = [] - self.module_vars = module_vars - self.role_name = role_name - return - else: - self.meta = None - - - library = os.path.join(play.basedir, 'library') - if os.path.exists(library): - utils.plugins.module_finder.add_directory(library) - - for x in ds.keys(): - - # code to allow for saying "modulename: args" versus "action: modulename args" - if x in utils.plugins.module_finder: - - if 'action' in ds: - raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action']))) - if isinstance(ds[x], dict): - if 'args' in ds: - raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x])))) - ds['args'] = ds[x] - ds[x] = '' - elif ds[x] is None: - ds[x] = '' - if not isinstance(ds[x], basestring): - raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x]))) - ds['action'] = x + " " + ds[x] - ds.pop(x) - - # code to allow "with_glob" and to reference a lookup plugin named glob - elif x.startswith("with_"): - if isinstance(ds[x], basestring): - param = ds[x].strip() - - plugin_name = x.replace("with_","") - if plugin_name in utils.plugins.lookup_loader: - ds['items_lookup_plugin'] = plugin_name - ds['items_lookup_terms'] = ds[x] - ds.pop(x) - else: - raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name)) - - elif x in [ 'changed_when', 'failed_when', 'when']: - if isinstance(ds[x], basestring): - param = ds[x].strip() - # Only a variable, no logic - if (param.startswith('{{') and - param.find('}}') == len(ds[x]) - 2 and - param.find('|') == -1): - utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.") - elif x.startswith("when_"): - utils.deprecated("The 'when_' conditional has been 
removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) - - if 'when' in ds: - raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action']))) - when_name = x.replace("when_","") - ds['when'] = "%s %s" % (when_name, ds[x]) - ds.pop(x) - elif not x in Task.VALID_KEYS: - raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x) - - self.module_vars = module_vars - self.play_vars = play_vars - self.play_file_vars = play_file_vars - self.role_vars = role_vars - self.role_params = role_params - self.default_vars = default_vars - self.play = play - - # load various attributes - self.name = ds.get('name', None) - self.tags = [ 'untagged' ] - self.register = ds.get('register', None) - self.environment = ds.get('environment', play.environment) - self.role_name = role_name - self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log - self.run_once = utils.boolean(ds.get('run_once', 'false')) - - #Code to allow do until feature in a Task - if 'until' in ds: - if not ds.get('register'): - raise errors.AnsibleError("register keyword is mandatory when using do until feature") - self.module_vars['delay'] = ds.get('delay', 5) - self.module_vars['retries'] = ds.get('retries', 3) - self.module_vars['register'] = ds.get('register', None) - self.until = ds.get('until') - self.module_vars['until'] = self.until - - # rather than simple key=value args on the options line, these represent structured data and the values - # can be hashes and lists, not just scalars - self.args = ds.get('args', {}) - - # get remote_user for task, then play, then playbook - if ds.get('remote_user') is not None: - self.remote_user = ds.get('remote_user') - elif ds.get('remote_user', play.remote_user) is not None: - self.remote_user = ds.get('remote_user', play.remote_user) - else: - self.remote_user = ds.get('remote_user', play.playbook.remote_user) - - 
# Fail out if user specifies privilege escalation params in conflict - if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')): - raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name) - - if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')): - raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "sudo_pass" in task: %s' % self.name) - - if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')): - raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name) - - self.become = utils.boolean(ds.get('become', play.become)) - self.become_method = ds.get('become_method', play.become_method) - self.become_user = ds.get('become_user', play.become_user) - self.become_pass = ds.get('become_pass', play.playbook.become_pass) - - # set only if passed in current task data - if 'sudo' in ds or 'sudo_user' in ds: - self.become_method='sudo' - - if 'sudo' in ds: - self.become=ds['sudo'] - del ds['sudo'] +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.errors import AnsibleError + +from ansible.parsing.mod_args import ModuleArgsParser +from ansible.parsing.splitter import parse_kv +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping + +from ansible.plugins import module_loader, lookup_loader + +from ansible.playbook.attribute import Attribute, FieldAttribute +from ansible.playbook.base import Base +from ansible.playbook.become import Become +from ansible.playbook.block 
import Block +from ansible.playbook.conditional import Conditional +from ansible.playbook.role import Role +from ansible.playbook.taggable import Taggable + +__all__ = ['Task'] + +class Task(Base, Conditional, Taggable, Become): + + """ + A task is a language feature that represents a call to a module, with given arguments and other parameters. + A handler is a subclass of a task. + + Usage: + + Task.load(datastructure) -> Task + Task.something(...) + """ + + # ================================================================================= + # ATTRIBUTES + # load_ and + # validate_ + # will be used if defined + # might be possible to define others + + _args = FieldAttribute(isa='dict', default=dict()) + _action = FieldAttribute(isa='string') + + _always_run = FieldAttribute(isa='bool') + _any_errors_fatal = FieldAttribute(isa='bool') + _async = FieldAttribute(isa='int', default=0) + _changed_when = FieldAttribute(isa='string') + _delay = FieldAttribute(isa='int', default=5) + _delegate_to = FieldAttribute(isa='string') + _failed_when = FieldAttribute(isa='string') + _first_available_file = FieldAttribute(isa='list') + _ignore_errors = FieldAttribute(isa='bool') + + _loop = FieldAttribute(isa='string', private=True) + _loop_args = FieldAttribute(isa='list', private=True) + _local_action = FieldAttribute(isa='string') + + # FIXME: this should not be a Task + _meta = FieldAttribute(isa='string') + + _name = FieldAttribute(isa='string', default='') + + _notify = FieldAttribute(isa='list') + _poll = FieldAttribute(isa='int') + _register = FieldAttribute(isa='string') + _retries = FieldAttribute(isa='int', default=1) + _run_once = FieldAttribute(isa='bool') + _until = FieldAttribute(isa='list') # ? 
+ + def __init__(self, block=None, role=None, task_include=None): + ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' + + self._block = block + self._role = role + self._task_include = task_include + + super(Task, self).__init__() + + def get_name(self): + ''' return the name of the task ''' + + if self._role and self.name: + return "%s : %s" % (self._role.get_name(), self.name) + elif self.name: + return self.name + else: + flattened_args = self._merge_kv(self.args) + if self._role: + return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args) + else: + return "%s %s" % (self.action, flattened_args) + + def _merge_kv(self, ds): + if ds is None: + return "" + elif isinstance(ds, basestring): + return ds + elif isinstance(ds, dict): + buf = "" + for (k,v) in ds.iteritems(): + if k.startswith('_'): + continue + buf = buf + "%s=%s " % (k,v) + buf = buf.strip() + return buf + + @staticmethod + def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None): + t = Task(block=block, role=role, task_include=task_include) + return t.load_data(data, variable_manager=variable_manager, loader=loader) + + def __repr__(self): + ''' returns a human readable representation of the task ''' + return "TASK: %s" % self.get_name() + + def _preprocess_loop(self, ds, new_ds, k, v): + ''' take a lookup plugin name and store it correctly ''' + + loop_name = k.replace("with_", "") + if new_ds.get('loop') is not None: + raise AnsibleError("duplicate loop in task: %s" % loop_name) + new_ds['loop'] = loop_name + new_ds['loop_args'] = v + + def preprocess_data(self, ds): + ''' + tasks are especially complex arguments so need pre-processing. + keep it short. 
+ ''' + + assert isinstance(ds, dict) + + # the new, cleaned datastructure, which will have legacy + # items reduced to a standard structure suitable for the + # attributes of the task class + new_ds = AnsibleMapping() + if isinstance(ds, AnsibleBaseYAMLObject): + new_ds.ansible_pos = ds.ansible_pos + + # use the args parsing class to determine the action, args, + # and the delegate_to value from the various possible forms + # supported as legacy + args_parser = ModuleArgsParser(task_ds=ds) + (action, args, delegate_to) = args_parser.parse() + + new_ds['action'] = action + new_ds['args'] = args + new_ds['delegate_to'] = delegate_to + + for (k,v) in ds.iteritems(): + if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell': + # we don't want to re-assign these values, which were + # determined by the ModuleArgsParser() above + continue + elif k.replace("with_", "") in lookup_loader: + self._preprocess_loop(ds, new_ds, k, v) else: - self.become=True - if 'sudo_user' in ds: - self.become_user = ds['sudo_user'] - del ds['sudo_user'] - if 'sudo_pass' in ds: - self.become_pass = ds['sudo_pass'] - del ds['sudo_pass'] - - elif 'su' in ds or 'su_user' in ds: - self.become_method='su' - - if 'su' in ds: - self.become=ds['su'] + new_ds[k] = v + + return super(Task, self).preprocess_data(new_ds) + + def post_validate(self, templar): + ''' + Override of base class post_validate, to also do final validation on + the block and task include (if any) to which this task belongs. 
+ ''' + + if self._block: + self._block.post_validate(templar) + if self._task_include: + self._task_include.post_validate(templar) + + super(Task, self).post_validate(templar) + + def get_vars(self): + all_vars = self.vars.copy() + if self._block: + all_vars.update(self._block.get_vars()) + if self._task_include: + all_vars.update(self._task_include.get_vars()) + + all_vars.update(self.serialize()) + + if 'tags' in all_vars: + del all_vars['tags'] + if 'when' in all_vars: + del all_vars['when'] + return all_vars + + def copy(self, exclude_block=False): + new_me = super(Task, self).copy() + + new_me._block = None + if self._block and not exclude_block: + new_me._block = self._block.copy() + + new_me._role = None + if self._role: + new_me._role = self._role + + new_me._task_include = None + if self._task_include: + new_me._task_include = self._task_include.copy() + + return new_me + + def serialize(self): + data = super(Task, self).serialize() + + if self._block: + data['block'] = self._block.serialize() + + if self._role: + data['role'] = self._role.serialize() + + if self._task_include: + data['task_include'] = self._task_include.serialize() + + return data + + def deserialize(self, data): + + # import is here to avoid import loops + #from ansible.playbook.task_include import TaskInclude + + block_data = data.get('block') + + if block_data: + b = Block() + b.deserialize(block_data) + self._block = b + del data['block'] + + role_data = data.get('role') + if role_data: + r = Role() + r.deserialize(role_data) + self._role = r + del data['role'] + + ti_data = data.get('task_include') + if ti_data: + #ti = TaskInclude() + ti = Task() + ti.deserialize(ti_data) + self._task_include = ti + del data['task_include'] + + super(Task, self).deserialize(data) + + def evaluate_conditional(self, all_vars): + if self._block is not None: + if not self._block.evaluate_conditional(all_vars): + return False + if self._task_include is not None: + if not 
self._task_include.evaluate_conditional(all_vars): + return False + return super(Task, self).evaluate_conditional(all_vars) + + def set_loader(self, loader): + ''' + Sets the loader on this object and recursively on parent, child objects. + This is used primarily after the Task has been serialized/deserialized, which + does not preserve the loader. + ''' + + self._loader = loader + + if self._block: + self._block.set_loader(loader) + if self._task_include: + self._task_include.set_loader(loader) + + def _get_parent_attribute(self, attr, extend=False): + ''' + Generic logic to get the attribute or parent attribute for a task value. + ''' + value = self._attributes[attr] + if self._block and (not value or extend): + parent_value = getattr(self._block, attr) + if extend: + value = self._extend_value(value, parent_value) else: - self.become=True - del ds['su'] - if 'su_user' in ds: - self.become_user = ds['su_user'] - del ds['su_user'] - if 'su_pass' in ds: - self.become_pass = ds['su_pass'] - del ds['su_pass'] - - # Both are defined - if ('action' in ds) and ('local_action' in ds): - raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together") - # Both are NOT defined - elif (not 'action' in ds) and (not 'local_action' in ds): - raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', '')) - # Only one of them is defined - elif 'local_action' in ds: - self.action = ds.get('local_action', '') - self.delegate_to = '127.0.0.1' - else: - self.action = ds.get('action', '') - self.delegate_to = ds.get('delegate_to', None) - self.transport = ds.get('connection', ds.get('transport', play.transport)) - - if isinstance(self.action, dict): - if 'module' not in self.action: - raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action)) - if self.args: - raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" 
% ds.get('name', '%s' % self.action)) - self.args = self.action - self.action = self.args.pop('module') - - # delegate_to can use variables - if not (self.delegate_to is None): - # delegate_to: localhost should use local transport - if self.delegate_to in ['127.0.0.1', 'localhost']: - self.transport = 'local' - - # notified by is used by Playbook code to flag which hosts - # need to run a notifier - self.notified_by = [] - - # if no name is specified, use the action line as the name - if self.name is None: - self.name = self.action - - # load various attributes - self.when = ds.get('when', None) - self.changed_when = ds.get('changed_when', None) - self.failed_when = ds.get('failed_when', None) - - # combine the default and module vars here for use in templating - all_vars = self.default_vars.copy() - all_vars = utils.combine_vars(all_vars, self.play_vars) - all_vars = utils.combine_vars(all_vars, self.play_file_vars) - all_vars = utils.combine_vars(all_vars, self.role_vars) - all_vars = utils.combine_vars(all_vars, self.module_vars) - all_vars = utils.combine_vars(all_vars, self.role_params) - - self.async_seconds = ds.get('async', 0) # not async by default - self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars) - self.async_seconds = int(self.async_seconds) - self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds - self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars) - self.async_poll_interval = int(self.async_poll_interval) - self.notify = ds.get('notify', []) - self.first_available_file = ds.get('first_available_file', None) - - self.items_lookup_plugin = ds.get('items_lookup_plugin', None) - self.items_lookup_terms = ds.get('items_lookup_terms', None) - - - self.ignore_errors = ds.get('ignore_errors', False) - self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal) - - self.always_run = ds.get('always_run', False) - - # action 
should be a string - if not isinstance(self.action, basestring): - raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name)) - - # notify can be a string or a list, store as a list - if isinstance(self.notify, basestring): - self.notify = [ self.notify ] - - # split the action line into a module name + arguments - try: - tokens = split_args(self.action) - except Exception, e: - if "unbalanced" in str(e): - raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \ - "Make sure quotes are matched or escaped properly") + value = parent_value + if self._task_include and (not value or extend): + parent_value = getattr(self._task_include, attr) + if extend: + value = self._extend_value(value, parent_value) else: - raise - if len(tokens) < 1: - raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name) - self.module_name = tokens[0] - self.module_args = '' - if len(tokens) > 1: - self.module_args = " ".join(tokens[1:]) - - import_tags = self.module_vars.get('tags',[]) - if type(import_tags) in [int,float]: - import_tags = str(import_tags) - elif type(import_tags) in [str,unicode]: - # allow the user to list comma delimited tags - import_tags = import_tags.split(",") - - # handle mutually incompatible options - incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ] - if len(incompatibles) > 1: - raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task") - - # make first_available_file accessible to Runner code - if self.first_available_file: - self.module_vars['first_available_file'] = self.first_available_file - # make sure that the 'item' variable is set when using - # first_available_file (issue #8220) - if 'item' not in self.module_vars: - self.module_vars['item'] = '' - - if self.items_lookup_plugin is not None: - 
self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin - self.module_vars['items_lookup_terms'] = self.items_lookup_terms - - # allow runner to see delegate_to option - self.module_vars['delegate_to'] = self.delegate_to - - # make some task attributes accessible to Runner code - self.module_vars['ignore_errors'] = self.ignore_errors - self.module_vars['register'] = self.register - self.module_vars['changed_when'] = self.changed_when - self.module_vars['failed_when'] = self.failed_when - self.module_vars['always_run'] = self.always_run - - # tags allow certain parts of a playbook to be run without running the whole playbook - apply_tags = ds.get('tags', None) - if apply_tags is not None: - if type(apply_tags) in [ str, unicode ]: - self.tags.append(apply_tags) - elif type(apply_tags) in [ int, float ]: - self.tags.append(str(apply_tags)) - elif type(apply_tags) == list: - self.tags.extend(apply_tags) - self.tags.extend(import_tags) - - if len(self.tags) > 1: - self.tags.remove('untagged') - - if additional_conditions: - new_conditions = additional_conditions[:] - if self.when: - new_conditions.append(self.when) - self.when = new_conditions + value = parent_value + return value + diff --git a/v2/ansible/playbook/vars.py b/lib/ansible/playbook/vars.py similarity index 100% rename from v2/ansible/playbook/vars.py rename to lib/ansible/playbook/vars.py diff --git a/v2/ansible/playbook/vars_file.py b/lib/ansible/playbook/vars_file.py similarity index 100% rename from v2/ansible/playbook/vars_file.py rename to lib/ansible/playbook/vars_file.py diff --git a/v2/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py similarity index 100% rename from v2/ansible/plugins/__init__.py rename to lib/ansible/plugins/__init__.py diff --git a/v2/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py similarity index 100% rename from v2/ansible/plugins/action/__init__.py rename to lib/ansible/plugins/action/__init__.py diff --git 
a/v2/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py similarity index 100% rename from v2/ansible/plugins/action/add_host.py rename to lib/ansible/plugins/action/add_host.py diff --git a/v2/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py similarity index 100% rename from v2/ansible/plugins/action/assemble.py rename to lib/ansible/plugins/action/assemble.py diff --git a/v2/ansible/plugins/action/assert.py b/lib/ansible/plugins/action/assert.py similarity index 100% rename from v2/ansible/plugins/action/assert.py rename to lib/ansible/plugins/action/assert.py diff --git a/v2/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py similarity index 100% rename from v2/ansible/plugins/action/async.py rename to lib/ansible/plugins/action/async.py diff --git a/v2/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py similarity index 100% rename from v2/ansible/plugins/action/copy.py rename to lib/ansible/plugins/action/copy.py diff --git a/v2/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py similarity index 100% rename from v2/ansible/plugins/action/debug.py rename to lib/ansible/plugins/action/debug.py diff --git a/v2/ansible/plugins/action/fail.py b/lib/ansible/plugins/action/fail.py similarity index 100% rename from v2/ansible/plugins/action/fail.py rename to lib/ansible/plugins/action/fail.py diff --git a/v2/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py similarity index 100% rename from v2/ansible/plugins/action/fetch.py rename to lib/ansible/plugins/action/fetch.py diff --git a/v2/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py similarity index 100% rename from v2/ansible/plugins/action/group_by.py rename to lib/ansible/plugins/action/group_by.py diff --git a/v2/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py similarity index 100% rename from 
v2/ansible/plugins/action/include_vars.py rename to lib/ansible/plugins/action/include_vars.py diff --git a/v2/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py similarity index 100% rename from v2/ansible/plugins/action/normal.py rename to lib/ansible/plugins/action/normal.py diff --git a/v2/ansible/plugins/action/patch.py b/lib/ansible/plugins/action/patch.py similarity index 100% rename from v2/ansible/plugins/action/patch.py rename to lib/ansible/plugins/action/patch.py diff --git a/v2/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py similarity index 100% rename from v2/ansible/plugins/action/pause.py rename to lib/ansible/plugins/action/pause.py diff --git a/v2/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py similarity index 100% rename from v2/ansible/plugins/action/raw.py rename to lib/ansible/plugins/action/raw.py diff --git a/v2/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py similarity index 100% rename from v2/ansible/plugins/action/script.py rename to lib/ansible/plugins/action/script.py diff --git a/v2/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py similarity index 100% rename from v2/ansible/plugins/action/set_fact.py rename to lib/ansible/plugins/action/set_fact.py diff --git a/v2/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py similarity index 100% rename from v2/ansible/plugins/action/synchronize.py rename to lib/ansible/plugins/action/synchronize.py diff --git a/v2/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py similarity index 100% rename from v2/ansible/plugins/action/template.py rename to lib/ansible/plugins/action/template.py diff --git a/v2/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py similarity index 100% rename from v2/ansible/plugins/action/unarchive.py rename to lib/ansible/plugins/action/unarchive.py diff --git 
a/v2/ansible/plugins/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py similarity index 100% rename from v2/ansible/plugins/cache/__init__.py rename to lib/ansible/plugins/cache/__init__.py diff --git a/v2/ansible/plugins/cache/base.py b/lib/ansible/plugins/cache/base.py similarity index 100% rename from v2/ansible/plugins/cache/base.py rename to lib/ansible/plugins/cache/base.py diff --git a/v2/ansible/plugins/cache/memcached.py b/lib/ansible/plugins/cache/memcached.py similarity index 100% rename from v2/ansible/plugins/cache/memcached.py rename to lib/ansible/plugins/cache/memcached.py diff --git a/v2/ansible/plugins/cache/memory.py b/lib/ansible/plugins/cache/memory.py similarity index 100% rename from v2/ansible/plugins/cache/memory.py rename to lib/ansible/plugins/cache/memory.py diff --git a/v2/ansible/plugins/cache/redis.py b/lib/ansible/plugins/cache/redis.py similarity index 100% rename from v2/ansible/plugins/cache/redis.py rename to lib/ansible/plugins/cache/redis.py diff --git a/v2/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py similarity index 100% rename from v2/ansible/plugins/callback/__init__.py rename to lib/ansible/plugins/callback/__init__.py diff --git a/v2/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py similarity index 100% rename from v2/ansible/plugins/callback/default.py rename to lib/ansible/plugins/callback/default.py diff --git a/v2/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py similarity index 100% rename from v2/ansible/plugins/callback/minimal.py rename to lib/ansible/plugins/callback/minimal.py diff --git a/v2/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py similarity index 100% rename from v2/ansible/plugins/connections/__init__.py rename to lib/ansible/plugins/connections/__init__.py diff --git a/v2/ansible/plugins/connections/accelerate.py b/lib/ansible/plugins/connections/accelerate.py 
similarity index 100% rename from v2/ansible/plugins/connections/accelerate.py rename to lib/ansible/plugins/connections/accelerate.py diff --git a/v2/ansible/plugins/connections/chroot.py b/lib/ansible/plugins/connections/chroot.py similarity index 100% rename from v2/ansible/plugins/connections/chroot.py rename to lib/ansible/plugins/connections/chroot.py diff --git a/v2/ansible/plugins/connections/funcd.py b/lib/ansible/plugins/connections/funcd.py similarity index 100% rename from v2/ansible/plugins/connections/funcd.py rename to lib/ansible/plugins/connections/funcd.py diff --git a/v2/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py similarity index 100% rename from v2/ansible/plugins/connections/jail.py rename to lib/ansible/plugins/connections/jail.py diff --git a/v2/ansible/plugins/connections/libvirt_lxc.py b/lib/ansible/plugins/connections/libvirt_lxc.py similarity index 100% rename from v2/ansible/plugins/connections/libvirt_lxc.py rename to lib/ansible/plugins/connections/libvirt_lxc.py diff --git a/v2/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py similarity index 100% rename from v2/ansible/plugins/connections/local.py rename to lib/ansible/plugins/connections/local.py diff --git a/v2/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py similarity index 100% rename from v2/ansible/plugins/connections/paramiko_ssh.py rename to lib/ansible/plugins/connections/paramiko_ssh.py diff --git a/v2/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py similarity index 100% rename from v2/ansible/plugins/connections/ssh.py rename to lib/ansible/plugins/connections/ssh.py diff --git a/v2/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py similarity index 100% rename from v2/ansible/plugins/connections/winrm.py rename to lib/ansible/plugins/connections/winrm.py diff --git a/v2/ansible/plugins/connections/zone.py 
b/lib/ansible/plugins/connections/zone.py similarity index 100% rename from v2/ansible/plugins/connections/zone.py rename to lib/ansible/plugins/connections/zone.py diff --git a/v2/ansible/plugins/filter b/lib/ansible/plugins/filter similarity index 100% rename from v2/ansible/plugins/filter rename to lib/ansible/plugins/filter diff --git a/v2/ansible/plugins/inventory/__init__.py b/lib/ansible/plugins/inventory/__init__.py similarity index 100% rename from v2/ansible/plugins/inventory/__init__.py rename to lib/ansible/plugins/inventory/__init__.py diff --git a/v2/ansible/plugins/inventory/aggregate.py b/lib/ansible/plugins/inventory/aggregate.py similarity index 100% rename from v2/ansible/plugins/inventory/aggregate.py rename to lib/ansible/plugins/inventory/aggregate.py diff --git a/v2/ansible/plugins/inventory/directory.py b/lib/ansible/plugins/inventory/directory.py similarity index 100% rename from v2/ansible/plugins/inventory/directory.py rename to lib/ansible/plugins/inventory/directory.py diff --git a/v2/ansible/plugins/inventory/ini.py b/lib/ansible/plugins/inventory/ini.py similarity index 100% rename from v2/ansible/plugins/inventory/ini.py rename to lib/ansible/plugins/inventory/ini.py diff --git a/v2/ansible/plugins/lookup/__init__.py b/lib/ansible/plugins/lookup/__init__.py similarity index 100% rename from v2/ansible/plugins/lookup/__init__.py rename to lib/ansible/plugins/lookup/__init__.py diff --git a/v2/ansible/plugins/lookup/cartesian.py b/lib/ansible/plugins/lookup/cartesian.py similarity index 100% rename from v2/ansible/plugins/lookup/cartesian.py rename to lib/ansible/plugins/lookup/cartesian.py diff --git a/v2/ansible/plugins/lookup/csvfile.py b/lib/ansible/plugins/lookup/csvfile.py similarity index 100% rename from v2/ansible/plugins/lookup/csvfile.py rename to lib/ansible/plugins/lookup/csvfile.py diff --git a/v2/ansible/plugins/lookup/dict.py b/lib/ansible/plugins/lookup/dict.py similarity index 100% rename from 
v2/ansible/plugins/lookup/dict.py rename to lib/ansible/plugins/lookup/dict.py diff --git a/v2/ansible/plugins/lookup/dnstxt.py b/lib/ansible/plugins/lookup/dnstxt.py similarity index 100% rename from v2/ansible/plugins/lookup/dnstxt.py rename to lib/ansible/plugins/lookup/dnstxt.py diff --git a/v2/ansible/plugins/lookup/env.py b/lib/ansible/plugins/lookup/env.py similarity index 100% rename from v2/ansible/plugins/lookup/env.py rename to lib/ansible/plugins/lookup/env.py diff --git a/v2/ansible/plugins/lookup/etcd.py b/lib/ansible/plugins/lookup/etcd.py similarity index 100% rename from v2/ansible/plugins/lookup/etcd.py rename to lib/ansible/plugins/lookup/etcd.py diff --git a/v2/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py similarity index 100% rename from v2/ansible/plugins/lookup/file.py rename to lib/ansible/plugins/lookup/file.py diff --git a/v2/ansible/plugins/lookup/fileglob.py b/lib/ansible/plugins/lookup/fileglob.py similarity index 100% rename from v2/ansible/plugins/lookup/fileglob.py rename to lib/ansible/plugins/lookup/fileglob.py diff --git a/v2/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py similarity index 100% rename from v2/ansible/plugins/lookup/first_found.py rename to lib/ansible/plugins/lookup/first_found.py diff --git a/v2/ansible/plugins/lookup/flattened.py b/lib/ansible/plugins/lookup/flattened.py similarity index 100% rename from v2/ansible/plugins/lookup/flattened.py rename to lib/ansible/plugins/lookup/flattened.py diff --git a/v2/ansible/plugins/lookup/indexed_items.py b/lib/ansible/plugins/lookup/indexed_items.py similarity index 100% rename from v2/ansible/plugins/lookup/indexed_items.py rename to lib/ansible/plugins/lookup/indexed_items.py diff --git a/v2/ansible/plugins/lookup/inventory_hostnames.py b/lib/ansible/plugins/lookup/inventory_hostnames.py similarity index 100% rename from v2/ansible/plugins/lookup/inventory_hostnames.py rename to 
lib/ansible/plugins/lookup/inventory_hostnames.py diff --git a/v2/ansible/plugins/lookup/items.py b/lib/ansible/plugins/lookup/items.py similarity index 100% rename from v2/ansible/plugins/lookup/items.py rename to lib/ansible/plugins/lookup/items.py diff --git a/v2/ansible/plugins/lookup/lines.py b/lib/ansible/plugins/lookup/lines.py similarity index 100% rename from v2/ansible/plugins/lookup/lines.py rename to lib/ansible/plugins/lookup/lines.py diff --git a/v2/ansible/plugins/lookup/nested.py b/lib/ansible/plugins/lookup/nested.py similarity index 100% rename from v2/ansible/plugins/lookup/nested.py rename to lib/ansible/plugins/lookup/nested.py diff --git a/v2/ansible/plugins/lookup/password.py b/lib/ansible/plugins/lookup/password.py similarity index 100% rename from v2/ansible/plugins/lookup/password.py rename to lib/ansible/plugins/lookup/password.py diff --git a/v2/ansible/plugins/lookup/pipe.py b/lib/ansible/plugins/lookup/pipe.py similarity index 100% rename from v2/ansible/plugins/lookup/pipe.py rename to lib/ansible/plugins/lookup/pipe.py diff --git a/v2/ansible/plugins/lookup/random_choice.py b/lib/ansible/plugins/lookup/random_choice.py similarity index 100% rename from v2/ansible/plugins/lookup/random_choice.py rename to lib/ansible/plugins/lookup/random_choice.py diff --git a/v2/ansible/plugins/lookup/redis_kv.py b/lib/ansible/plugins/lookup/redis_kv.py similarity index 100% rename from v2/ansible/plugins/lookup/redis_kv.py rename to lib/ansible/plugins/lookup/redis_kv.py diff --git a/v2/ansible/plugins/lookup/sequence.py b/lib/ansible/plugins/lookup/sequence.py similarity index 100% rename from v2/ansible/plugins/lookup/sequence.py rename to lib/ansible/plugins/lookup/sequence.py diff --git a/v2/ansible/plugins/lookup/subelements.py b/lib/ansible/plugins/lookup/subelements.py similarity index 100% rename from v2/ansible/plugins/lookup/subelements.py rename to lib/ansible/plugins/lookup/subelements.py diff --git 
a/v2/ansible/plugins/lookup/template.py b/lib/ansible/plugins/lookup/template.py similarity index 100% rename from v2/ansible/plugins/lookup/template.py rename to lib/ansible/plugins/lookup/template.py diff --git a/v2/ansible/plugins/lookup/together.py b/lib/ansible/plugins/lookup/together.py similarity index 100% rename from v2/ansible/plugins/lookup/together.py rename to lib/ansible/plugins/lookup/together.py diff --git a/v2/ansible/plugins/lookup/url.py b/lib/ansible/plugins/lookup/url.py similarity index 100% rename from v2/ansible/plugins/lookup/url.py rename to lib/ansible/plugins/lookup/url.py diff --git a/v2/ansible/plugins/shell/__init__.py b/lib/ansible/plugins/shell/__init__.py similarity index 100% rename from v2/ansible/plugins/shell/__init__.py rename to lib/ansible/plugins/shell/__init__.py diff --git a/v2/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py similarity index 100% rename from v2/ansible/plugins/shell/csh.py rename to lib/ansible/plugins/shell/csh.py diff --git a/v2/ansible/plugins/shell/fish.py b/lib/ansible/plugins/shell/fish.py similarity index 100% rename from v2/ansible/plugins/shell/fish.py rename to lib/ansible/plugins/shell/fish.py diff --git a/v2/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py similarity index 100% rename from v2/ansible/plugins/shell/powershell.py rename to lib/ansible/plugins/shell/powershell.py diff --git a/v2/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py similarity index 100% rename from v2/ansible/plugins/shell/sh.py rename to lib/ansible/plugins/shell/sh.py diff --git a/v2/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py similarity index 100% rename from v2/ansible/plugins/strategies/__init__.py rename to lib/ansible/plugins/strategies/__init__.py diff --git a/v2/ansible/plugins/strategies/free.py b/lib/ansible/plugins/strategies/free.py similarity index 100% rename from v2/ansible/plugins/strategies/free.py 
rename to lib/ansible/plugins/strategies/free.py diff --git a/v2/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py similarity index 100% rename from v2/ansible/plugins/strategies/linear.py rename to lib/ansible/plugins/strategies/linear.py diff --git a/v2/ansible/plugins/vars/__init__.py b/lib/ansible/plugins/vars/__init__.py similarity index 100% rename from v2/ansible/plugins/vars/__init__.py rename to lib/ansible/plugins/vars/__init__.py diff --git a/v2/ansible/template/__init__.py b/lib/ansible/template/__init__.py similarity index 100% rename from v2/ansible/template/__init__.py rename to lib/ansible/template/__init__.py diff --git a/v2/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py similarity index 100% rename from v2/ansible/template/safe_eval.py rename to lib/ansible/template/safe_eval.py diff --git a/v2/ansible/template/template.py b/lib/ansible/template/template.py similarity index 100% rename from v2/ansible/template/template.py rename to lib/ansible/template/template.py diff --git a/v2/ansible/template/vars.py b/lib/ansible/template/vars.py similarity index 100% rename from v2/ansible/template/vars.py rename to lib/ansible/template/vars.py diff --git a/v2/test-requirements.txt b/lib/ansible/test-requirements.txt similarity index 100% rename from v2/test-requirements.txt rename to lib/ansible/test-requirements.txt diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 7ed07a54c840d3..ae8ccff5952585 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -15,1646 +15,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-import errno -import sys -import re -import os -import shlex -import yaml -import copy -import optparse -import operator -from ansible import errors -from ansible import __version__ -from ansible.utils.display_functions import * -from ansible.utils.plugins import * -from ansible.utils.su_prompts import * -from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s -from ansible.callbacks import display -from ansible.module_utils.splitter import split_args, unquote -from ansible.module_utils.basic import heuristic_log_sanitize -from ansible.utils.unicode import to_bytes, to_unicode -import ansible.constants as C -import ast -import time -import StringIO -import stat -import termios -import tty -import pipes -import random -import difflib -import warnings -import traceback -import getpass -import sys -import subprocess -import contextlib - -from vault import VaultLib - -VERBOSITY=0 - -MAX_FILE_SIZE_FOR_DIFF=1*1024*1024 - -# caching the compilation of the regex used -# to check for lookup calls within data -LOOKUP_REGEX = re.compile(r'lookup\s*\(') -PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})') -CODE_REGEX = re.compile(r'(?:{%|%})') - - -try: - # simplejson can be much faster if it's available - import simplejson as json -except ImportError: - import json - -try: - from yaml import CSafeLoader as Loader -except ImportError: - from yaml import SafeLoader as Loader - -PASSLIB_AVAILABLE = False -try: - import passlib.hash - PASSLIB_AVAILABLE = True -except: - pass - -try: - import builtin -except ImportError: - import __builtin__ as builtin - -KEYCZAR_AVAILABLE=False -try: - try: - # some versions of pycrypto may not have this? 
- from Crypto.pct_warnings import PowmInsecureWarning - except ImportError: - PowmInsecureWarning = RuntimeWarning - - with warnings.catch_warnings(record=True) as warning_handler: - warnings.simplefilter("error", PowmInsecureWarning) - try: - import keyczar.errors as key_errors - from keyczar.keys import AesKey - except PowmInsecureWarning: - system_warning( - "The version of gmp you have installed has a known issue regarding " + \ - "timing vulnerabilities when used with pycrypto. " + \ - "If possible, you should update it (i.e. yum update gmp)." - ) - warnings.resetwarnings() - warnings.simplefilter("ignore") - import keyczar.errors as key_errors - from keyczar.keys import AesKey - KEYCZAR_AVAILABLE=True -except ImportError: - pass - - -############################################################### -# Abstractions around keyczar -############################################################### - -def key_for_hostname(hostname): - # fireball mode is an implementation of ansible firing up zeromq via SSH - # to use no persistent daemons or key management - - if not KEYCZAR_AVAILABLE: - raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes") - - key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR) - if not os.path.exists(key_path): - os.makedirs(key_path, mode=0700) - os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8)) - elif not os.path.isdir(key_path): - raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.') - - if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8): - raise errors.AnsibleError('Incorrect permissions on the private key directory. 
Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))) - - key_path = os.path.join(key_path, hostname) - - # use new AES keys every 2 hours, which means fireball must not allow running for longer either - if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2): - key = AesKey.Generate() - fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)) - fh = os.fdopen(fd, 'w') - fh.write(str(key)) - fh.close() - return key - else: - if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8): - raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path)) - fh = open(key_path) - key = AesKey.Read(fh.read()) - fh.close() - return key - -def encrypt(key, msg): - return key.Encrypt(msg) - -def decrypt(key, msg): - try: - return key.Decrypt(msg) - except key_errors.InvalidSignatureError: - raise errors.AnsibleError("decryption failed") - -############################################################### -# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS -############################################################### - -def read_vault_file(vault_password_file): - """Read a vault password from a file or if executable, execute the script and - retrieve password from STDOUT - """ - if vault_password_file: - this_path = os.path.realpath(os.path.expanduser(vault_password_file)) - if is_executable(this_path): - try: - # STDERR not captured to make it easier for users to prompt for input in their scripts - p = subprocess.Popen(this_path, stdout=subprocess.PIPE) - except OSError, e: - raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e)) - stdout, stderr = p.communicate() - vault_pass = 
stdout.strip('\r\n') - else: - try: - f = open(this_path, "rb") - vault_pass=f.read().strip() - f.close() - except (OSError, IOError), e: - raise errors.AnsibleError("Could not read %s: %s" % (this_path, e)) - - return vault_pass - else: - return None - -def err(msg): - ''' print an error message to stderr ''' - - print >> sys.stderr, msg - -def exit(msg, rc=1): - ''' quit with an error to stdout and a failure code ''' - - err(msg) - sys.exit(rc) - -def jsonify(result, format=False): - ''' format JSON output (uncompressed or uncompressed) ''' - - if result is None: - return "{}" - result2 = result.copy() - for key, value in result2.items(): - if type(value) is str: - result2[key] = value.decode('utf-8', 'ignore') - - indent = None - if format: - indent = 4 - - try: - return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False) - except UnicodeDecodeError: - return json.dumps(result2, sort_keys=True, indent=indent) - -def write_tree_file(tree, hostname, buf): - ''' write something into treedir/hostname ''' - - # TODO: might be nice to append playbook runs per host in a similar way - # in which case, we'd want append mode. - path = os.path.join(tree, hostname) - fd = open(path, "w+") - fd.write(buf) - fd.close() - -def is_failed(result): - ''' is a given JSON result a failed result? ''' - - return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true'])) - -def is_changed(result): - ''' is a given JSON result a changed result? 
''' - - return (result.get('changed', False) in [ True, 'True', 'true']) - -def check_conditional(conditional, basedir, inject, fail_on_undefined=False): - from ansible.utils import template - - if conditional is None or conditional == '': - return True - - if isinstance(conditional, list): - for x in conditional: - if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined): - return False - return True - - if not isinstance(conditional, basestring): - return conditional - - conditional = conditional.replace("jinja2_compare ","") - # allow variable names - if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'): - conditional = to_unicode(inject[conditional], nonstring='simplerepr') - conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined) - original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","") - # a Jinja2 evaluation that results in something Python can eval! - presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional - conditional = template.template(basedir, presented, inject) - val = conditional.strip() - if val == presented: - # the templating failed, meaning most likely a - # variable was undefined. 
If we happened to be - # looking for an undefined variable, return True, - # otherwise fail - if "is undefined" in conditional: - return True - elif "is defined" in conditional: - return False - else: - raise errors.AnsibleError("error while evaluating conditional: %s" % original) - elif val == "True": - return True - elif val == "False": - return False - else: - raise errors.AnsibleError("unable to evaluate conditional: %s" % original) - -def is_executable(path): - '''is the given path executable?''' - return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] - or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] - or stat.S_IXOTH & os.stat(path)[stat.ST_MODE]) - -def unfrackpath(path): - ''' - returns a path that is free of symlinks, environment - variables, relative path traversals and symbols (~) - example: - '$HOME/../../var/mail' becomes '/var/spool/mail' - ''' - return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path)))) - -def prepare_writeable_dir(tree,mode=0777): - ''' make sure a directory exists and is writeable ''' - - # modify the mode to ensure the owner at least - # has read/write access to this directory - mode |= 0700 - - # make sure the tree path is always expanded - # and normalized and free of symlinks - tree = unfrackpath(tree) - - if not os.path.exists(tree): - try: - os.makedirs(tree, mode) - except (IOError, OSError), e: - raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e)) - if not os.access(tree, os.W_OK): - raise errors.AnsibleError("Cannot write to path %s" % tree) - return tree - -def path_dwim(basedir, given): - ''' - make relative paths work like folks expect. - ''' - - if given.startswith("'"): - given = given[1:-1] - - if given.startswith("/"): - return os.path.abspath(given) - elif given.startswith("~"): - return os.path.abspath(os.path.expanduser(given)) - else: - if basedir is None: - basedir = "." 
- return os.path.abspath(os.path.join(basedir, given)) - -def path_dwim_relative(original, dirname, source, playbook_base, check=True): - ''' find one file in a directory one level up in a dir named dirname relative to current ''' - # (used by roles code) - - from ansible.utils import template - - - basedir = os.path.dirname(original) - if os.path.islink(basedir): - basedir = unfrackpath(basedir) - template2 = os.path.join(basedir, dirname, source) - else: - template2 = os.path.join(basedir, '..', dirname, source) - source2 = path_dwim(basedir, template2) - if os.path.exists(source2): - return source2 - obvious_local_path = path_dwim(playbook_base, source) - if os.path.exists(obvious_local_path): - return obvious_local_path - if check: - raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path)) - return source2 # which does not exist - -def repo_url_to_role_name(repo_url): - # gets the role name out of a repo like - # http://git.example.com/repos/repo.git" => "repo" - - if '://' not in repo_url and '@' not in repo_url: - return repo_url - trailing_path = repo_url.split('/')[-1] - if trailing_path.endswith('.git'): - trailing_path = trailing_path[:-4] - if trailing_path.endswith('.tar.gz'): - trailing_path = trailing_path[:-7] - if ',' in trailing_path: - trailing_path = trailing_path.split(',')[0] - return trailing_path - - -def role_spec_parse(role_spec): - # takes a repo and a version like - # git+http://git.example.com/repos/repo.git,v1.0 - # and returns a list of properties such as: - # { - # 'scm': 'git', - # 'src': 'http://git.example.com/repos/repo.git', - # 'version': 'v1.0', - # 'name': 'repo' - # } - - role_spec = role_spec.strip() - role_version = '' - default_role_versions = dict(git='master', hg='tip') - if role_spec == "" or role_spec.startswith("#"): - return (None, None, None, None) - - tokens = [s.strip() for s in role_spec.split(',')] - - # assume https://github.com URLs are git+https:// URLs and not - # 
tarballs unless they end in '.zip' - if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'): - tokens[0] = 'git+' + tokens[0] - - if '+' in tokens[0]: - (scm, role_url) = tokens[0].split('+') - else: - scm = None - role_url = tokens[0] - if len(tokens) >= 2: - role_version = tokens[1] - if len(tokens) == 3: - role_name = tokens[2] - else: - role_name = repo_url_to_role_name(tokens[0]) - if scm and not role_version: - role_version = default_role_versions.get(scm, '') - return dict(scm=scm, src=role_url, version=role_version, name=role_name) - - -def role_yaml_parse(role): - if 'role' in role: - # Old style: {role: "galaxy.role,version,name", other_vars: "here" } - role_info = role_spec_parse(role['role']) - if isinstance(role_info, dict): - # Warning: Slight change in behaviour here. name may be being - # overloaded. Previously, name was only a parameter to the role. - # Now it is both a parameter to the role and the name that - # ansible-galaxy will install under on the local system. 
- if 'name' in role and 'name' in role_info: - del role_info['name'] - role.update(role_info) - else: - # New style: { src: 'galaxy.role,version,name', other_vars: "here" } - if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'): - role["src"] = "git+" + role["src"] - - if '+' in role["src"]: - (scm, src) = role["src"].split('+') - role["scm"] = scm - role["src"] = src - - if 'name' not in role: - role["name"] = repo_url_to_role_name(role["src"]) - - if 'version' not in role: - role['version'] = '' - - if 'scm' not in role: - role['scm'] = None - - return role - - -def json_loads(data): - ''' parse a JSON string and return a data structure ''' - try: - loaded = json.loads(data) - except ValueError,e: - raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e)) - - return loaded - -def _clean_data(orig_data, from_remote=False, from_inventory=False): - ''' remove jinja2 template tags from a string ''' - - if not isinstance(orig_data, basestring): - return orig_data - - # when the data is marked as having come from a remote, we always - # replace any print blocks (ie. {{var}}), however when marked as coming - # from inventory we only replace print blocks that contain a call to - # a lookup plugin (ie. 
{{lookup('foo','bar'))}}) - replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None) - - regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX - - with contextlib.closing(StringIO.StringIO(orig_data)) as data: - # these variables keep track of opening block locations, as we only - # want to replace matched pairs of print/block tags - print_openings = [] - block_openings = [] - for mo in regex.finditer(orig_data): - token = mo.group(0) - token_start = mo.start(0) - - if token[0] == '{': - if token == '{%': - block_openings.append(token_start) - elif token == '{{': - print_openings.append(token_start) - - elif token[1] == '}': - prev_idx = None - if token == '%}' and block_openings: - prev_idx = block_openings.pop() - elif token == '}}' and print_openings: - prev_idx = print_openings.pop() - - if prev_idx is not None: - # replace the opening - data.seek(prev_idx, os.SEEK_SET) - data.write('{#') - # replace the closing - data.seek(token_start, os.SEEK_SET) - data.write('#}') - - else: - assert False, 'Unhandled regex match' - - return data.getvalue() - -def _clean_data_struct(orig_data, from_remote=False, from_inventory=False): - ''' - walk a complex data structure, and use _clean_data() to - remove any template tags that may exist - ''' - if not from_remote and not from_inventory: - raise errors.AnsibleErrors("when cleaning data, you must specify either from_remote or from_inventory") - if isinstance(orig_data, dict): - data = orig_data.copy() - for key in data: - new_key = _clean_data_struct(key, from_remote, from_inventory) - new_val = _clean_data_struct(data[key], from_remote, from_inventory) - if key != new_key: - del data[key] - data[new_key] = new_val - elif isinstance(orig_data, list): - data = orig_data[:] - for i in range(0, len(data)): - data[i] = _clean_data_struct(data[i], from_remote, from_inventory) - elif isinstance(orig_data, basestring): - data = _clean_data(orig_data, from_remote, 
from_inventory) - else: - data = orig_data - return data - -def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False): - ''' this version for module return data only ''' - - orig_data = raw_data - - # ignore stuff like tcgetattr spewage or other warnings - data = filter_leading_non_json_lines(raw_data) - - try: - results = json.loads(data) - except: - if no_exceptions: - return dict(failed=True, parsed=False, msg=raw_data) - else: - raise - - if from_remote: - results = _clean_data_struct(results, from_remote, from_inventory) - - return results - -def serialize_args(args): - ''' - Flattens a dictionary args to a k=v string - ''' - module_args = "" - for (k,v) in args.iteritems(): - if isinstance(v, basestring): - module_args = "%s=%s %s" % (k, pipes.quote(v), module_args) - elif isinstance(v, bool): - module_args = "%s=%s %s" % (k, str(v), module_args) - return module_args.strip() - -def merge_module_args(current_args, new_args): - ''' - merges either a dictionary or string of k=v pairs with another string of k=v pairs, - and returns a new k=v string without duplicates. - ''' - if not isinstance(current_args, basestring): - raise errors.AnsibleError("expected current_args to be a basestring") - # we use parse_kv to split up the current args into a dictionary - final_args = parse_kv(current_args) - if isinstance(new_args, dict): - final_args.update(new_args) - elif isinstance(new_args, basestring): - new_args_kv = parse_kv(new_args) - final_args.update(new_args_kv) - return serialize_args(final_args) - -def parse_yaml(data, path_hint=None): - ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!''' - - stripped_data = data.lstrip() - loaded = None - if stripped_data.startswith("{") or stripped_data.startswith("["): - # since the line starts with { or [ we can infer this is a JSON document. 
- try: - loaded = json.loads(data) - except ValueError, ve: - if path_hint: - raise errors.AnsibleError(path_hint + ": " + str(ve)) - else: - raise errors.AnsibleError(str(ve)) - else: - # else this is pretty sure to be a YAML document - loaded = yaml.load(data, Loader=Loader) - - return loaded - -def process_common_errors(msg, probline, column): - replaced = probline.replace(" ","") - - if ":{{" in replaced and "}}" in replaced: - msg = msg + """ -This one looks easy to fix. YAML thought it was looking for the start of a -hash/dictionary and was confused to see a second "{". Most likely this was -meant to be an ansible template evaluation instead, so we have to give the -parser a small hint that we wanted a string instead. The solution here is to -just quote the entire value. - -For instance, if the original line was: - - app_path: {{ base_path }}/foo - -It should be written as: - - app_path: "{{ base_path }}/foo" -""" - return msg - - elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1: - msg = msg + """ -This one looks easy to fix. There seems to be an extra unquoted colon in the line -and this is confusing the parser. It was only expecting to find one free -colon. The solution is just add some quotes around the colon, or quote the -entire line after the first colon. 
- -For instance, if the original line was: - - copy: src=file.txt dest=/path/filename:with_colon.txt - -It can be written as: - - copy: src=file.txt dest='/path/filename:with_colon.txt' - -Or: - - copy: 'src=file.txt dest=/path/filename:with_colon.txt' - - -""" - return msg - else: - parts = probline.split(":") - if len(parts) > 1: - middle = parts[1].strip() - match = False - unbalanced = False - if middle.startswith("'") and not middle.endswith("'"): - match = True - elif middle.startswith('"') and not middle.endswith('"'): - match = True - if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2: - unbalanced = True - if match: - msg = msg + """ -This one looks easy to fix. It seems that there is a value started -with a quote, and the YAML parser is expecting to see the line ended -with the same kind of quote. For instance: - - when: "ok" in result.stdout - -Could be written as: - - when: '"ok" in result.stdout' - -or equivalently: - - when: "'ok' in result.stdout" - -""" - return msg - - if unbalanced: - msg = msg + """ -We could be wrong, but this one looks like it might be an issue with -unbalanced quotes. If starting a value with a quote, make sure the -line ends with the same set of quotes. 
For instance this arbitrary -example: - - foo: "bad" "wolf" - -Could be written as: - - foo: '"bad" "wolf"' - -""" - return msg - - return msg - -def process_yaml_error(exc, data, path=None, show_content=True): - if hasattr(exc, 'problem_mark'): - mark = exc.problem_mark - if show_content: - if mark.line -1 >= 0: - before_probline = data.split("\n")[mark.line-1] - else: - before_probline = '' - probline = data.split("\n")[mark.line] - arrow = " " * mark.column + "^" - msg = """Syntax Error while loading YAML script, %s -Note: The error may actually appear before this position: line %s, column %s - -%s -%s -%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow) - - unquoted_var = None - if '{{' in probline and '}}' in probline: - if '"{{' not in probline or "'{{" not in probline: - unquoted_var = True - - if not unquoted_var: - msg = process_common_errors(msg, probline, mark.column) - else: - msg = msg + """ -We could be wrong, but this one looks like it might be an issue with -missing quotes. Always quote template expression brackets when they -start a value. For instance: - - with_items: - - {{ foo }} - -Should be written as: - - with_items: - - "{{ foo }}" - -""" - else: - # most likely displaying a file with sensitive content, - # so don't show any of the actual lines of yaml just the - # line number itself - msg = """Syntax error while loading YAML script, %s -The error appears to have been on line %s, column %s, but may actually -be before there depending on the exact syntax problem. -""" % (path, mark.line + 1, mark.column + 1) - - else: - # No problem markers means we have to throw a generic - # "stuff messed up" type message. Sry bud. - if path: - msg = "Could not parse YAML. Check over %s again." % path - else: - msg = "Could not parse YAML." 
- raise errors.AnsibleYAMLValidationFailed(msg) - - -def parse_yaml_from_file(path, vault_password=None): - ''' convert a yaml file to a data structure ''' - - data = None - show_content = True - - try: - data = open(path).read() - except IOError: - raise errors.AnsibleError("file could not read: %s" % path) - - vault = VaultLib(password=vault_password) - if vault.is_encrypted(data): - # if the file is encrypted and no password was specified, - # the decrypt call would throw an error, but we check first - # since the decrypt function doesn't know the file name - if vault_password is None: - raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path) - data = vault.decrypt(data) - show_content = False - - try: - return parse_yaml(data, path_hint=path) - except yaml.YAMLError, exc: - process_yaml_error(exc, data, path, show_content) - -def parse_kv(args): - ''' convert a string of key/value items to a dict ''' - options = {} - if args is not None: - try: - vargs = split_args(args) - except ValueError, ve: - if 'no closing quotation' in str(ve).lower(): - raise errors.AnsibleError("error parsing argument string, try quoting the entire line.") - else: - raise - for x in vargs: - if "=" in x: - k, v = x.split("=",1) - options[k.strip()] = unquote(v.strip()) - return options - -def _validate_both_dicts(a, b): - - if not (isinstance(a, dict) and isinstance(b, dict)): - raise errors.AnsibleError( - "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__) - ) - -def merge_hash(a, b): - ''' recursively merges hash b into a - keys from b take precedence over keys from a ''' - - result = {} - - # we check here as well as in combine_vars() since this - # function can work recursively with nested dicts - _validate_both_dicts(a, b) - - for dicts in a, b: - # next, iterate over b keys and values - for k, v in dicts.iteritems(): - # if there's already such key in a - # and that key contains dict - if k 
in result and isinstance(result[k], dict): - # merge those dicts recursively - result[k] = merge_hash(a[k], v) - else: - # otherwise, just copy a value from b to a - result[k] = v - - return result - -def default(value, function): - ''' syntactic sugar around lazy evaluation of defaults ''' - if value is None: - return function() - return value - - -def _git_repo_info(repo_path): - ''' returns a string containing git branch, commit id and commit date ''' - result = None - if os.path.exists(repo_path): - # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. - if os.path.isfile(repo_path): - try: - gitdir = yaml.safe_load(open(repo_path)).get('gitdir') - # There is a possibility the .git file to have an absolute path. - if os.path.isabs(gitdir): - repo_path = gitdir - else: - repo_path = os.path.join(repo_path[:-4], gitdir) - except (IOError, AttributeError): - return '' - f = open(os.path.join(repo_path, "HEAD")) - branch = f.readline().split('/')[-1].rstrip("\n") - f.close() - branch_path = os.path.join(repo_path, "refs", "heads", branch) - if os.path.exists(branch_path): - f = open(branch_path) - commit = f.readline()[:10] - f.close() - else: - # detached HEAD - commit = branch[:10] - branch = 'detached HEAD' - branch_path = os.path.join(repo_path, "HEAD") - - date = time.localtime(os.stat(branch_path).st_mtime) - if time.daylight == 0: - offset = time.timezone - else: - offset = time.altzone - result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, - time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36) - else: - result = '' - return result - - -def _gitinfo(): - basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') - repo_path = os.path.join(basedir, '.git') - result = _git_repo_info(repo_path) - submodules = os.path.join(basedir, '.gitmodules') - if not os.path.exists(submodules): - return result - f = open(submodules) - for line in f: - tokens = line.strip().split(' ') - if tokens[0] 
== 'path': - submodule_path = tokens[2] - submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git')) - if not submodule_info: - submodule_info = ' not found - use git submodule update --init ' + submodule_path - result += "\n {0}: {1}".format(submodule_path, submodule_info) - f.close() - return result - - -def version(prog): - result = "{0} {1}".format(prog, __version__) - gitinfo = _gitinfo() - if gitinfo: - result = result + " {0}".format(gitinfo) - result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH - return result - -def version_info(gitinfo=False): - if gitinfo: - # expensive call, user with care - ansible_version_string = version('') - else: - ansible_version_string = __version__ - ansible_version = ansible_version_string.split()[0] - ansible_versions = ansible_version.split('.') - for counter in range(len(ansible_versions)): - if ansible_versions[counter] == "": - ansible_versions[counter] = 0 - try: - ansible_versions[counter] = int(ansible_versions[counter]) - except: - pass - if len(ansible_versions) < 3: - for counter in range(len(ansible_versions), 3): - ansible_versions.append(0) - return {'string': ansible_version_string.strip(), - 'full': ansible_version, - 'major': ansible_versions[0], - 'minor': ansible_versions[1], - 'revision': ansible_versions[2]} - -def getch(): - ''' read in a single character ''' - fd = sys.stdin.fileno() - old_settings = termios.tcgetattr(fd) - try: - tty.setraw(sys.stdin.fileno()) - ch = sys.stdin.read(1) - finally: - termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) - return ch - -def sanitize_output(arg_string): - ''' strips private info out of a string ''' - - private_keys = ('password', 'login_password') - - output = [] - for part in arg_string.split(): - try: - (k, v) = part.split('=', 1) - except ValueError: - v = heuristic_log_sanitize(part) - output.append(v) - continue - - if k in private_keys: - v = 'VALUE_HIDDEN' - else: - v = heuristic_log_sanitize(v) - 
output.append('%s=%s' % (k, v)) - - output = ' '.join(output) - return output - - -#################################################################### -# option handling code for /usr/bin/ansible and ansible-playbook -# below this line - -class SortedOptParser(optparse.OptionParser): - '''Optparser which sorts the options by opt before outputting --help''' - - def format_help(self, formatter=None): - self.option_list.sort(key=operator.methodcaller('get_opt_string')) - return optparse.OptionParser.format_help(self, formatter=None) - -def increment_debug(option, opt, value, parser): - global VERBOSITY - VERBOSITY += 1 - -def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, - async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False): - ''' create an options parser for any ansible script ''' - - parser = SortedOptParser(usage, version=version("%prog")) - parser.add_option('-v','--verbose', default=False, action="callback", - callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") - - parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int', - help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS) - parser.add_option('-i', '--inventory-file', dest='inventory', - help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST, - default=constants.DEFAULT_HOST_LIST) - parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", - help="set additional variables as key=value or YAML/JSON", default=[]) - parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user', - help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER) - parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', - help='ask for SSH password') - parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, 
dest='private_key_file', - help='use this file to authenticate the connection') - parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', - help='ask for vault password') - parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE, - dest='vault_password_file', help="vault password file") - parser.add_option('--list-hosts', dest='listhosts', action='store_true', - help='outputs a list of matching hosts; does not execute anything else') - parser.add_option('-M', '--module-path', dest='module_path', - help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH, - default=None) - - if subset_opts: - parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset', - help='further limit selected hosts to an additional pattern') - - parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int', - dest='timeout', - help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT) - - if output_opts: - parser.add_option('-o', '--one-line', dest='one_line', action='store_true', - help='condense output') - parser.add_option('-t', '--tree', dest='tree', default=None, - help='log output to this directory') - - if runas_opts: - # priv user defaults to root later on to enable detecting when this option was given here - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', - help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', - help='ask for su password (deprecated, use become)') - parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo', - help="run operations with sudo (nopasswd) (deprecated, use become)") - parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, - help='desired sudo user (default=root) (deprecated, use 
become)') - parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true', - help='run operations with su (deprecated, use become)') - parser.add_option('-R', '--su-user', default=None, - help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER) - - # consolidated privilege escalation (become) - parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become', - help="run operations with become (nopasswd implied)") - parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string', - help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS))) - parser.add_option('--become-user', default=None, dest='become_user', type='string', - help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER) - parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', - help='ask for privilege escalation password') - - - if connect_opts: - parser.add_option('-c', '--connection', dest='connection', - default=constants.DEFAULT_TRANSPORT, - help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT) - - if async_opts: - parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int', - dest='poll_interval', - help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL) - parser.add_option('-B', '--background', dest='seconds', type='int', default=0, - help='run asynchronously, failing after X seconds (default=N/A)') - - if check_opts: - parser.add_option("-C", "--check", default=False, dest='check', action='store_true', - help="don't make any changes; instead, try to predict some of the changes that may occur" - ) - - if diff_opts: - parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true', - 
help="when changing (small) files and templates, show the differences in those files; works great with --check" - ) - - return parser - -def parse_extra_vars(extra_vars_opts, vault_pass): - extra_vars = {} - for extra_vars_opt in extra_vars_opts: - extra_vars_opt = to_unicode(extra_vars_opt) - if extra_vars_opt.startswith(u"@"): - # Argument is a YAML file (JSON is a subset of YAML) - extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass)) - elif extra_vars_opt and extra_vars_opt[0] in u'[{': - # Arguments as YAML - extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt)) - else: - # Arguments as Key-value - extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt)) - return extra_vars - -def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False): - - vault_pass = None - new_vault_pass = None - - if ask_vault_pass: - vault_pass = getpass.getpass(prompt="Vault password: ") - - if ask_vault_pass and confirm_vault: - vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ") - if vault_pass != vault_pass2: - raise errors.AnsibleError("Passwords do not match") - - if ask_new_vault_pass: - new_vault_pass = getpass.getpass(prompt="New Vault password: ") - - if ask_new_vault_pass and confirm_new: - new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ") - if new_vault_pass != new_vault_pass2: - raise errors.AnsibleError("Passwords do not match") - - # enforce no newline chars at the end of passwords - if vault_pass: - vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip() - if new_vault_pass: - new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip() - - return vault_pass, new_vault_pass - -def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD): - sshpass = None - becomepass = None - vaultpass = None - 
become_prompt = '' - - if ask_pass: - sshpass = getpass.getpass(prompt="SSH password: ") - become_prompt = "%s password[defaults to SSH password]: " % become_method.upper() - if sshpass: - sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') - else: - become_prompt = "%s password: " % become_method.upper() - - if become_ask_pass: - becomepass = getpass.getpass(prompt=become_prompt) - if ask_pass and becomepass == '': - becomepass = sshpass - if becomepass: - becomepass = to_bytes(becomepass) - - if ask_vault_pass: - vaultpass = getpass.getpass(prompt="Vault password: ") - if vaultpass: - vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip() - - return (sshpass, becomepass, vaultpass) - - -def choose_pass_prompt(options): - - if options.ask_su_pass: - return 'su' - elif options.ask_sudo_pass: - return 'sudo' - - return options.become_method - -def normalize_become_options(options): - - options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS - options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER - - if options.become: - pass - elif options.sudo: - options.become = True - options.become_method = 'sudo' - elif options.su: - options.become = True - options.become_method = 'su' - - -def do_encrypt(result, encrypt, salt_size=None, salt=None): - if PASSLIB_AVAILABLE: - try: - crypt = getattr(passlib.hash, encrypt) - except: - raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt) - - if salt_size: - result = crypt.encrypt(result, salt_size=salt_size) - elif salt: - result = crypt.encrypt(result, salt=salt) - else: - result = crypt.encrypt(result) - else: - raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values") - - return result - -def last_non_blank_line(buf): - - all_lines = buf.splitlines() - all_lines.reverse() - for line in all_lines: - if (len(line) 
> 0): - return line - # shouldn't occur unless there's no output - return "" - -def filter_leading_non_json_lines(buf): - ''' - used to avoid random output from SSH at the top of JSON output, like messages from - tcagetattr, or where dropbear spews MOTD on every single command (which is nuts). - - need to filter anything which starts not with '{', '[', ', '=' or is an empty line. - filter only leading lines since multiline JSON is valid. - ''' - - filtered_lines = StringIO.StringIO() - stop_filtering = False - for line in buf.splitlines(): - if stop_filtering or line.startswith('{') or line.startswith('['): - stop_filtering = True - filtered_lines.write(line + '\n') - return filtered_lines.getvalue() - -def boolean(value): - val = str(value) - if val.lower() in [ "true", "t", "y", "1", "yes" ]: - return True - else: - return False - -def make_become_cmd(cmd, user, shell, method, flags=None, exe=None): - """ - helper function for connection plugins to create privilege escalation commands - """ - - randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) - success_key = 'BECOME-SUCCESS-%s' % randbits - prompt = None - becomecmd = None - - shell = shell or '$SHELL' - - if method == 'sudo': - # Rather than detect if sudo wants a password this time, -k makes sudo always ask for - # a password if one is required. Passing a quoted compound command to sudo (or sudo -s) - # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted - # string to the user's shell. We loop reading output until we see the randomly-generated - # sudo prompt set with the -p option. 
- prompt = '[sudo via ansible, key=%s] password: ' % randbits - exe = exe or C.DEFAULT_SUDO_EXE - becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \ - (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd))) - - elif method == 'su': - exe = exe or C.DEFAULT_SU_EXE - flags = flags or C.DEFAULT_SU_FLAGS - becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd))) - - elif method == 'pbrun': - prompt = 'assword:' - exe = exe or 'pbrun' - flags = flags or '' - becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd))) - - elif method == 'pfexec': - exe = exe or 'pfexec' - flags = flags or '' - # No user as it uses it's own exec_attr to figure it out - becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd))) - - if becomecmd is None: - raise errors.AnsibleError("Privilege escalation method not found: %s" % method) - - return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key) - - -def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd): - """ - helper function for connection plugins to create sudo commands - """ - return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe) - - -def make_su_cmd(su_user, executable, cmd): - """ - Helper function for connection plugins to create direct su commands - """ - return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE) - -def get_diff(diff): - # called by --diff usage in playbook and runner via callbacks - # include names in diffs 'before' and 'after' and do diff -U 10 - - try: - with warnings.catch_warnings(): - warnings.simplefilter('ignore') - ret = [] - if 'dst_binary' in diff: - ret.append("diff skipped: destination file appears to be binary\n") - if 'src_binary' in diff: - ret.append("diff skipped: source file appears to be binary\n") - if 'dst_larger' 
in diff: - ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger']) - if 'src_larger' in diff: - ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger']) - if 'before' in diff and 'after' in diff: - if 'before_header' in diff: - before_header = "before: %s" % diff['before_header'] - else: - before_header = 'before' - if 'after_header' in diff: - after_header = "after: %s" % diff['after_header'] - else: - after_header = 'after' - differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10) - for line in list(differ): - ret.append(line) - return u"".join(ret) - except UnicodeDecodeError: - return ">> the files are different, but the diff library cannot compare unicode strings" - -def is_list_of_strings(items): - for x in items: - if not isinstance(x, basestring): - return False - return True - -def list_union(a, b): - result = [] - for x in a: - if x not in result: - result.append(x) - for x in b: - if x not in result: - result.append(x) - return result - -def list_intersection(a, b): - result = [] - for x in a: - if x in b and x not in result: - result.append(x) - return result - -def list_difference(a, b): - result = [] - for x in a: - if x not in b and x not in result: - result.append(x) - for x in b: - if x not in a and x not in result: - result.append(x) - return result - -def contains_vars(data): - ''' - returns True if the data contains a variable pattern - ''' - return "$" in data or "{{" in data - -def safe_eval(expr, locals={}, include_exceptions=False): - ''' - This is intended for allowing things like: - with_items: a_list_variable - - Where Jinja2 would return a string but we do not want to allow it to - call functions (outside of Jinja2, where the env is constrained). 
If - the input data to this function came from an untrusted (remote) source, - it should first be run through _clean_data_struct() to ensure the data - is further sanitized prior to evaluation. - - Based on: - http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe - ''' - - # this is the whitelist of AST nodes we are going to - # allow in the evaluation. Any node type other than - # those listed here will raise an exception in our custom - # visitor class defined below. - SAFE_NODES = set( - ( - ast.Add, - ast.BinOp, - ast.Call, - ast.Compare, - ast.Dict, - ast.Div, - ast.Expression, - ast.List, - ast.Load, - ast.Mult, - ast.Num, - ast.Name, - ast.Str, - ast.Sub, - ast.Tuple, - ast.UnaryOp, - ) - ) - - # AST node types were expanded after 2.6 - if not sys.version.startswith('2.6'): - SAFE_NODES.union( - set( - (ast.Set,) - ) - ) - - filter_list = [] - for filter in filter_loader.all(): - filter_list.extend(filter.filters().keys()) - - CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list - - class CleansingNodeVisitor(ast.NodeVisitor): - def generic_visit(self, node, inside_call=False): - if type(node) not in SAFE_NODES: - raise Exception("invalid expression (%s)" % expr) - elif isinstance(node, ast.Call): - inside_call = True - elif isinstance(node, ast.Name) and inside_call: - if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST: - raise Exception("invalid function: %s" % node.id) - # iterate over all child nodes - for child_node in ast.iter_child_nodes(node): - self.generic_visit(child_node, inside_call) - - if not isinstance(expr, basestring): - # already templated to a datastructure, perhaps? 
- if include_exceptions: - return (expr, None) - return expr - - cnv = CleansingNodeVisitor() - try: - parsed_tree = ast.parse(expr, mode='eval') - cnv.visit(parsed_tree) - compiled = compile(parsed_tree, expr, 'eval') - result = eval(compiled, {}, locals) - - if include_exceptions: - return (result, None) - else: - return result - except SyntaxError, e: - # special handling for syntax errors, we just return - # the expression string back as-is - if include_exceptions: - return (expr, None) - return expr - except Exception, e: - if include_exceptions: - return (expr, e) - return expr - - -def listify_lookup_plugin_terms(terms, basedir, inject): - - from ansible.utils import template - - if isinstance(terms, basestring): - # someone did: - # with_items: alist - # OR - # with_items: {{ alist }} - - stripped = terms.strip() - if not (stripped.startswith('{') or stripped.startswith('[')) and \ - not stripped.startswith("/") and \ - not stripped.startswith('set([') and \ - not LOOKUP_REGEX.search(terms): - # if not already a list, get ready to evaluate with Jinja2 - # not sure why the "/" is in above code :) - try: - new_terms = template.template(basedir, "{{ %s }}" % terms, inject) - if isinstance(new_terms, basestring) and "{{" in new_terms: - pass - else: - terms = new_terms - except: - pass - - if '{' in terms or '[' in terms: - # Jinja2 already evaluated a variable to a list. 
- # Jinja2-ified list needs to be converted back to a real type - # TODO: something a bit less heavy than eval - return safe_eval(terms) - - if isinstance(terms, basestring): - terms = [ terms ] - - return terms - -def combine_vars(a, b): - - _validate_both_dicts(a, b) - - if C.DEFAULT_HASH_BEHAVIOUR == "merge": - return merge_hash(a, b) - else: - return dict(a.items() + b.items()) - -def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS): - '''Return a random password string of length containing only chars.''' - - password = [] - while len(password) < length: - new_char = os.urandom(1) - if new_char in chars: - password.append(new_char) - - return ''.join(password) - -def before_comment(msg): - ''' what's the part of a string before a comment? ''' - msg = msg.replace("\#","**NOT_A_COMMENT**") - msg = msg.split("#")[0] - msg = msg.replace("**NOT_A_COMMENT**","#") - return msg - -def load_vars(basepath, results, vault_password=None): - """ - Load variables from any potential yaml filename combinations of basepath, - returning result. - """ - - paths_to_check = [ "".join([basepath, ext]) - for ext in C.YAML_FILENAME_EXTENSIONS ] - - found_paths = [] - - for path in paths_to_check: - found, results = _load_vars_from_path(path, results, vault_password=vault_password) - if found: - found_paths.append(path) - - - # disallow the potentially confusing situation that there are multiple - # variable files for the same name. For example if both group_vars/all.yml - # and group_vars/all.yaml - if len(found_paths) > 1: - raise errors.AnsibleError("Multiple variable files found. " - "There should only be one. %s" % ( found_paths, )) - - return results - -## load variables from yaml files/dirs -# e.g. host/group_vars -# -def _load_vars_from_path(path, results, vault_password=None): - """ - Robustly access the file at path and load variables, carefully reporting - errors in a friendly/informative way. 
- - Return the tuple (found, new_results, ) - """ - - try: - # in the case of a symbolic link, we want the stat of the link itself, - # not its target - pathstat = os.lstat(path) - except os.error, err: - # most common case is that nothing exists at that path. - if err.errno == errno.ENOENT: - return False, results - # otherwise this is a condition we should report to the user - raise errors.AnsibleError( - "%s is not accessible: %s." - " Please check its permissions." % ( path, err.strerror)) - - # symbolic link - if stat.S_ISLNK(pathstat.st_mode): - try: - target = os.path.realpath(path) - except os.error, err2: - raise errors.AnsibleError("The symbolic link at %s " - "is not readable: %s. Please check its permissions." - % (path, err2.strerror, )) - # follow symbolic link chains by recursing, so we repeat the same - # permissions checks above and provide useful errors. - return _load_vars_from_path(target, results, vault_password) - - # directory - if stat.S_ISDIR(pathstat.st_mode): - - # support organizing variables across multiple files in a directory - return True, _load_vars_from_folder(path, results, vault_password=vault_password) - - # regular file - elif stat.S_ISREG(pathstat.st_mode): - data = parse_yaml_from_file(path, vault_password=vault_password) - if data and type(data) != dict: - raise errors.AnsibleError( - "%s must be stored as a dictionary/hash" % path) - elif data is None: - data = {} - - # combine vars overrides by default but can be configured to do a - # hash merge in settings - results = combine_vars(results, data) - return True, results - - # something else? could be a fifo, socket, device, etc. - else: - raise errors.AnsibleError("Expected a variable file or directory " - "but found a non-file object at path %s" % (path, )) - -def _load_vars_from_folder(folder_path, results, vault_password=None): - """ - Load all variables within a folder recursively. 
- """ - - # this function and _load_vars_from_path are mutually recursive - - try: - names = os.listdir(folder_path) - except os.error, err: - raise errors.AnsibleError( - "This folder cannot be listed: %s: %s." - % ( folder_path, err.strerror)) - - # evaluate files in a stable order rather than whatever order the - # filesystem lists them. - names.sort() - - # do not parse hidden files or dirs, e.g. .svn/ - paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')] - for path in paths: - _found, results = _load_vars_from_path(path, results, vault_password=vault_password) - return results - -def update_hash(hash, key, new_value): - ''' used to avoid nested .update calls on the parent ''' - - value = hash.get(key, {}) - value.update(new_value) - hash[key] = value - -def censor_unlogged_data(data): - ''' - used when the no_log: True attribute is passed to a task to keep data from a callback. - NOT intended to prevent variable registration, but only things from showing up on - screen - ''' - new_data = {} - for (x,y) in data.iteritems(): - if x in [ 'skipped', 'changed', 'failed', 'rc' ]: - new_data[x] = y - new_data['censored'] = 'results hidden due to no_log parameter' - return new_data - -def check_mutually_exclusive_privilege(options, parser): - - # privilege escalation command line arguments need to be mutually exclusive - if (options.su or options.su_user or options.ask_su_pass) and \ - (options.sudo or options.sudo_user or options.ask_sudo_pass) or \ - (options.su or options.su_user or options.ask_su_pass) and \ - (options.become or options.become_user or options.become_ask_pass) or \ - (options.sudo or options.sudo_user or options.ask_sudo_pass) and \ - (options.become or options.become_user or options.become_ask_pass): - - parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " - "and su arguments ('-su', '--su-user', and '--ask-su-pass') " - "and become arguments ('--become', '--become-user', and 
'--ask-become-pass')" - " are exclusive of each other") - - +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type diff --git a/v2/ansible/utils/boolean.py b/lib/ansible/utils/boolean.py similarity index 100% rename from v2/ansible/utils/boolean.py rename to lib/ansible/utils/boolean.py diff --git a/v2/ansible/utils/color.py b/lib/ansible/utils/color.py similarity index 100% rename from v2/ansible/utils/color.py rename to lib/ansible/utils/color.py diff --git a/v2/ansible/utils/debug.py b/lib/ansible/utils/debug.py similarity index 100% rename from v2/ansible/utils/debug.py rename to lib/ansible/utils/debug.py diff --git a/v2/ansible/utils/display.py b/lib/ansible/utils/display.py similarity index 100% rename from v2/ansible/utils/display.py rename to lib/ansible/utils/display.py diff --git a/v2/ansible/utils/encrypt.py b/lib/ansible/utils/encrypt.py similarity index 100% rename from v2/ansible/utils/encrypt.py rename to lib/ansible/utils/encrypt.py diff --git a/lib/ansible/utils/hashing.py b/lib/ansible/utils/hashing.py index a7d142e5bd4ba2..5e378db79f49c5 100644 --- a/lib/ansible/utils/hashing.py +++ b/lib/ansible/utils/hashing.py @@ -20,6 +20,7 @@ __metaclass__ = type import os +from ansible.errors import AnsibleError # Note, sha1 is the only hash algorithm compatible with python2.4 and with # FIPS-140 mode (as of 11-2014) @@ -43,6 +44,8 @@ def secure_hash_s(data, hash_func=sha1): digest = hash_func() try: + if not isinstance(data, basestring): + data = "%s" % data digest.update(data) except UnicodeEncodeError: digest.update(data.encode('utf-8')) @@ -62,8 +65,8 @@ def secure_hash(filename, hash_func=sha1): digest.update(block) block = infile.read(blocksize) infile.close() - except IOError, e: - raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) + except IOError as e: + raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) 
return digest.hexdigest() # The checksum algorithm must match with the algorithm in ShellModule.checksum() method diff --git a/v2/ansible/utils/listify.py b/lib/ansible/utils/listify.py similarity index 100% rename from v2/ansible/utils/listify.py rename to lib/ansible/utils/listify.py diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index ee99af2cb54dba..632b4a00c2a36a 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -23,7 +23,7 @@ import yaml import traceback -from ansible import utils +from ansible.plugins import fragment_loader # modules that are ok that they do not have documentation strings BLACKLIST_MODULES = [ @@ -66,7 +66,7 @@ def get_docstring(filename, verbose=False): if fragment_slug != 'doesnotexist': - fragment_class = utils.plugins.fragment_loader.get(fragment_name) + fragment_class = fragment_loader.get(fragment_name) assert fragment_class is not None fragment_yaml = getattr(fragment_class, fragment_var, '{}') diff --git a/v2/ansible/utils/module_docs_fragments b/lib/ansible/utils/module_docs_fragments similarity index 100% rename from v2/ansible/utils/module_docs_fragments rename to lib/ansible/utils/module_docs_fragments diff --git a/v2/ansible/utils/path.py b/lib/ansible/utils/path.py similarity index 100% rename from v2/ansible/utils/path.py rename to lib/ansible/utils/path.py diff --git a/lib/ansible/utils/unicode.py b/lib/ansible/utils/unicode.py index 7bd035c0075609..2cff2e5e45c76d 100644 --- a/lib/ansible/utils/unicode.py +++ b/lib/ansible/utils/unicode.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from six import string_types, text_type, binary_type, PY3 + # to_bytes and to_unicode were written by Toshio Kuratomi for the # python-kitchen library https://pypi.python.org/pypi/kitchen # They are licensed in kitchen under the terms of the GPLv2+ @@ -35,6 +37,9 @@ # EXCEPTION_CONVERTERS is defined below due to 
using to_unicode +if PY3: + basestring = (str, bytes) + def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): '''Convert an object into a :class:`unicode` string @@ -89,12 +94,12 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): # Could use isbasestring/isunicode here but we want this code to be as # fast as possible if isinstance(obj, basestring): - if isinstance(obj, unicode): + if isinstance(obj, text_type): return obj if encoding in _UTF8_ALIASES: - return unicode(obj, 'utf-8', errors) + return text_type(obj, 'utf-8', errors) if encoding in _LATIN1_ALIASES: - return unicode(obj, 'latin-1', errors) + return text_type(obj, 'latin-1', errors) return obj.decode(encoding, errors) if not nonstring: @@ -110,19 +115,19 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): simple = None if not simple: try: - simple = str(obj) + simple = text_type(obj) except UnicodeError: try: simple = obj.__str__() except (UnicodeError, AttributeError): simple = u'' - if isinstance(simple, str): - return unicode(simple, encoding, errors) + if isinstance(simple, binary_type): + return text_type(simple, encoding, errors) return simple elif nonstring in ('repr', 'strict'): obj_repr = repr(obj) - if isinstance(obj_repr, str): - obj_repr = unicode(obj_repr, encoding, errors) + if isinstance(obj_repr, binary_type): + obj_repr = text_type(obj_repr, encoding, errors) if nonstring == 'repr': return obj_repr raise TypeError('to_unicode was given "%(obj)s" which is neither' @@ -198,19 +203,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): # Could use isbasestring, isbytestring here but we want this to be as fast # as possible if isinstance(obj, basestring): - if isinstance(obj, str): + if isinstance(obj, binary_type): return obj return obj.encode(encoding, errors) if not nonstring: nonstring = 'simplerepr' if nonstring == 'empty': - return '' + return b'' elif nonstring == 'passthru': return obj elif 
nonstring == 'simplerepr': try: - simple = str(obj) + simple = binary_type(obj) except UnicodeError: try: simple = obj.__str__() @@ -220,19 +225,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): try: simple = obj.__unicode__() except (AttributeError, UnicodeError): - simple = '' - if isinstance(simple, unicode): + simple = b'' + if isinstance(simple, text_type): simple = simple.encode(encoding, 'replace') return simple elif nonstring in ('repr', 'strict'): try: obj_repr = obj.__repr__() except (AttributeError, UnicodeError): - obj_repr = '' - if isinstance(obj_repr, unicode): + obj_repr = b'' + if isinstance(obj_repr, text_type): obj_repr = obj_repr.encode(encoding, errors) else: - obj_repr = str(obj_repr) + obj_repr = binary_type(obj_repr) if nonstring == 'repr': return obj_repr raise TypeError('to_bytes was given "%(obj)s" which is neither' diff --git a/v2/ansible/utils/vars.py b/lib/ansible/utils/vars.py similarity index 100% rename from v2/ansible/utils/vars.py rename to lib/ansible/utils/vars.py diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py index 842688a2c18fce..5c704afac59b2b 100644 --- a/lib/ansible/utils/vault.py +++ b/lib/ansible/utils/vault.py @@ -1,4 +1,6 @@ -# (c) 2014, James Tanner +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -12,574 +14,43 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# -# ansible-pull is a script that runs ansible in local mode -# after checking out a playbooks directory from source repo. There is an -# example playbook to bootstrap this script in the examples/ dir which -# installs ansible and sets it up to run on cron. 
-import os -import shlex -import shutil -import tempfile -from io import BytesIO -from subprocess import call -from ansible import errors -from hashlib import sha256 +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type -# Note: Only used for loading obsolete VaultAES files. All files are written -# using the newer VaultAES256 which does not require md5 -try: - from hashlib import md5 -except ImportError: - try: - from md5 import md5 - except ImportError: - # MD5 unavailable. Possibly FIPS mode - md5 = None +import os +import subprocess -from binascii import hexlify -from binascii import unhexlify from ansible import constants as C +from ansible.errors import AnsibleError +from ansible.utils.path import is_executable -try: - from Crypto.Hash import SHA256, HMAC - HAS_HASH = True -except ImportError: - HAS_HASH = False - -# Counter import fails for 2.0.1, requires >= 2.6.1 from pip -try: - from Crypto.Util import Counter - HAS_COUNTER = True -except ImportError: - HAS_COUNTER = False - -# KDF import fails for 2.0.1, requires >= 2.6.1 from pip -try: - from Crypto.Protocol.KDF import PBKDF2 - HAS_PBKDF2 = True -except ImportError: - HAS_PBKDF2 = False - -# AES IMPORTS -try: - from Crypto.Cipher import AES as AES - HAS_AES = True -except ImportError: - HAS_AES = False - -CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. 
You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto" - -HEADER='$ANSIBLE_VAULT' -CIPHER_WHITELIST=['AES', 'AES256'] - -class VaultLib(object): - - def __init__(self, password): - self.password = password - self.cipher_name = None - self.version = '1.1' - - def is_encrypted(self, data): - if data.startswith(HEADER): - return True - else: - return False - - def encrypt(self, data): - - if self.is_encrypted(data): - raise errors.AnsibleError("data is already encrypted") - - if not self.cipher_name: - self.cipher_name = "AES256" - #raise errors.AnsibleError("the cipher must be set before encrypting data") - - if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: - cipher = globals()['Vault' + self.cipher_name] - this_cipher = cipher() - else: - raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name) - - """ - # combine sha + data - this_sha = sha256(data).hexdigest() - tmp_data = this_sha + "\n" + data - """ - - # encrypt sha + data - enc_data = this_cipher.encrypt(data, self.password) - - # add header - tmp_data = self._add_header(enc_data) - return tmp_data - - def decrypt(self, data): - if self.password is None: - raise errors.AnsibleError("A vault password must be specified to decrypt data") - - if not self.is_encrypted(data): - raise errors.AnsibleError("data is not encrypted") - - # clean out header - data = self._split_header(data) - - # create the cipher object - if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: - cipher = globals()['Vault' + self.cipher_name] - this_cipher = cipher() - else: - raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name) - - # try to unencrypt data - data = this_cipher.decrypt(data, self.password) - if data is None: - raise errors.AnsibleError("Decryption failed") - - return data - - def _add_header(self, data): - # combine header and encrypted data 
in 80 char columns - - #tmpdata = hexlify(data) - tmpdata = [data[i:i+80] for i in range(0, len(data), 80)] - - if not self.cipher_name: - raise errors.AnsibleError("the cipher must be set before adding a header") - - dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n" - - for l in tmpdata: - dirty_data += l + '\n' - - return dirty_data - - - def _split_header(self, data): - # used by decrypt - - tmpdata = data.split('\n') - tmpheader = tmpdata[0].strip().split(';') - - self.version = str(tmpheader[1].strip()) - self.cipher_name = str(tmpheader[2].strip()) - clean_data = '\n'.join(tmpdata[1:]) - - """ - # strip out newline, join, unhex - clean_data = [ x.strip() for x in clean_data ] - clean_data = unhexlify(''.join(clean_data)) - """ - - return clean_data - - def __enter__(self): - return self - - def __exit__(self, *err): - pass - -class VaultEditor(object): - # uses helper methods for write_file(self, filename, data) - # to write a file so that code isn't duplicated for simple - # file I/O, ditto read_file(self, filename) and launch_editor(self, filename) - # ... "Don't Repeat Yourself", etc. 
- - def __init__(self, cipher_name, password, filename): - # instantiates a member variable for VaultLib - self.cipher_name = cipher_name - self.password = password - self.filename = filename - - def _edit_file_helper(self, existing_data=None, cipher=None): - # make sure the umask is set to a sane value - old_umask = os.umask(0o077) - - # Create a tempfile - _, tmp_path = tempfile.mkstemp() - - if existing_data: - self.write_data(existing_data, tmp_path) - - # drop the user into an editor on the tmp file - try: - call(self._editor_shell_command(tmp_path)) - except OSError, e: - raise Exception("Failed to open editor (%s): %s" % (self._editor_shell_command(tmp_path)[0],str(e))) - tmpdata = self.read_data(tmp_path) - - # create new vault - this_vault = VaultLib(self.password) - if cipher: - this_vault.cipher_name = cipher - - # encrypt new data and write out to tmp - enc_data = this_vault.encrypt(tmpdata) - self.write_data(enc_data, tmp_path) - - # shuffle tmp file into place - self.shuffle_files(tmp_path, self.filename) - - # and restore umask - os.umask(old_umask) - - def create_file(self): - """ create a new encrypted file """ - - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) - - if os.path.isfile(self.filename): - raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) - - # Let the user specify contents and save file - self._edit_file_helper(cipher=self.cipher_name) - - def decrypt_file(self): - - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) - - if not os.path.isfile(self.filename): - raise errors.AnsibleError("%s does not exist" % self.filename) - - tmpdata = self.read_data(self.filename) - this_vault = VaultLib(self.password) - if this_vault.is_encrypted(tmpdata): - dec_data = this_vault.decrypt(tmpdata) - if dec_data is None: - raise errors.AnsibleError("Decryption failed") - else: - 
self.write_data(dec_data, self.filename) - else: - raise errors.AnsibleError("%s is not encrypted" % self.filename) - - def edit_file(self): - - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) - - # decrypt to tmpfile - tmpdata = self.read_data(self.filename) - this_vault = VaultLib(self.password) - dec_data = this_vault.decrypt(tmpdata) - - # let the user edit the data and save - self._edit_file_helper(existing_data=dec_data) - ###we want the cipher to default to AES256 (get rid of files - # encrypted with the AES cipher) - #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name) - - - def view_file(self): - - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) - - # decrypt to tmpfile - tmpdata = self.read_data(self.filename) - this_vault = VaultLib(self.password) - dec_data = this_vault.decrypt(tmpdata) - old_umask = os.umask(0o077) - _, tmp_path = tempfile.mkstemp() - self.write_data(dec_data, tmp_path) - os.umask(old_umask) - - # drop the user into pager on the tmp file - call(self._pager_shell_command(tmp_path)) - os.remove(tmp_path) - - def encrypt_file(self): - - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) - - if not os.path.isfile(self.filename): - raise errors.AnsibleError("%s does not exist" % self.filename) - - tmpdata = self.read_data(self.filename) - this_vault = VaultLib(self.password) - this_vault.cipher_name = self.cipher_name - if not this_vault.is_encrypted(tmpdata): - enc_data = this_vault.encrypt(tmpdata) - self.write_data(enc_data, self.filename) - else: - raise errors.AnsibleError("%s is already encrypted" % self.filename) - - def rekey_file(self, new_password): - - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) - - # decrypt - tmpdata = 
self.read_data(self.filename) - this_vault = VaultLib(self.password) - dec_data = this_vault.decrypt(tmpdata) - - # create new vault - new_vault = VaultLib(new_password) - - # we want to force cipher to the default - #new_vault.cipher_name = this_vault.cipher_name - - # re-encrypt data and re-write file - enc_data = new_vault.encrypt(dec_data) - self.write_data(enc_data, self.filename) - - def read_data(self, filename): - f = open(filename, "rb") - tmpdata = f.read() - f.close() - return tmpdata - - def write_data(self, data, filename): - if os.path.isfile(filename): - os.remove(filename) - f = open(filename, "wb") - f.write(data) - f.close() - - def shuffle_files(self, src, dest): - # overwrite dest with src - if os.path.isfile(dest): - os.remove(dest) - shutil.move(src, dest) - - def _editor_shell_command(self, filename): - EDITOR = os.environ.get('EDITOR','vim') - editor = shlex.split(EDITOR) - editor.append(filename) - - return editor - - def _pager_shell_command(self, filename): - PAGER = os.environ.get('PAGER','less') - pager = shlex.split(PAGER) - pager.append(filename) - - return pager - -######################################## -# CIPHERS # -######################################## - -class VaultAES(object): - - # this version has been obsoleted by the VaultAES256 class - # which uses encrypt-then-mac (fixing order) and also improving the KDF used - # code remains for upgrade purposes only - # http://stackoverflow.com/a/16761459 - - def __init__(self): - if not md5: - raise errors.AnsibleError('md5 hash is unavailable (Could be due to FIPS mode). 
Legacy VaultAES format is unavailable.') - if not HAS_AES: - raise errors.AnsibleError(CRYPTO_UPGRADE) - - def aes_derive_key_and_iv(self, password, salt, key_length, iv_length): - - """ Create a key and an initialization vector """ - - d = d_i = '' - while len(d) < key_length + iv_length: - d_i = md5(d_i + password + salt).digest() - d += d_i - - key = d[:key_length] - iv = d[key_length:key_length+iv_length] - - return key, iv - - def encrypt(self, data, password, key_length=32): - - """ Read plaintext data from in_file and write encrypted to out_file """ - - - # combine sha + data - this_sha = sha256(data).hexdigest() - tmp_data = this_sha + "\n" + data - - in_file = BytesIO(tmp_data) - in_file.seek(0) - out_file = BytesIO() - - bs = AES.block_size - - # Get a block of random data. EL does not have Crypto.Random.new() - # so os.urandom is used for cross platform purposes - salt = os.urandom(bs - len('Salted__')) - - key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs) - cipher = AES.new(key, AES.MODE_CBC, iv) - out_file.write('Salted__' + salt) - finished = False - while not finished: - chunk = in_file.read(1024 * bs) - if len(chunk) == 0 or len(chunk) % bs != 0: - padding_length = (bs - len(chunk) % bs) or bs - chunk += padding_length * chr(padding_length) - finished = True - out_file.write(cipher.encrypt(chunk)) - - out_file.seek(0) - enc_data = out_file.read() - tmp_data = hexlify(enc_data) - - return tmp_data - - - def decrypt(self, data, password, key_length=32): - - """ Read encrypted data from in_file and write decrypted to out_file """ - - # http://stackoverflow.com/a/14989032 - - data = ''.join(data.split('\n')) - data = unhexlify(data) - - in_file = BytesIO(data) - in_file.seek(0) - out_file = BytesIO() - - bs = AES.block_size - salt = in_file.read(bs)[len('Salted__'):] - key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs) - cipher = AES.new(key, AES.MODE_CBC, iv) - next_chunk = '' - finished = False - - while not 
finished: - chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs)) - if len(next_chunk) == 0: - padding_length = ord(chunk[-1]) - chunk = chunk[:-padding_length] - finished = True - out_file.write(chunk) - - # reset the stream pointer to the beginning - out_file.seek(0) - new_data = out_file.read() - - # split out sha and verify decryption - split_data = new_data.split("\n") - this_sha = split_data[0] - this_data = '\n'.join(split_data[1:]) - test_sha = sha256(this_data).hexdigest() - - if this_sha != test_sha: - raise errors.AnsibleError("Decryption failed") - - #return out_file.read() - return this_data - - -class VaultAES256(object): - +def read_vault_file(vault_password_file): """ - Vault implementation using AES-CTR with an HMAC-SHA256 authentication code. - Keys are derived using PBKDF2 + Read a vault password from a file or if executable, execute the script and + retrieve password from STDOUT """ - # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html - - def __init__(self): - - if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) - - def gen_key_initctr(self, password, salt): - # 16 for AES 128, 32 for AES256 - keylength = 32 - - # match the size used for counter.new to avoid extra work - ivlength = 16 - - hash_function = SHA256 - - # make two keys and one iv - pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest() - - - derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength, - count=10000, prf=pbkdf2_prf) - - key1 = derivedkey[:keylength] - key2 = derivedkey[keylength:(keylength * 2)] - iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength] - - return key1, key2, hexlify(iv) - - - def encrypt(self, data, password): + this_path = os.path.realpath(os.path.expanduser(vault_password_file)) + if not os.path.exists(this_path): + raise AnsibleError("The vault password file %s was not found" % this_path) - salt = os.urandom(32) - key1, key2, iv = 
self.gen_key_initctr(password, salt) - - # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3 - bs = AES.block_size - padding_length = (bs - len(data) % bs) or bs - data += padding_length * chr(padding_length) - - # COUNTER.new PARAMETERS - # 1) nbits (integer) - Length of the counter, in bits. - # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr - - ctr = Counter.new(128, initial_value=long(iv, 16)) - - # AES.new PARAMETERS - # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr - # 2) MODE_CTR, is the recommended mode - # 3) counter= - - cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) - - # ENCRYPT PADDED DATA - cryptedData = cipher.encrypt(data) - - # COMBINE SALT, DIGEST AND DATA - hmac = HMAC.new(key2, cryptedData, SHA256) - message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) ) - message = hexlify(message) - return message - - def decrypt(self, data, password): - - # SPLIT SALT, DIGEST, AND DATA - data = ''.join(data.split("\n")) - data = unhexlify(data) - salt, cryptedHmac, cryptedData = data.split("\n", 2) - salt = unhexlify(salt) - cryptedData = unhexlify(cryptedData) - - key1, key2, iv = self.gen_key_initctr(password, salt) - - # EXIT EARLY IF DIGEST DOESN'T MATCH - hmacDecrypt = HMAC.new(key2, cryptedData, SHA256) - if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()): - return None - - # SET THE COUNTER AND THE CIPHER - ctr = Counter.new(128, initial_value=long(iv, 16)) - cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) - - # DECRYPT PADDED DATA - decryptedData = cipher.decrypt(cryptedData) - - # UNPAD DATA - padding_length = ord(decryptedData[-1]) - decryptedData = decryptedData[:-padding_length] - - return decryptedData - - def is_equal(self, a, b): - # http://codahale.com/a-lesson-in-timing-attacks/ - if len(a) != len(b): - return False - - result = 0 - for x, y in zip(a, b): - result |= ord(x) ^ ord(y) - return result == 0 + if 
is_executable(this_path): + try: + # STDERR not captured to make it easier for users to prompt for input in their scripts + p = subprocess.Popen(this_path, stdout=subprocess.PIPE) + except OSError as e: + raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e)) + stdout, stderr = p.communicate() + vault_pass = stdout.strip('\r\n') + else: + try: + f = open(this_path, "rb") + vault_pass=f.read().strip() + f.close() + except (OSError, IOError) as e: + raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e)) + return vault_pass diff --git a/v2/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py similarity index 100% rename from v2/ansible/vars/__init__.py rename to lib/ansible/vars/__init__.py diff --git a/v2/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py similarity index 100% rename from v2/ansible/vars/hostvars.py rename to lib/ansible/vars/hostvars.py diff --git a/v2/samples/README.md b/samples/README.md similarity index 100% rename from v2/samples/README.md rename to samples/README.md diff --git a/v2/samples/common_include.yml b/samples/common_include.yml similarity index 100% rename from v2/samples/common_include.yml rename to samples/common_include.yml diff --git a/v2/samples/hosts b/samples/hosts similarity index 100% rename from v2/samples/hosts rename to samples/hosts diff --git a/v2/samples/ignore_errors.yml b/samples/ignore_errors.yml similarity index 100% rename from v2/samples/ignore_errors.yml rename to samples/ignore_errors.yml diff --git a/v2/samples/include.yml b/samples/include.yml similarity index 100% rename from v2/samples/include.yml rename to samples/include.yml diff --git a/v2/samples/inv_lg b/samples/inv_lg similarity index 100% rename from v2/samples/inv_lg rename to samples/inv_lg diff --git a/v2/samples/inv_md b/samples/inv_md similarity index 100% rename from v2/samples/inv_md rename to 
samples/inv_md diff --git a/v2/samples/inv_sm b/samples/inv_sm similarity index 100% rename from v2/samples/inv_sm rename to samples/inv_sm diff --git a/v2/samples/l1_include.yml b/samples/l1_include.yml similarity index 100% rename from v2/samples/l1_include.yml rename to samples/l1_include.yml diff --git a/v2/samples/l2_include.yml b/samples/l2_include.yml similarity index 100% rename from v2/samples/l2_include.yml rename to samples/l2_include.yml diff --git a/v2/samples/l3_include.yml b/samples/l3_include.yml similarity index 100% rename from v2/samples/l3_include.yml rename to samples/l3_include.yml diff --git a/v2/samples/localhost_include.yml b/samples/localhost_include.yml similarity index 100% rename from v2/samples/localhost_include.yml rename to samples/localhost_include.yml diff --git a/v2/samples/localhosts b/samples/localhosts similarity index 100% rename from v2/samples/localhosts rename to samples/localhosts diff --git a/v2/samples/lookup_file.yml b/samples/lookup_file.yml similarity index 100% rename from v2/samples/lookup_file.yml rename to samples/lookup_file.yml diff --git a/v2/samples/lookup_password.yml b/samples/lookup_password.yml similarity index 100% rename from v2/samples/lookup_password.yml rename to samples/lookup_password.yml diff --git a/v2/samples/lookup_pipe.py b/samples/lookup_pipe.py similarity index 100% rename from v2/samples/lookup_pipe.py rename to samples/lookup_pipe.py diff --git a/v2/samples/lookup_template.yml b/samples/lookup_template.yml similarity index 100% rename from v2/samples/lookup_template.yml rename to samples/lookup_template.yml diff --git a/v2/samples/multi.py b/samples/multi.py similarity index 100% rename from v2/samples/multi.py rename to samples/multi.py diff --git a/v2/samples/multi_queues.py b/samples/multi_queues.py similarity index 100% rename from v2/samples/multi_queues.py rename to samples/multi_queues.py diff --git a/v2/samples/roles/common/meta/main.yml b/samples/roles/common/meta/main.yml 
similarity index 100% rename from v2/samples/roles/common/meta/main.yml rename to samples/roles/common/meta/main.yml diff --git a/v2/samples/roles/common/tasks/main.yml b/samples/roles/common/tasks/main.yml similarity index 100% rename from v2/samples/roles/common/tasks/main.yml rename to samples/roles/common/tasks/main.yml diff --git a/v2/samples/roles/role_a/meta/main.yml b/samples/roles/role_a/meta/main.yml similarity index 100% rename from v2/samples/roles/role_a/meta/main.yml rename to samples/roles/role_a/meta/main.yml diff --git a/v2/samples/roles/role_a/tasks/main.yml b/samples/roles/role_a/tasks/main.yml similarity index 100% rename from v2/samples/roles/role_a/tasks/main.yml rename to samples/roles/role_a/tasks/main.yml diff --git a/v2/samples/roles/role_b/meta/main.yml b/samples/roles/role_b/meta/main.yml similarity index 100% rename from v2/samples/roles/role_b/meta/main.yml rename to samples/roles/role_b/meta/main.yml diff --git a/v2/samples/roles/role_b/tasks/main.yml b/samples/roles/role_b/tasks/main.yml similarity index 100% rename from v2/samples/roles/role_b/tasks/main.yml rename to samples/roles/role_b/tasks/main.yml diff --git a/v2/samples/roles/test_become_r1/meta/main.yml b/samples/roles/test_become_r1/meta/main.yml similarity index 100% rename from v2/samples/roles/test_become_r1/meta/main.yml rename to samples/roles/test_become_r1/meta/main.yml diff --git a/v2/samples/roles/test_become_r1/tasks/main.yml b/samples/roles/test_become_r1/tasks/main.yml similarity index 100% rename from v2/samples/roles/test_become_r1/tasks/main.yml rename to samples/roles/test_become_r1/tasks/main.yml diff --git a/v2/samples/roles/test_become_r2/meta/main.yml b/samples/roles/test_become_r2/meta/main.yml similarity index 100% rename from v2/samples/roles/test_become_r2/meta/main.yml rename to samples/roles/test_become_r2/meta/main.yml diff --git a/v2/samples/roles/test_become_r2/tasks/main.yml b/samples/roles/test_become_r2/tasks/main.yml similarity index 100% 
rename from v2/samples/roles/test_become_r2/tasks/main.yml rename to samples/roles/test_become_r2/tasks/main.yml diff --git a/v2/samples/roles/test_role/meta/main.yml b/samples/roles/test_role/meta/main.yml similarity index 100% rename from v2/samples/roles/test_role/meta/main.yml rename to samples/roles/test_role/meta/main.yml diff --git a/v2/samples/roles/test_role/tasks/main.yml b/samples/roles/test_role/tasks/main.yml similarity index 100% rename from v2/samples/roles/test_role/tasks/main.yml rename to samples/roles/test_role/tasks/main.yml diff --git a/v2/samples/roles/test_role_dep/tasks/main.yml b/samples/roles/test_role_dep/tasks/main.yml similarity index 100% rename from v2/samples/roles/test_role_dep/tasks/main.yml rename to samples/roles/test_role_dep/tasks/main.yml diff --git a/v2/samples/src b/samples/src similarity index 100% rename from v2/samples/src rename to samples/src diff --git a/v2/samples/template.j2 b/samples/template.j2 similarity index 100% rename from v2/samples/template.j2 rename to samples/template.j2 diff --git a/v2/samples/test_become.yml b/samples/test_become.yml similarity index 100% rename from v2/samples/test_become.yml rename to samples/test_become.yml diff --git a/v2/samples/test_big_debug.yml b/samples/test_big_debug.yml similarity index 100% rename from v2/samples/test_big_debug.yml rename to samples/test_big_debug.yml diff --git a/v2/samples/test_big_ping.yml b/samples/test_big_ping.yml similarity index 100% rename from v2/samples/test_big_ping.yml rename to samples/test_big_ping.yml diff --git a/v2/samples/test_block.yml b/samples/test_block.yml similarity index 100% rename from v2/samples/test_block.yml rename to samples/test_block.yml diff --git a/v2/samples/test_blocks_of_blocks.yml b/samples/test_blocks_of_blocks.yml similarity index 100% rename from v2/samples/test_blocks_of_blocks.yml rename to samples/test_blocks_of_blocks.yml diff --git a/v2/samples/test_fact_gather.yml b/samples/test_fact_gather.yml similarity index 
100% rename from v2/samples/test_fact_gather.yml rename to samples/test_fact_gather.yml diff --git a/v2/samples/test_free.yml b/samples/test_free.yml similarity index 100% rename from v2/samples/test_free.yml rename to samples/test_free.yml diff --git a/v2/samples/test_include.yml b/samples/test_include.yml similarity index 100% rename from v2/samples/test_include.yml rename to samples/test_include.yml diff --git a/v2/samples/test_pb.yml b/samples/test_pb.yml similarity index 100% rename from v2/samples/test_pb.yml rename to samples/test_pb.yml diff --git a/v2/samples/test_role.yml b/samples/test_role.yml similarity index 100% rename from v2/samples/test_role.yml rename to samples/test_role.yml diff --git a/v2/samples/test_roles_complex.yml b/samples/test_roles_complex.yml similarity index 100% rename from v2/samples/test_roles_complex.yml rename to samples/test_roles_complex.yml diff --git a/v2/samples/test_run_once.yml b/samples/test_run_once.yml similarity index 100% rename from v2/samples/test_run_once.yml rename to samples/test_run_once.yml diff --git a/v2/samples/test_sudo.yml b/samples/test_sudo.yml similarity index 100% rename from v2/samples/test_sudo.yml rename to samples/test_sudo.yml diff --git a/v2/samples/test_tags.yml b/samples/test_tags.yml similarity index 100% rename from v2/samples/test_tags.yml rename to samples/test_tags.yml diff --git a/v2/samples/testing/extra_vars.yml b/samples/testing/extra_vars.yml similarity index 100% rename from v2/samples/testing/extra_vars.yml rename to samples/testing/extra_vars.yml diff --git a/v2/samples/testing/frag1 b/samples/testing/frag1 similarity index 100% rename from v2/samples/testing/frag1 rename to samples/testing/frag1 diff --git a/v2/samples/testing/frag2 b/samples/testing/frag2 similarity index 100% rename from v2/samples/testing/frag2 rename to samples/testing/frag2 diff --git a/v2/samples/testing/frag3 b/samples/testing/frag3 similarity index 100% rename from v2/samples/testing/frag3 rename to 
samples/testing/frag3 diff --git a/v2/samples/testing/vars.yml b/samples/testing/vars.yml similarity index 100% rename from v2/samples/testing/vars.yml rename to samples/testing/vars.yml diff --git a/v2/samples/with_dict.yml b/samples/with_dict.yml similarity index 100% rename from v2/samples/with_dict.yml rename to samples/with_dict.yml diff --git a/v2/samples/with_env.yml b/samples/with_env.yml similarity index 100% rename from v2/samples/with_env.yml rename to samples/with_env.yml diff --git a/v2/samples/with_fileglob.yml b/samples/with_fileglob.yml similarity index 100% rename from v2/samples/with_fileglob.yml rename to samples/with_fileglob.yml diff --git a/v2/samples/with_first_found.yml b/samples/with_first_found.yml similarity index 100% rename from v2/samples/with_first_found.yml rename to samples/with_first_found.yml diff --git a/v2/samples/with_flattened.yml b/samples/with_flattened.yml similarity index 100% rename from v2/samples/with_flattened.yml rename to samples/with_flattened.yml diff --git a/v2/samples/with_indexed_items.yml b/samples/with_indexed_items.yml similarity index 100% rename from v2/samples/with_indexed_items.yml rename to samples/with_indexed_items.yml diff --git a/v2/samples/with_items.yml b/samples/with_items.yml similarity index 100% rename from v2/samples/with_items.yml rename to samples/with_items.yml diff --git a/v2/samples/with_lines.yml b/samples/with_lines.yml similarity index 100% rename from v2/samples/with_lines.yml rename to samples/with_lines.yml diff --git a/v2/samples/with_nested.yml b/samples/with_nested.yml similarity index 100% rename from v2/samples/with_nested.yml rename to samples/with_nested.yml diff --git a/v2/samples/with_random_choice.yml b/samples/with_random_choice.yml similarity index 100% rename from v2/samples/with_random_choice.yml rename to samples/with_random_choice.yml diff --git a/v2/samples/with_sequence.yml b/samples/with_sequence.yml similarity index 100% rename from v2/samples/with_sequence.yml 
rename to samples/with_sequence.yml diff --git a/v2/samples/with_subelements.yml b/samples/with_subelements.yml similarity index 100% rename from v2/samples/with_subelements.yml rename to samples/with_subelements.yml diff --git a/v2/samples/with_together.yml b/samples/with_together.yml similarity index 100% rename from v2/samples/with_together.yml rename to samples/with_together.yml diff --git a/v2/test/__init__.py b/test/units/__init__.py similarity index 100% rename from v2/test/__init__.py rename to test/units/__init__.py diff --git a/v2/test/errors/__init__.py b/test/units/errors/__init__.py similarity index 100% rename from v2/test/errors/__init__.py rename to test/units/errors/__init__.py diff --git a/v2/test/errors/test_errors.py b/test/units/errors/test_errors.py similarity index 100% rename from v2/test/errors/test_errors.py rename to test/units/errors/test_errors.py diff --git a/v2/test/executor/__init__.py b/test/units/executor/__init__.py similarity index 100% rename from v2/test/executor/__init__.py rename to test/units/executor/__init__.py diff --git a/v2/test/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py similarity index 100% rename from v2/test/executor/test_play_iterator.py rename to test/units/executor/test_play_iterator.py diff --git a/v2/ansible/modules/__init__.py b/test/units/mock/__init__.py similarity index 100% rename from v2/ansible/modules/__init__.py rename to test/units/mock/__init__.py diff --git a/v2/test/mock/loader.py b/test/units/mock/loader.py similarity index 100% rename from v2/test/mock/loader.py rename to test/units/mock/loader.py diff --git a/v2/test/parsing/__init__.py b/test/units/parsing/__init__.py similarity index 100% rename from v2/test/parsing/__init__.py rename to test/units/parsing/__init__.py diff --git a/v2/test/parsing/test_data_loader.py b/test/units/parsing/test_data_loader.py similarity index 100% rename from v2/test/parsing/test_data_loader.py rename to 
test/units/parsing/test_data_loader.py diff --git a/v2/test/parsing/test_mod_args.py b/test/units/parsing/test_mod_args.py similarity index 100% rename from v2/test/parsing/test_mod_args.py rename to test/units/parsing/test_mod_args.py diff --git a/v2/test/parsing/test_splitter.py b/test/units/parsing/test_splitter.py similarity index 100% rename from v2/test/parsing/test_splitter.py rename to test/units/parsing/test_splitter.py diff --git a/v2/test/parsing/vault/__init__.py b/test/units/parsing/vault/__init__.py similarity index 100% rename from v2/test/parsing/vault/__init__.py rename to test/units/parsing/vault/__init__.py diff --git a/v2/test/parsing/vault/test_vault.py b/test/units/parsing/vault/test_vault.py similarity index 100% rename from v2/test/parsing/vault/test_vault.py rename to test/units/parsing/vault/test_vault.py diff --git a/v2/test/parsing/vault/test_vault_editor.py b/test/units/parsing/vault/test_vault_editor.py similarity index 100% rename from v2/test/parsing/vault/test_vault_editor.py rename to test/units/parsing/vault/test_vault_editor.py diff --git a/lib/ansible/callback_plugins/__init__.py b/test/units/parsing/yaml/__init__.py similarity index 100% rename from lib/ansible/callback_plugins/__init__.py rename to test/units/parsing/yaml/__init__.py diff --git a/v2/test/parsing/yaml/test_loader.py b/test/units/parsing/yaml/test_loader.py similarity index 100% rename from v2/test/parsing/yaml/test_loader.py rename to test/units/parsing/yaml/test_loader.py diff --git a/v2/test/playbook/__init__.py b/test/units/playbook/__init__.py similarity index 100% rename from v2/test/playbook/__init__.py rename to test/units/playbook/__init__.py diff --git a/v2/test/playbook/test_block.py b/test/units/playbook/test_block.py similarity index 100% rename from v2/test/playbook/test_block.py rename to test/units/playbook/test_block.py diff --git a/v2/test/playbook/test_play.py b/test/units/playbook/test_play.py similarity index 100% rename from 
v2/test/playbook/test_play.py rename to test/units/playbook/test_play.py diff --git a/v2/test/playbook/test_playbook.py b/test/units/playbook/test_playbook.py similarity index 100% rename from v2/test/playbook/test_playbook.py rename to test/units/playbook/test_playbook.py diff --git a/v2/test/playbook/test_role.py b/test/units/playbook/test_role.py similarity index 100% rename from v2/test/playbook/test_role.py rename to test/units/playbook/test_role.py diff --git a/v2/test/playbook/test_task.py b/test/units/playbook/test_task.py similarity index 100% rename from v2/test/playbook/test_task.py rename to test/units/playbook/test_task.py diff --git a/v2/test/plugins/__init__.py b/test/units/plugins/__init__.py similarity index 100% rename from v2/test/plugins/__init__.py rename to test/units/plugins/__init__.py diff --git a/v2/test/plugins/test_cache.py b/test/units/plugins/test_cache.py similarity index 100% rename from v2/test/plugins/test_cache.py rename to test/units/plugins/test_cache.py diff --git a/v2/test/plugins/test_connection.py b/test/units/plugins/test_connection.py similarity index 100% rename from v2/test/plugins/test_connection.py rename to test/units/plugins/test_connection.py diff --git a/v2/test/plugins/test_plugins.py b/test/units/plugins/test_plugins.py similarity index 100% rename from v2/test/plugins/test_plugins.py rename to test/units/plugins/test_plugins.py diff --git a/v2/test/vars/__init__.py b/test/units/vars/__init__.py similarity index 100% rename from v2/test/vars/__init__.py rename to test/units/vars/__init__.py diff --git a/v2/test/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py similarity index 100% rename from v2/test/vars/test_variable_manager.py rename to test/units/vars/test_variable_manager.py diff --git a/v2/ansible/utils/__init__.py b/v1/ansible/__init__.py similarity index 85% rename from v2/ansible/utils/__init__.py rename to v1/ansible/__init__.py index ae8ccff5952585..ba5ca83b7231d1 100644 --- 
a/v2/ansible/utils/__init__.py +++ b/v1/ansible/__init__.py @@ -14,7 +14,5 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +__version__ = '2.0.0' +__author__ = 'Michael DeHaan' diff --git a/lib/ansible/cache/__init__.py b/v1/ansible/cache/__init__.py similarity index 100% rename from lib/ansible/cache/__init__.py rename to v1/ansible/cache/__init__.py diff --git a/lib/ansible/cache/base.py b/v1/ansible/cache/base.py similarity index 100% rename from lib/ansible/cache/base.py rename to v1/ansible/cache/base.py diff --git a/lib/ansible/cache/jsonfile.py b/v1/ansible/cache/jsonfile.py similarity index 100% rename from lib/ansible/cache/jsonfile.py rename to v1/ansible/cache/jsonfile.py diff --git a/lib/ansible/cache/memcached.py b/v1/ansible/cache/memcached.py similarity index 100% rename from lib/ansible/cache/memcached.py rename to v1/ansible/cache/memcached.py diff --git a/lib/ansible/cache/memory.py b/v1/ansible/cache/memory.py similarity index 100% rename from lib/ansible/cache/memory.py rename to v1/ansible/cache/memory.py diff --git a/lib/ansible/cache/redis.py b/v1/ansible/cache/redis.py similarity index 100% rename from lib/ansible/cache/redis.py rename to v1/ansible/cache/redis.py diff --git a/lib/ansible/runner/action_plugins/__init__.py b/v1/ansible/callback_plugins/__init__.py similarity index 100% rename from lib/ansible/runner/action_plugins/__init__.py rename to v1/ansible/callback_plugins/__init__.py diff --git a/lib/ansible/callback_plugins/noop.py b/v1/ansible/callback_plugins/noop.py similarity index 100% rename from lib/ansible/callback_plugins/noop.py rename to v1/ansible/callback_plugins/noop.py diff --git a/lib/ansible/callbacks.py b/v1/ansible/callbacks.py similarity index 100% rename from lib/ansible/callbacks.py rename to v1/ansible/callbacks.py diff --git 
a/lib/ansible/color.py b/v1/ansible/color.py similarity index 100% rename from lib/ansible/color.py rename to v1/ansible/color.py diff --git a/v2/ansible/constants.py b/v1/ansible/constants.py similarity index 89% rename from v2/ansible/constants.py rename to v1/ansible/constants.py index 456beb8bbc40f4..089de5b7c5bf15 100644 --- a/v2/ansible/constants.py +++ b/v1/ansible/constants.py @@ -15,15 +15,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import os import pwd import sys - -from six.moves import configparser +import ConfigParser from string import ascii_letters, digits # copied from utils, avoid circular reference fun :) @@ -40,15 +35,13 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False, ''' return a configuration variable with casting ''' value = _get_config(p, section, key, env_var, default) if boolean: - value = mk_boolean(value) - if value: - if integer: - value = int(value) - elif floating: - value = float(value) - elif islist: - if isinstance(value, basestring): - value = [x.strip() for x in value.split(',')] + return mk_boolean(value) + if value and integer: + return int(value) + if value and floating: + return float(value) + if value and islist: + return [x.strip() for x in value.split(',')] return value def _get_config(p, section, key, env_var, default): @@ -67,7 +60,7 @@ def _get_config(p, section, key, env_var, default): def load_config_file(): ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' - p = configparser.ConfigParser() + p = ConfigParser.ConfigParser() path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: @@ -80,8 +73,8 @@ def load_config_file(): if path is not None and os.path.exists(path): try: p.read(path) - except configparser.Error as e: - print("Error reading config file: 
\n{0}".format(e)) + except ConfigParser.Error as e: + print "Error reading config file: \n%s" % e sys.exit(1) return p return None @@ -105,8 +98,7 @@ def shell_expand_path(path): DEFAULTS='defaults' # configurable things -DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) -DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts'))) +DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts'))) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles')) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') @@ -120,7 +112,6 @@ def shell_expand_path(path): DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) -DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) @@ -131,6 +122,7 @@ def shell_expand_path(path): DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 
'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) +DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') @@ -149,7 +141,7 @@ def shell_expand_path(path): BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() -DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root') +DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None) DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) # need to rethink impementing these 2 DEFAULT_BECOME_EXE = None @@ -164,7 +156,6 @@ def shell_expand_path(path): DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins') DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') -DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 
'ANSIBLE_STDOUT_CALLBACK', 'default') CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) @@ -182,8 +173,8 @@ def shell_expand_path(path): DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) -RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) -RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') +DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) + RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') @@ -205,16 +196,10 @@ def shell_expand_path(path): ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True) PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) -# galaxy related -DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') -# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated -GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True) - # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" # non-configurable things 
-MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] DEFAULT_BECOME_PASS = None DEFAULT_SUDO_PASS = None DEFAULT_REMOTE_PASS = None diff --git a/lib/ansible/errors.py b/v1/ansible/errors.py similarity index 100% rename from lib/ansible/errors.py rename to v1/ansible/errors.py diff --git a/v2/ansible/inventory/__init__.py b/v1/ansible/inventory/__init__.py similarity index 88% rename from v2/ansible/inventory/__init__.py rename to v1/ansible/inventory/__init__.py index 063398f17f9cdf..2048046d3c1f21 100644 --- a/v2/ansible/inventory/__init__.py +++ b/v1/ansible/inventory/__init__.py @@ -16,44 +16,36 @@ # along with Ansible. If not, see . ############################################# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import fnmatch import os import sys import re -import stat import subprocess -from ansible import constants as C -from ansible.errors import * - +import ansible.constants as C from ansible.inventory.ini import InventoryParser from ansible.inventory.script import InventoryScript from ansible.inventory.dir import InventoryDirectory from ansible.inventory.group import Group from ansible.inventory.host import Host -from ansible.plugins import vars_loader -from ansible.utils.path import is_executable -from ansible.utils.vars import combine_vars +from ansible import errors +from ansible import utils class Inventory(object): """ Host inventory for ansible. 
""" - #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', - # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', - # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] + __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', + 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', + '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] - def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): + def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): # the host file file, or script path, or list of hosts # if a list, inventory data will NOT be loaded self.host_list = host_list - self._loader = loader - self._variable_manager = variable_manager + self._vault_password=vault_password # caching to avoid repeated calculations, particularly with # external inventory scripts. @@ -105,7 +97,7 @@ def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): if os.path.isdir(host_list): # Ensure basedir is inside the directory self.host_list = os.path.join(self.host_list, "") - self.parser = InventoryDirectory(loader=self._loader, filename=host_list) + self.parser = InventoryDirectory(filename=host_list) self.groups = self.parser.groups.values() else: # check to see if the specified file starts with a @@ -121,9 +113,9 @@ def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): except: pass - if is_executable(host_list): + if utils.is_executable(host_list): try: - self.parser = InventoryScript(loader=self._loader, filename=host_list) + self.parser = InventoryScript(filename=host_list) self.groups = self.parser.groups.values() except: if not shebang_present: @@ -142,23 +134,19 @@ def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): else: raise - vars_loader.add_directory(self.basedir(), with_subdir=True) + 
utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True) else: raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?") - self._vars_plugins = [ x for x in vars_loader.all(self) ] + self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ] - # FIXME: shouldn't be required, since the group/host vars file - # management will be done in VariableManager # get group vars from group_vars/ files and vars plugins for group in self.groups: - # FIXME: combine_vars - group.vars = combine_vars(group.vars, self.get_group_variables(group.name)) + group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password)) # get host vars from host_vars/ files and vars plugins for host in self.get_hosts(): - # FIXME: combine_vars - host.vars = combine_vars(host.vars, self.get_host_variables(host.name)) + host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password)) def _match(self, str, pattern_str): @@ -204,9 +192,9 @@ def get_hosts(self, pattern="all"): # exclude hosts mentioned in any restriction (ex: failed hosts) if self._restriction is not None: - hosts = [ h for h in hosts if h in self._restriction ] + hosts = [ h for h in hosts if h.name in self._restriction ] if self._also_restriction is not None: - hosts = [ h for h in hosts if h in self._also_restriction ] + hosts = [ h for h in hosts if h.name in self._also_restriction ] return hosts @@ -332,8 +320,6 @@ def _create_implicit_localhost(self, pattern): new_host = Host(pattern) new_host.set_variable("ansible_python_interpreter", sys.executable) new_host.set_variable("ansible_connection", "local") - new_host.ipv4_address = '127.0.0.1' - ungrouped = self.get_group("ungrouped") if ungrouped is None: self.add_group(Group('ungrouped')) @@ -434,7 +420,7 @@ def _get_group_variables(self, groupname, vault_password=None): group = self.get_group(groupname) if group is None: - 
raise Exception("group not found: %s" % groupname) + raise errors.AnsibleError("group not found: %s" % groupname) vars = {} @@ -442,21 +428,19 @@ def _get_group_variables(self, groupname, vault_password=None): vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')] for updated in vars_results: if updated is not None: - # FIXME: combine_vars - vars = combine_vars(vars, updated) + vars = utils.combine_vars(vars, updated) # Read group_vars/ files - # FIXME: combine_vars - vars = combine_vars(vars, self.get_group_vars(group)) + vars = utils.combine_vars(vars, self.get_group_vars(group)) return vars - def get_vars(self, hostname, update_cached=False, vault_password=None): + def get_variables(self, hostname, update_cached=False, vault_password=None): host = self.get_host(hostname) if not host: - raise Exception("host not found: %s" % hostname) - return host.get_vars() + raise errors.AnsibleError("host not found: %s" % hostname) + return host.get_variables() def get_host_variables(self, hostname, update_cached=False, vault_password=None): @@ -476,26 +460,22 @@ def _get_host_variables(self, hostname, vault_password=None): vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')] for updated in vars_results: if updated is not None: - # FIXME: combine_vars - vars = combine_vars(vars, updated) + vars = utils.combine_vars(vars, updated) # plugin.get_host_vars retrieves just vars for specific host vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')] for updated in vars_results: if updated is not None: - # FIXME: combine_vars - vars = combine_vars(vars, updated) + vars = utils.combine_vars(vars, updated) # still need to check InventoryParser per host vars # which actually means InventoryScript per host, # which is not performant if 
self.parser is not None: - # FIXME: combine_vars - vars = combine_vars(vars, self.parser.get_host_variables(host)) + vars = utils.combine_vars(vars, self.parser.get_host_variables(host)) # Read host_vars/ files - # FIXME: combine_vars - vars = combine_vars(vars, self.get_host_vars(host)) + vars = utils.combine_vars(vars, self.get_host_vars(host)) return vars @@ -510,7 +490,7 @@ def list_hosts(self, pattern="all"): """ return a list of hostnames for a pattern """ - result = [ h for h in self.get_hosts(pattern) ] + result = [ h.name for h in self.get_hosts(pattern) ] if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]: result = [pattern] return result @@ -518,7 +498,11 @@ def list_hosts(self, pattern="all"): def list_groups(self): return sorted([ g.name for g in self.groups ], key=lambda x: x) - def restrict_to_hosts(self, restriction): + # TODO: remove this function + def get_restriction(self): + return self._restriction + + def restrict_to(self, restriction): """ Restrict list operations to the hosts given in restriction. 
This is used to exclude failed hosts in main playbook code, don't use this for other @@ -560,7 +544,7 @@ def subset(self, subset_pattern): results.append(x) self._subset = results - def remove_restriction(self): + def lift_restriction(self): """ Do not restrict list operations """ self._restriction = None @@ -604,12 +588,10 @@ def set_playbook_basedir(self, dir): self._playbook_basedir = dir # get group vars from group_vars/ files for group in self.groups: - # FIXME: combine_vars - group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) + group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) # get host vars from host_vars/ files for host in self.get_hosts(): - # FIXME: combine_vars - host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) + host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) # invalidate cache self._vars_per_host = {} self._vars_per_group = {} @@ -657,15 +639,15 @@ def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False): if _basedir == self._playbook_basedir and scan_pass != 1: continue - # FIXME: these should go to VariableManager if group and host is None: # load vars in dir/group_vars/name_of_group base_path = os.path.join(basedir, "group_vars/%s" % group.name) - self._variable_manager.add_group_vars_file(base_path, self._loader) + results = utils.load_vars(base_path, results, vault_password=self._vault_password) + elif host and group is None: # same for hostvars in dir/host_vars/name_of_host base_path = os.path.join(basedir, "host_vars/%s" % host.name) - self._variable_manager.add_host_vars_file(base_path, self._loader) + results = utils.load_vars(base_path, results, vault_password=self._vault_password) # all done, results is a dictionary of variables for this particular host. 
return results diff --git a/v2/ansible/inventory/dir.py b/v1/ansible/inventory/dir.py similarity index 91% rename from v2/ansible/inventory/dir.py rename to v1/ansible/inventory/dir.py index 735f32d62c35a6..9ac23fff89911f 100644 --- a/v2/ansible/inventory/dir.py +++ b/v1/ansible/inventory/dir.py @@ -17,25 +17,20 @@ # along with Ansible. If not, see . ############################################# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type import os - -from ansible import constants as C -from ansible.errors import AnsibleError - +import ansible.constants as C from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.ini import InventoryParser from ansible.inventory.script import InventoryScript -from ansible.utils.path import is_executable -from ansible.utils.vars import combine_vars +from ansible import utils +from ansible import errors class InventoryDirectory(object): ''' Host inventory parser for ansible using a directory of inventories. 
''' - def __init__(self, loader, filename=C.DEFAULT_HOST_LIST): + def __init__(self, filename=C.DEFAULT_HOST_LIST): self.names = os.listdir(filename) self.names.sort() self.directory = filename @@ -43,12 +38,10 @@ def __init__(self, loader, filename=C.DEFAULT_HOST_LIST): self.hosts = {} self.groups = {} - self._loader = loader - for i in self.names: # Skip files that end with certain extensions or characters - if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo")): + if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")): continue # Skip hidden files if i.startswith('.') and not i.startswith('./'): @@ -58,9 +51,9 @@ def __init__(self, loader, filename=C.DEFAULT_HOST_LIST): continue fullpath = os.path.join(self.directory, i) if os.path.isdir(fullpath): - parser = InventoryDirectory(loader=loader, filename=fullpath) - elif is_executable(fullpath): - parser = InventoryScript(loader=loader, filename=fullpath) + parser = InventoryDirectory(filename=fullpath) + elif utils.is_executable(fullpath): + parser = InventoryScript(filename=fullpath) else: parser = InventoryParser(filename=fullpath) self.parsers.append(parser) @@ -160,7 +153,7 @@ def _merge_groups(self, group, newgroup): # name if group.name != newgroup.name: - raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name)) + raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name)) # depth group.depth = max([group.depth, newgroup.depth]) @@ -203,14 +196,14 @@ def _merge_groups(self, group, newgroup): self.groups[newparent.name].add_child_group(group) # variables - group.vars = combine_vars(group.vars, newgroup.vars) + group.vars = utils.combine_vars(group.vars, newgroup.vars) def _merge_hosts(self,host, newhost): """ Merge all of instance newhost into host """ # name if host.name != newhost.name: - raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name)) + raise 
errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name)) # group membership relation for newgroup in newhost.groups: @@ -225,7 +218,7 @@ def _merge_hosts(self,host, newhost): self.groups[newgroup.name].add_host(host) # variables - host.vars = combine_vars(host.vars, newhost.vars) + host.vars = utils.combine_vars(host.vars, newhost.vars) def get_host_variables(self, host): """ Gets additional host variables from all inventories """ diff --git a/v2/ansible/inventory/expand_hosts.py b/v1/ansible/inventory/expand_hosts.py similarity index 97% rename from v2/ansible/inventory/expand_hosts.py rename to v1/ansible/inventory/expand_hosts.py index b5a957c53fe89b..f1297409355c22 100644 --- a/v2/ansible/inventory/expand_hosts.py +++ b/v1/ansible/inventory/expand_hosts.py @@ -30,9 +30,6 @@ Note that when beg is specified with left zero padding, then the length of end must be the same as that of beg, else an exception is raised. ''' -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import string from ansible import errors diff --git a/v2/ansible/inventory/group.py b/v1/ansible/inventory/group.py similarity index 69% rename from v2/ansible/inventory/group.py rename to v1/ansible/inventory/group.py index 6525e69b466bd1..262558e69c87e8 100644 --- a/v2/ansible/inventory/group.py +++ b/v1/ansible/inventory/group.py @@ -14,15 +14,11 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -from ansible.utils.debug import debug - -class Group: +class Group(object): ''' a group of ansible hosts ''' - #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ] + __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ] def __init__(self, name=None): @@ -33,49 +29,9 @@ def __init__(self, name=None): self.child_groups = [] self.parent_groups = [] self._hosts_cache = None - #self.clear_hosts_cache() - #if self.name is None: - # raise Exception("group name is required") - - def __repr__(self): - return self.get_name() - - def __getstate__(self): - return self.serialize() - - def __setstate__(self, data): - return self.deserialize(data) - - def serialize(self): - parent_groups = [] - for parent in self.parent_groups: - parent_groups.append(parent.serialize()) - - result = dict( - name=self.name, - vars=self.vars.copy(), - parent_groups=parent_groups, - depth=self.depth, - ) - - debug("serializing group, result is: %s" % result) - return result - - def deserialize(self, data): - debug("deserializing group, data is: %s" % data) - self.__init__() - self.name = data.get('name') - self.vars = data.get('vars', dict()) - - parent_groups = data.get('parent_groups', []) - for parent_data in parent_groups: - g = Group() - g.deserialize(parent_data) - self.parent_groups.append(g) - - def get_name(self): - return self.name + if self.name is None: + raise Exception("group name is required") def add_child_group(self, group): @@ -144,7 +100,7 @@ def _get_hosts(self): hosts.append(mine) return hosts - def get_vars(self): + def get_variables(self): return self.vars.copy() def _get_ancestors(self): diff --git a/v1/ansible/inventory/host.py b/v1/ansible/inventory/host.py new file mode 100644 index 00000000000000..d4dc20fa462588 --- /dev/null +++ b/v1/ansible/inventory/host.py @@ -0,0 +1,67 @@ +# (c) 2012-2014, 
Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import ansible.constants as C +from ansible import utils + +class Host(object): + ''' a single ansible host ''' + + __slots__ = [ 'name', 'vars', 'groups' ] + + def __init__(self, name=None, port=None): + + self.name = name + self.vars = {} + self.groups = [] + if port and port != C.DEFAULT_REMOTE_PORT: + self.set_variable('ansible_ssh_port', int(port)) + + if self.name is None: + raise Exception("host name is required") + + def add_group(self, group): + + self.groups.append(group) + + def set_variable(self, key, value): + + self.vars[key]=value + + def get_groups(self): + + groups = {} + for g in self.groups: + groups[g.name] = g + ancestors = g.get_ancestors() + for a in ancestors: + groups[a.name] = a + return groups.values() + + def get_variables(self): + + results = {} + groups = self.get_groups() + for group in sorted(groups, key=lambda g: g.depth): + results = utils.combine_vars(results, group.get_variables()) + results = utils.combine_vars(results, self.vars) + results['inventory_hostname'] = self.name + results['inventory_hostname_short'] = self.name.split('.')[0] + results['group_names'] = sorted([ g.name for g in groups if g.name != 'all']) + return results + + diff --git a/v2/ansible/inventory/ini.py b/v1/ansible/inventory/ini.py similarity index 82% rename from v2/ansible/inventory/ini.py rename to 
v1/ansible/inventory/ini.py index e004ee8bb7584d..bd9a98e7f86249 100644 --- a/v2/ansible/inventory/ini.py +++ b/v1/ansible/inventory/ini.py @@ -16,20 +16,17 @@ # along with Ansible. If not, see . ############################################# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -import ast -import shlex -import re - -from ansible import constants as C -from ansible.errors import * +import ansible.constants as C from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range from ansible.inventory.expand_hosts import expand_hostname_range -from ansible.utils.unicode import to_unicode +from ansible import errors +from ansible import utils +import shlex +import re +import ast class InventoryParser(object): """ @@ -37,8 +34,9 @@ class InventoryParser(object): """ def __init__(self, filename=C.DEFAULT_HOST_LIST): - self.filename = filename + with open(filename) as fh: + self.filename = filename self.lines = fh.readlines() self.groups = {} self.hosts = {} @@ -56,7 +54,10 @@ def _parse(self): def _parse_value(v): if "#" not in v: try: - v = ast.literal_eval(v) + ret = ast.literal_eval(v) + if not isinstance(ret, float): + # Do not trim floats. Eg: "1.20" to 1.2 + return ret # Using explicit exceptions. # Likely a string that literal_eval does not like. We wil then just set it. except ValueError: @@ -65,7 +66,7 @@ def _parse_value(v): except SyntaxError: # Is this a hash with an equals at the end? 
pass - return to_unicode(v, nonstring='passthru', errors='strict') + return v # [webservers] # alpha @@ -90,8 +91,8 @@ def _parse_base_groups(self): self.groups = dict(all=all, ungrouped=ungrouped) active_group_name = 'ungrouped' - for line in self.lines: - line = self._before_comment(line).strip() + for lineno in range(len(self.lines)): + line = utils.before_comment(self.lines[lineno]).strip() if line.startswith("[") and line.endswith("]"): active_group_name = line.replace("[","").replace("]","") if ":vars" in line or ":children" in line: @@ -145,11 +146,8 @@ def _parse_base_groups(self): try: (k,v) = t.split("=", 1) except ValueError, e: - raise AnsibleError("Invalid ini entry in %s: %s - %s" % (self.filename, t, str(e))) - if k == 'ansible_ssh_host': - host.ipv4_address = self._parse_value(v) - else: - host.set_variable(k, self._parse_value(v)) + raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e))) + host.set_variable(k, self._parse_value(v)) self.groups[active_group_name].add_host(host) # [southeast:children] @@ -159,8 +157,8 @@ def _parse_base_groups(self): def _parse_group_children(self): group = None - for line in self.lines: - line = line.strip() + for lineno in range(len(self.lines)): + line = self.lines[lineno].strip() if line is None or line == '': continue if line.startswith("[") and ":children]" in line: @@ -175,7 +173,7 @@ def _parse_group_children(self): elif group: kid_group = self.groups.get(line, None) if kid_group is None: - raise AnsibleError("child group is not defined: (%s)" % line) + raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line)) else: group.add_child_group(kid_group) @@ -186,13 +184,13 @@ def _parse_group_children(self): def _parse_group_variables(self): group = None - for line in self.lines: - line = line.strip() + for lineno in range(len(self.lines)): + line = self.lines[lineno].strip() if line.startswith("[") and ":vars]" in line: 
line = line.replace("[","").replace(":vars]","") group = self.groups.get(line, None) if group is None: - raise AnsibleError("can't add vars to undefined group: %s" % line) + raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line)) elif line.startswith("#") or line.startswith(";"): pass elif line.startswith("["): @@ -201,18 +199,10 @@ def _parse_group_variables(self): pass elif group: if "=" not in line: - raise AnsibleError("variables assigned to group must be in key=value form") + raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1)) else: (k, v) = [e.strip() for e in line.split("=", 1)] group.set_variable(k, self._parse_value(v)) def get_host_variables(self, host): return {} - - def _before_comment(self, msg): - ''' what's the part of a string before a comment? ''' - msg = msg.replace("\#","**NOT_A_COMMENT**") - msg = msg.split("#")[0] - msg = msg.replace("**NOT_A_COMMENT**","#") - return msg - diff --git a/v2/ansible/inventory/script.py b/v1/ansible/inventory/script.py similarity index 82% rename from v2/ansible/inventory/script.py rename to v1/ansible/inventory/script.py index 9675d70f690910..b83cb9bcc7a732 100644 --- a/v2/ansible/inventory/script.py +++ b/v1/ansible/inventory/script.py @@ -16,26 +16,22 @@ # along with Ansible. If not, see . 
############################################# -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type import os import subprocess -import sys - -from ansible import constants as C -from ansible.errors import * +import ansible.constants as C from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.module_utils.basic import json_dict_bytes_to_unicode +from ansible import utils +from ansible import errors +import sys -class InventoryScript: +class InventoryScript(object): ''' Host inventory parser for ansible using external inventory scripts. ''' - def __init__(self, loader, filename=C.DEFAULT_HOST_LIST): - - self._loader = loader + def __init__(self, filename=C.DEFAULT_HOST_LIST): # Support inventory scripts that are not prefixed with some # path information but happen to be in the current working @@ -45,11 +41,11 @@ def __init__(self, loader, filename=C.DEFAULT_HOST_LIST): try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError, e: - raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) + raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (stdout, stderr) = sp.communicate() if sp.returncode != 0: - raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) + raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr)) self.data = stdout # see comment about _meta below @@ -62,7 +58,7 @@ def _parse(self, err): all_hosts = {} # not passing from_remote because data from CMDB is trusted - self.raw = self._loader.load(self.data) + self.raw = utils.parse_json(self.data) self.raw = json_dict_bytes_to_unicode(self.raw) all = Group('all') @@ -72,7 +68,7 @@ def _parse(self, err): if 'failed' in self.raw: sys.stderr.write(err + "\n") - raise AnsibleError("failed to parse executable inventory script results: %s" % self.raw) + raise errors.AnsibleError("failed to 
parse executable inventory script results: %s" % self.raw) for (group_name, data) in self.raw.items(): @@ -96,12 +92,12 @@ def _parse(self, err): if not isinstance(data, dict): data = {'hosts': data} # is not those subkeys, then simplified syntax, host with vars - elif not any(k in data for k in ('hosts','vars')): + elif not any(k in data for k in ('hosts','vars','children')): data = {'hosts': [group_name], 'vars': data} if 'hosts' in data: if not isinstance(data['hosts'], list): - raise AnsibleError("You defined a group \"%s\" with bad " + raise errors.AnsibleError("You defined a group \"%s\" with bad " "data for the host list:\n %s" % (group_name, data)) for hostname in data['hosts']: @@ -112,7 +108,7 @@ def _parse(self, err): if 'vars' in data: if not isinstance(data['vars'], dict): - raise AnsibleError("You defined a group \"%s\" with bad " + raise errors.AnsibleError("You defined a group \"%s\" with bad " "data for variables:\n %s" % (group_name, data)) for k, v in data['vars'].iteritems(): @@ -147,12 +143,12 @@ def get_host_variables(self, host): try: sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError, e: - raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) + raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e)) (out, err) = sp.communicate() if out.strip() == '': return dict() try: - return json_dict_bytes_to_unicode(self._loader.load(out)) + return json_dict_bytes_to_unicode(utils.parse_json(out)) except ValueError: - raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out)) + raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out)) diff --git a/lib/ansible/runner/connection_plugins/__init__.py b/v1/ansible/inventory/vars_plugins/__init__.py similarity index 100% rename from lib/ansible/runner/connection_plugins/__init__.py rename to v1/ansible/inventory/vars_plugins/__init__.py diff --git 
a/v2/ansible/inventory/vars_plugins/noop.py b/v1/ansible/inventory/vars_plugins/noop.py similarity index 94% rename from v2/ansible/inventory/vars_plugins/noop.py rename to v1/ansible/inventory/vars_plugins/noop.py index 8f0c98cad56d35..5d4b4b6658c985 100644 --- a/v2/ansible/inventory/vars_plugins/noop.py +++ b/v1/ansible/inventory/vars_plugins/noop.py @@ -15,8 +15,6 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type class VarsModule(object): diff --git a/lib/ansible/module_common.py b/v1/ansible/module_common.py similarity index 100% rename from lib/ansible/module_common.py rename to v1/ansible/module_common.py diff --git a/v2/ansible/module_utils/__init__.py b/v1/ansible/module_utils/__init__.py similarity index 100% rename from v2/ansible/module_utils/__init__.py rename to v1/ansible/module_utils/__init__.py diff --git a/v2/ansible/module_utils/a10.py b/v1/ansible/module_utils/a10.py similarity index 100% rename from v2/ansible/module_utils/a10.py rename to v1/ansible/module_utils/a10.py diff --git a/v2/ansible/module_utils/basic.py b/v1/ansible/module_utils/basic.py similarity index 97% rename from v2/ansible/module_utils/basic.py rename to v1/ansible/module_utils/basic.py index 8f9b03f882d1a2..54a1a9cfff7f88 100644 --- a/v2/ansible/module_utils/basic.py +++ b/v1/ansible/module_utils/basic.py @@ -43,7 +43,7 @@ # can be inserted in any module source automatically by including # #<> on a blank line by itself inside # of an ansible module. 
The source of this common code lives -# in ansible/executor/module_common.py +# in lib/ansible/module_common.py import locale import os @@ -65,7 +65,6 @@ import platform import errno import tempfile -from itertools import imap, repeat try: import json @@ -235,7 +234,7 @@ def load_platform_subclass(cls, *args, **kwargs): return super(cls, subclass).__new__(subclass) -def json_dict_unicode_to_bytes(d, encoding='utf-8'): +def json_dict_unicode_to_bytes(d): ''' Recursively convert dict keys and values to byte str Specialized for json return because this only handles, lists, tuples, @@ -243,17 +242,17 @@ def json_dict_unicode_to_bytes(d, encoding='utf-8'): ''' if isinstance(d, unicode): - return d.encode(encoding) + return d.encode('utf-8') elif isinstance(d, dict): - return dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding))) + return dict(map(json_dict_unicode_to_bytes, d.iteritems())) elif isinstance(d, list): - return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding))) + return list(map(json_dict_unicode_to_bytes, d)) elif isinstance(d, tuple): - return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding))) + return tuple(map(json_dict_unicode_to_bytes, d)) else: return d -def json_dict_bytes_to_unicode(d, encoding='utf-8'): +def json_dict_bytes_to_unicode(d): ''' Recursively convert dict keys and values to byte str Specialized for json return because this only handles, lists, tuples, @@ -261,13 +260,13 @@ def json_dict_bytes_to_unicode(d, encoding='utf-8'): ''' if isinstance(d, str): - return unicode(d, encoding) + return unicode(d, 'utf-8') elif isinstance(d, dict): - return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding))) + return dict(map(json_dict_bytes_to_unicode, d.iteritems())) elif isinstance(d, list): - return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding))) + return list(map(json_dict_bytes_to_unicode, d)) elif isinstance(d, tuple): - return tuple(imap(json_dict_bytes_to_unicode, d, 
repeat(encoding))) + return tuple(map(json_dict_bytes_to_unicode, d)) else: return d @@ -360,9 +359,9 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, # reset to LANG=C if it's an invalid/unavailable locale self._check_locale() - self.params = self._load_params() + (self.params, self.args) = self._load_params() - self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log'] + self._legal_inputs = ['CHECKMODE', 'NO_LOG'] self.aliases = self._handle_aliases() @@ -889,7 +888,7 @@ def _handle_aliases(self): def _check_for_check_mode(self): for (k,v) in self.params.iteritems(): - if k == '_ansible_check_mode': + if k == 'CHECKMODE': if not self.supports_check_mode: self.exit_json(skipped=True, msg="remote module does not support check mode") if self.supports_check_mode: @@ -897,13 +896,13 @@ def _check_for_check_mode(self): def _check_for_no_log(self): for (k,v) in self.params.iteritems(): - if k == '_ansible_no_log': + if k == 'NO_LOG': self.no_log = self.boolean(v) def _check_invalid_arguments(self): for (k,v) in self.params.iteritems(): # these should be in legal inputs already - #if k in ('_ansible_check_mode', '_ansible_no_log'): + #if k in ('CHECKMODE', 'NO_LOG'): # continue if k not in self._legal_inputs: self.fail_json(msg="unsupported parameter for module: %s" % k) @@ -1076,11 +1075,20 @@ def _set_defaults(self, pre=True): def _load_params(self): ''' read the input and return a dictionary and the arguments string ''' - params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) - if params is None: - params = dict() - return params - + args = MODULE_ARGS + items = shlex.split(args) + params = {} + for x in items: + try: + (k, v) = x.split("=",1) + except Exception, e: + self.fail_json(msg="this module requires key=value arguments (%s)" % (items)) + if k in params: + self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v)) + params[k] = v + params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + 
params2.update(params) + return (params2, args) def _log_invocation(self): ''' log that ansible ran the module ''' @@ -1201,17 +1209,13 @@ def boolean(self, arg): self.fail_json(msg='Boolean %s not in either boolean list' % arg) def jsonify(self, data): - for encoding in ("utf-8", "latin-1"): + for encoding in ("utf-8", "latin-1", "unicode_escape"): try: return json.dumps(data, encoding=encoding) - # Old systems using old simplejson module does not support encoding keyword. - except TypeError: - try: - new_data = json_dict_bytes_to_unicode(data, encoding=encoding) - except UnicodeDecodeError: - continue - return json.dumps(new_data) - except UnicodeDecodeError: + # Old systems using simplejson module does not support encoding keyword. + except TypeError, e: + return json.dumps(data) + except UnicodeDecodeError, e: continue self.fail_json(msg='Invalid unicode encoding encountered') @@ -1448,7 +1452,7 @@ def run_command(self, args, check_rc=False, close_fds=True, executable=None, dat msg = None st_in = None - # Set a temporary env path if a prefix is passed + # Set a temporart env path if a prefix is passed env=os.environ if path_prefix: env['PATH']="%s:%s" % (path_prefix, env['PATH']) diff --git a/v2/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py similarity index 100% rename from v2/ansible/module_utils/cloudstack.py rename to v1/ansible/module_utils/cloudstack.py diff --git a/v2/ansible/module_utils/database.py b/v1/ansible/module_utils/database.py similarity index 100% rename from v2/ansible/module_utils/database.py rename to v1/ansible/module_utils/database.py diff --git a/v2/ansible/module_utils/ec2.py b/v1/ansible/module_utils/ec2.py similarity index 100% rename from v2/ansible/module_utils/ec2.py rename to v1/ansible/module_utils/ec2.py diff --git a/v2/ansible/module_utils/facts.py b/v1/ansible/module_utils/facts.py similarity index 100% rename from v2/ansible/module_utils/facts.py rename to v1/ansible/module_utils/facts.py diff 
--git a/v2/ansible/module_utils/gce.py b/v1/ansible/module_utils/gce.py similarity index 100% rename from v2/ansible/module_utils/gce.py rename to v1/ansible/module_utils/gce.py diff --git a/v2/ansible/module_utils/known_hosts.py b/v1/ansible/module_utils/known_hosts.py similarity index 100% rename from v2/ansible/module_utils/known_hosts.py rename to v1/ansible/module_utils/known_hosts.py diff --git a/v2/ansible/module_utils/openstack.py b/v1/ansible/module_utils/openstack.py similarity index 100% rename from v2/ansible/module_utils/openstack.py rename to v1/ansible/module_utils/openstack.py diff --git a/v2/ansible/module_utils/powershell.ps1 b/v1/ansible/module_utils/powershell.ps1 similarity index 97% rename from v2/ansible/module_utils/powershell.ps1 rename to v1/ansible/module_utils/powershell.ps1 index 57d2c1b101caa7..ee7d3ddeca4ba8 100644 --- a/v2/ansible/module_utils/powershell.ps1 +++ b/v1/ansible/module_utils/powershell.ps1 @@ -142,14 +142,14 @@ Function ConvertTo-Bool return } -# Helper function to calculate md5 of a file in a way which powershell 3 +# Helper function to calculate a hash of a file in a way which powershell 3 # and above can handle: -Function Get-FileMd5($path) +Function Get-FileChecksum($path) { $hash = "" If (Test-Path -PathType Leaf $path) { - $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); $fp.Dispose(); diff --git a/v2/ansible/module_utils/rax.py b/v1/ansible/module_utils/rax.py similarity index 100% rename from v2/ansible/module_utils/rax.py rename to v1/ansible/module_utils/rax.py diff --git a/v2/ansible/module_utils/redhat.py b/v1/ansible/module_utils/redhat.py similarity index 100% rename from v2/ansible/module_utils/redhat.py rename to 
v1/ansible/module_utils/redhat.py diff --git a/v2/ansible/module_utils/splitter.py b/v1/ansible/module_utils/splitter.py similarity index 100% rename from v2/ansible/module_utils/splitter.py rename to v1/ansible/module_utils/splitter.py diff --git a/v2/ansible/module_utils/urls.py b/v1/ansible/module_utils/urls.py similarity index 100% rename from v2/ansible/module_utils/urls.py rename to v1/ansible/module_utils/urls.py diff --git a/lib/ansible/module_utils/vmware.py b/v1/ansible/module_utils/vmware.py similarity index 100% rename from lib/ansible/module_utils/vmware.py rename to v1/ansible/module_utils/vmware.py diff --git a/lib/ansible/runner/filter_plugins/__init__.py b/v1/ansible/modules/__init__.py similarity index 100% rename from lib/ansible/runner/filter_plugins/__init__.py rename to v1/ansible/modules/__init__.py diff --git a/v1/ansible/playbook/__init__.py b/v1/ansible/playbook/__init__.py new file mode 100644 index 00000000000000..24ba2d3c6e0c06 --- /dev/null +++ b/v1/ansible/playbook/__init__.py @@ -0,0 +1,874 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import ansible.inventory +import ansible.constants as C +import ansible.runner +from ansible.utils.template import template +from ansible import utils +from ansible import errors +from ansible.module_utils.splitter import split_args, unquote +import ansible.callbacks +import ansible.cache +import os +import shlex +import collections +from play import Play +import StringIO +import pipes + +# the setup cache stores all variables about a host +# gathered during the setup step, while the vars cache +# holds all other variables about a host +SETUP_CACHE = ansible.cache.FactCache() +VARS_CACHE = collections.defaultdict(dict) +RESERVED_TAGS = ['all','tagged','untagged','always'] + + +class PlayBook(object): + ''' + runs an ansible playbook, given as a datastructure or YAML filename. + A playbook is a deployment, config management, or automation based + set of commands to run in series. + + multiple plays/tasks do not execute simultaneously, but tasks in each + pattern do execute in parallel (according to the number of forks + requested) among the hosts they address + ''' + + # ***************************************************** + + def __init__(self, + playbook = None, + host_list = C.DEFAULT_HOST_LIST, + module_path = None, + forks = C.DEFAULT_FORKS, + timeout = C.DEFAULT_TIMEOUT, + remote_user = C.DEFAULT_REMOTE_USER, + remote_pass = C.DEFAULT_REMOTE_PASS, + remote_port = None, + transport = C.DEFAULT_TRANSPORT, + private_key_file = C.DEFAULT_PRIVATE_KEY_FILE, + callbacks = None, + runner_callbacks = None, + stats = None, + extra_vars = None, + only_tags = None, + skip_tags = None, + subset = C.DEFAULT_SUBSET, + inventory = None, + check = False, + diff = False, + any_errors_fatal = False, + vault_password = False, + force_handlers = False, + # privilege escalation + become = C.DEFAULT_BECOME, + become_method = C.DEFAULT_BECOME_METHOD, + become_user = C.DEFAULT_BECOME_USER, + become_pass = None, + ): + + """ + playbook: path to a playbook file + host_list: path to 
a file like /etc/ansible/hosts + module_path: path to ansible modules, like /usr/share/ansible/ + forks: desired level of parallelism + timeout: connection timeout + remote_user: run as this user if not specified in a particular play + remote_pass: use this remote password (for all plays) vs using SSH keys + remote_port: default remote port to use if not specified with the host or play + transport: how to connect to hosts that don't specify a transport (local, paramiko, etc) + callbacks output callbacks for the playbook + runner_callbacks: more callbacks, this time for the runner API + stats: holds aggregrate data about events occurring to each host + inventory: can be specified instead of host_list to use a pre-existing inventory object + check: don't change anything, just try to detect some potential changes + any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed + force_handlers: continue to notify and run handlers even if a task fails + """ + + self.SETUP_CACHE = SETUP_CACHE + self.VARS_CACHE = VARS_CACHE + + arguments = [] + if playbook is None: + arguments.append('playbook') + if callbacks is None: + arguments.append('callbacks') + if runner_callbacks is None: + arguments.append('runner_callbacks') + if stats is None: + arguments.append('stats') + if arguments: + raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments)) + + if extra_vars is None: + extra_vars = {} + if only_tags is None: + only_tags = [ 'all' ] + if skip_tags is None: + skip_tags = [] + + self.check = check + self.diff = diff + self.module_path = module_path + self.forks = forks + self.timeout = timeout + self.remote_user = remote_user + self.remote_pass = remote_pass + self.remote_port = remote_port + self.transport = transport + self.callbacks = callbacks + self.runner_callbacks = runner_callbacks + self.stats = stats + self.extra_vars = extra_vars + self.global_vars = {} + self.private_key_file = private_key_file + 
self.only_tags = only_tags + self.skip_tags = skip_tags + self.any_errors_fatal = any_errors_fatal + self.vault_password = vault_password + self.force_handlers = force_handlers + + self.become = become + self.become_method = become_method + self.become_user = become_user + self.become_pass = become_pass + + self.callbacks.playbook = self + self.runner_callbacks.playbook = self + + if inventory is None: + self.inventory = ansible.inventory.Inventory(host_list) + self.inventory.subset(subset) + else: + self.inventory = inventory + + if self.module_path is not None: + utils.plugins.module_finder.add_directory(self.module_path) + + self.basedir = os.path.dirname(playbook) or '.' + utils.plugins.push_basedir(self.basedir) + + # let inventory know the playbook basedir so it can load more vars + self.inventory.set_playbook_basedir(self.basedir) + + vars = extra_vars.copy() + vars['playbook_dir'] = os.path.abspath(self.basedir) + if self.inventory.basedir() is not None: + vars['inventory_dir'] = self.inventory.basedir() + + if self.inventory.src() is not None: + vars['inventory_file'] = self.inventory.src() + + self.filename = playbook + (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars) + ansible.callbacks.load_callback_plugins() + ansible.callbacks.set_playbook(self.callbacks, self) + + self._ansible_version = utils.version_info(gitinfo=True) + + # ***************************************************** + + def _get_playbook_vars(self, play_ds, existing_vars): + ''' + Gets the vars specified with the play and blends them + with any existing vars that have already been read in + ''' + new_vars = existing_vars.copy() + if 'vars' in play_ds: + if isinstance(play_ds['vars'], dict): + new_vars.update(play_ds['vars']) + elif isinstance(play_ds['vars'], list): + for v in play_ds['vars']: + new_vars.update(v) + return new_vars + + # ***************************************************** + + def _get_include_info(self, play_ds, basedir, 
existing_vars={}): + ''' + Gets any key=value pairs specified with the included file + name and returns the merged vars along with the path + ''' + new_vars = existing_vars.copy() + tokens = split_args(play_ds.get('include', '')) + for t in tokens[1:]: + try: + (k,v) = unquote(t).split("=", 1) + new_vars[k] = template(basedir, v, new_vars) + except ValueError, e: + raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t) + + return (new_vars, unquote(tokens[0])) + + # ***************************************************** + + def _get_playbook_vars_files(self, play_ds, existing_vars_files): + new_vars_files = list(existing_vars_files) + if 'vars_files' in play_ds: + new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files']) + return new_vars_files + + # ***************************************************** + + def _extend_play_vars(self, play, vars={}): + ''' + Extends the given play's variables with the additional specified vars. + ''' + + if 'vars' not in play or not play['vars']: + # someone left out or put an empty "vars:" entry in their playbook + return vars.copy() + + play_vars = None + if isinstance(play['vars'], dict): + play_vars = play['vars'].copy() + play_vars.update(vars) + elif isinstance(play['vars'], list): + # nobody should really do this, but handle vars: a=1 b=2 + play_vars = play['vars'][:] + play_vars.extend([{k:v} for k,v in vars.iteritems()]) + + return play_vars + + # ***************************************************** + + def _load_playbook_from_file(self, path, vars={}, vars_files=[]): + ''' + run top level error checking on playbooks and allow them to include other playbooks. 
+ ''' + + playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password) + accumulated_plays = [] + play_basedirs = [] + + if type(playbook_data) != list: + raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data)) + + basedir = os.path.dirname(path) or '.' + utils.plugins.push_basedir(basedir) + for play in playbook_data: + if type(play) != dict: + raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play) + + if 'include' in play: + # a playbook (list of plays) decided to include some other list of plays + # from another file. The result is a flat list of plays in the end. + + play_vars = self._get_playbook_vars(play, vars) + play_vars_files = self._get_playbook_vars_files(play, vars_files) + inc_vars, inc_path = self._get_include_info(play, basedir, play_vars) + play_vars.update(inc_vars) + + included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars)) + (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files) + for p in plays: + # support for parameterized play includes works by passing + # those variables along to the subservient play + p['vars'] = self._extend_play_vars(p, play_vars) + # now add in the vars_files + p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files) + + accumulated_plays.extend(plays) + play_basedirs.extend(basedirs) + + else: + + # this is a normal (non-included play) + accumulated_plays.append(play) + play_basedirs.append(basedir) + + return (accumulated_plays, play_basedirs) + + # ***************************************************** + + def run(self): + ''' run all patterns in the playbook ''' + plays = [] + matched_tags_all = set() + unmatched_tags_all = set() + + # loop through all patterns and run them + self.callbacks.on_start() + for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs): + 
play = Play(self, play_ds, play_basedir, vault_password=self.vault_password) + assert play is not None + + matched_tags, unmatched_tags = play.compare_tags(self.only_tags) + + matched_tags_all = matched_tags_all | matched_tags + unmatched_tags_all = unmatched_tags_all | unmatched_tags + + # Remove tasks we wish to skip + matched_tags = matched_tags - set(self.skip_tags) + + # if we have matched_tags, the play must be run. + # if the play contains no tasks, assume we just want to gather facts + # in this case there are actually 3 meta tasks (handler flushes) not 0 + # tasks, so that's why there's a check against 3 + if (len(matched_tags) > 0 or len(play.tasks()) == 3): + plays.append(play) + + # if the playbook is invoked with --tags or --skip-tags that don't + # exist at all in the playbooks then we need to raise an error so that + # the user can correct the arguments. + unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) - + (matched_tags_all | unmatched_tags_all)) + + for t in RESERVED_TAGS: + unknown_tags.discard(t) + + if len(unknown_tags) > 0: + for t in RESERVED_TAGS: + unmatched_tags_all.discard(t) + msg = 'tag(s) not found in playbook: %s. 
possible values: %s' + unknown = ','.join(sorted(unknown_tags)) + unmatched = ','.join(sorted(unmatched_tags_all)) + raise errors.AnsibleError(msg % (unknown, unmatched)) + + for play in plays: + ansible.callbacks.set_play(self.callbacks, play) + ansible.callbacks.set_play(self.runner_callbacks, play) + if not self._run_play(play): + break + + ansible.callbacks.set_play(self.callbacks, None) + ansible.callbacks.set_play(self.runner_callbacks, None) + + # summarize the results + results = {} + for host in self.stats.processed.keys(): + results[host] = self.stats.summarize(host) + return results + + # ***************************************************** + + def _async_poll(self, poller, async_seconds, async_poll_interval): + ''' launch an async job, if poll_interval is set, wait for completion ''' + + results = poller.wait(async_seconds, async_poll_interval) + + # mark any hosts that are still listed as started as failed + # since these likely got killed by async_wrapper + for host in poller.hosts_to_poll: + reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' } + self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id']) + results['contacted'][host] = reason + + return results + + # ***************************************************** + + def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False): + ''' returns a list of hosts that haven't failed and aren't dark ''' + + return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)] + + # ***************************************************** + + def _run_task_internal(self, task, include_failed=False): + ''' run a particular module step in a playbook ''' + + hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed) + self.inventory.restrict_to(hosts) + + runner = ansible.runner.Runner( + pattern=task.play.hosts, + inventory=self.inventory, + 
module_name=task.module_name, + module_args=task.module_args, + forks=self.forks, + remote_pass=self.remote_pass, + module_path=self.module_path, + timeout=self.timeout, + remote_user=task.remote_user, + remote_port=task.play.remote_port, + module_vars=task.module_vars, + play_vars=task.play_vars, + play_file_vars=task.play_file_vars, + role_vars=task.role_vars, + role_params=task.role_params, + default_vars=task.default_vars, + extra_vars=self.extra_vars, + private_key_file=self.private_key_file, + setup_cache=self.SETUP_CACHE, + vars_cache=self.VARS_CACHE, + basedir=task.play.basedir, + conditional=task.when, + callbacks=self.runner_callbacks, + transport=task.transport, + is_playbook=True, + check=self.check, + diff=self.diff, + environment=task.environment, + complex_args=task.args, + accelerate=task.play.accelerate, + accelerate_port=task.play.accelerate_port, + accelerate_ipv6=task.play.accelerate_ipv6, + error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, + vault_pass = self.vault_password, + run_hosts=hosts, + no_log=task.no_log, + run_once=task.run_once, + become=task.become, + become_method=task.become_method, + become_user=task.become_user, + become_pass=task.become_pass, + ) + + runner.module_vars.update({'play_hosts': hosts}) + runner.module_vars.update({'ansible_version': self._ansible_version}) + + if task.async_seconds == 0: + results = runner.run() + else: + results, poller = runner.run_async(task.async_seconds) + self.stats.compute(results) + if task.async_poll_interval > 0: + # if not polling, playbook requested fire and forget, so don't poll + results = self._async_poll(poller, task.async_seconds, task.async_poll_interval) + else: + for (host, res) in results.get('contacted', {}).iteritems(): + self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id']) + + contacted = results.get('contacted',{}) + dark = results.get('dark', {}) + + self.inventory.lift_restriction() + + if len(contacted.keys()) == 0 and 
len(dark.keys()) == 0: + return None + + return results + + # ***************************************************** + + def _run_task(self, play, task, is_handler): + ''' run a single task in the playbook and recursively run any subtasks. ''' + + ansible.callbacks.set_task(self.callbacks, task) + ansible.callbacks.set_task(self.runner_callbacks, task) + + if task.role_name: + name = '%s | %s' % (task.role_name, task.name) + else: + name = task.name + + try: + # v1 HACK: we don't have enough information to template many names + # at this point. Rather than making this work for all cases in + # v1, just make this degrade gracefully. Will fix in v2 + name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False) + except: + pass + + self.callbacks.on_task_start(name, is_handler) + if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task: + ansible.callbacks.set_task(self.callbacks, None) + ansible.callbacks.set_task(self.runner_callbacks, None) + return True + + # template ignore_errors + # TODO: Is this needed here? cond is templated again in + # check_conditional after some more manipulations. 
+ # TODO: we don't have enough information here to template cond either + # (see note on templating name above) + cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False) + task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR) + + # load up an appropriate ansible runner to run the task in parallel + include_failed = is_handler and play.force_handlers + results = self._run_task_internal(task, include_failed=include_failed) + + # if no hosts are matched, carry on + hosts_remaining = True + if results is None: + hosts_remaining = False + results = {} + + contacted = results.get('contacted', {}) + self.stats.compute(results, ignore_errors=task.ignore_errors) + + def _register_play_vars(host, result): + # when 'register' is used, persist the result in the vars cache + # rather than the setup cache - vars should be transient between + # playbook executions + if 'stdout' in result and 'stdout_lines' not in result: + result['stdout_lines'] = result['stdout'].splitlines() + utils.update_hash(self.VARS_CACHE, host, {task.register: result}) + + def _save_play_facts(host, facts): + # saves play facts in SETUP_CACHE, unless the module executed was + # set_fact, in which case we add them to the VARS_CACHE + if task.module_name in ('set_fact', 'include_vars'): + utils.update_hash(self.VARS_CACHE, host, facts) + else: + utils.update_hash(self.SETUP_CACHE, host, facts) + + # add facts to the global setup cache + for host, result in contacted.iteritems(): + if 'results' in result: + # task ran with_ lookup plugin, so facts are encapsulated in + # multiple list items in the results key + for res in result['results']: + if type(res) == dict: + facts = res.get('ansible_facts', {}) + _save_play_facts(host, facts) + else: + # when facts are returned, persist them in the setup cache + facts = result.get('ansible_facts', {}) + _save_play_facts(host, facts) + + # if requested, 
save the result into the registered variable name + if task.register: + _register_play_vars(host, result) + + # also have to register some failed, but ignored, tasks + if task.ignore_errors and task.register: + failed = results.get('failed', {}) + for host, result in failed.iteritems(): + _register_play_vars(host, result) + + # flag which notify handlers need to be run + if len(task.notify) > 0: + for host, results in results.get('contacted',{}).iteritems(): + if results.get('changed', False): + for handler_name in task.notify: + self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host) + + ansible.callbacks.set_task(self.callbacks, None) + ansible.callbacks.set_task(self.runner_callbacks, None) + return hosts_remaining + + # ***************************************************** + + def _flag_handler(self, play, handler_name, host): + ''' + if a task has any notify elements, flag handlers for run + at end of execution cycle for hosts that have indicated + changes have been made + ''' + + found = False + for x in play.handlers(): + if handler_name == template(play.basedir, x.name, x.module_vars): + found = True + self.callbacks.on_notify(host, x.name) + x.notified_by.append(host) + if not found: + raise errors.AnsibleError("change handler (%s) is not defined" % handler_name) + + # ***************************************************** + + def _do_setup_step(self, play): + ''' get facts from the remote system ''' + + host_list = self._trim_unavailable_hosts(play._play_hosts) + + if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart': + host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]] + if len(host_list) == 0: + return {} + elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'): + return {} + + self.callbacks.on_setup() + self.inventory.restrict_to(host_list) + + ansible.callbacks.set_task(self.callbacks, None) + 
ansible.callbacks.set_task(self.runner_callbacks, None) + + # push any variables down to the system + setup_results = ansible.runner.Runner( + basedir=self.basedir, + pattern=play.hosts, + module_name='setup', + module_args={}, + inventory=self.inventory, + forks=self.forks, + module_path=self.module_path, + timeout=self.timeout, + remote_user=play.remote_user, + remote_pass=self.remote_pass, + remote_port=play.remote_port, + private_key_file=self.private_key_file, + setup_cache=self.SETUP_CACHE, + vars_cache=self.VARS_CACHE, + callbacks=self.runner_callbacks, + become=play.become, + become_method=play.become_method, + become_user=play.become_user, + become_pass=self.become_pass, + vault_pass=self.vault_password, + transport=play.transport, + is_playbook=True, + module_vars=play.vars, + play_vars=play.vars, + play_file_vars=play.vars_file_vars, + role_vars=play.role_vars, + default_vars=play.default_vars, + check=self.check, + diff=self.diff, + accelerate=play.accelerate, + accelerate_port=play.accelerate_port, + ).run() + self.stats.compute(setup_results, setup=True) + + self.inventory.lift_restriction() + + # now for each result, load into the setup cache so we can + # let runner template out future commands + setup_ok = setup_results.get('contacted', {}) + for (host, result) in setup_ok.iteritems(): + utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True}) + utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {})) + return setup_results + + # ***************************************************** + + + def generate_retry_inventory(self, replay_hosts): + ''' + called by /usr/bin/ansible when a playbook run fails. It generates an inventory + that allows re-running on ONLY the failed hosts. This may duplicate some + variable information in group_vars/host_vars but that is ok, and expected. 
+ ''' + + buf = StringIO.StringIO() + for x in replay_hosts: + buf.write("%s\n" % x) + basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH) + filename = "%s.retry" % os.path.basename(self.filename) + filename = filename.replace(".yml","") + filename = os.path.join(basedir, filename) + + try: + if not os.path.exists(basedir): + os.makedirs(basedir) + + fd = open(filename, 'w') + fd.write(buf.getvalue()) + fd.close() + except: + ansible.callbacks.display( + "\nERROR: could not create retry file. Check the value of \n" + + "the configuration variable 'retry_files_save_path' or set \n" + + "'retry_files_enabled' to False to avoid this message.\n", + color='red' + ) + return None + + return filename + + # ***************************************************** + def tasks_to_run_in_play(self, play): + + tasks = [] + + for task in play.tasks(): + # only run the task if the requested tags match or has 'always' tag + u = set(['untagged']) + task_set = set(task.tags) + + if 'always' in task.tags: + should_run = True + else: + if 'all' in self.only_tags: + should_run = True + else: + should_run = False + if 'tagged' in self.only_tags: + if task_set != u: + should_run = True + elif 'untagged' in self.only_tags: + if task_set == u: + should_run = True + else: + if task_set.intersection(self.only_tags): + should_run = True + + # Check for tags that we need to skip + if 'all' in self.skip_tags: + should_run = False + else: + if 'tagged' in self.skip_tags: + if task_set != u: + should_run = False + elif 'untagged' in self.skip_tags: + if task_set == u: + should_run = False + else: + if should_run: + if task_set.intersection(self.skip_tags): + should_run = False + + if should_run: + tasks.append(task) + + return tasks + + # ***************************************************** + def _run_play(self, play): + ''' run a list of tasks for a given pattern, in order ''' + + self.callbacks.on_play_start(play.name) + # Get the hosts for this play + play._play_hosts = 
self.inventory.list_hosts(play.hosts) + # if no hosts matches this play, drop out + if not play._play_hosts: + self.callbacks.on_no_hosts_matched() + return True + + # get facts from system + self._do_setup_step(play) + + # now with that data, handle contentional variable file imports! + all_hosts = self._trim_unavailable_hosts(play._play_hosts) + play.update_vars_files(all_hosts, vault_password=self.vault_password) + hosts_count = len(all_hosts) + + if play.serial.endswith("%"): + + # This is a percentage, so calculate it based on the + # number of hosts + serial_pct = int(play.serial.replace("%","")) + serial = int((serial_pct/100.0) * len(all_hosts)) + + # Ensure that no matter how small the percentage, serial + # can never fall below 1, so that things actually happen + serial = max(serial, 1) + else: + serial = int(play.serial) + + serialized_batch = [] + if serial <= 0: + serialized_batch = [all_hosts] + else: + # do N forks all the way through before moving to next + while len(all_hosts) > 0: + play_hosts = [] + for x in range(serial): + if len(all_hosts) > 0: + play_hosts.append(all_hosts.pop(0)) + serialized_batch.append(play_hosts) + + task_errors = False + for on_hosts in serialized_batch: + + # restrict the play to just the hosts we have in our on_hosts block that are + # available. + play._play_hosts = self._trim_unavailable_hosts(on_hosts) + self.inventory.also_restrict_to(on_hosts) + + for task in self.tasks_to_run_in_play(play): + + if task.meta is not None: + # meta tasks can force handlers to run mid-play + if task.meta == 'flush_handlers': + self.run_handlers(play) + + # skip calling the handler till the play is finished + continue + + if not self._run_task(play, task, False): + # whether no hosts matched is fatal or not depends if it was on the initial step. + # if we got exactly no hosts on the first step (setup!) 
then the host group + # just didn't match anything and that's ok + return False + + # Get a new list of what hosts are left as available, the ones that + # did not go fail/dark during the task + host_list = self._trim_unavailable_hosts(play._play_hosts) + + # Set max_fail_pct to 0, So if any hosts fails, bail out + if task.any_errors_fatal and len(host_list) < hosts_count: + play.max_fail_pct = 0 + + # If threshold for max nodes failed is exceeded, bail out. + if play.serial > 0: + # if serial is set, we need to shorten the size of host_count + play_count = len(play._play_hosts) + if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count): + host_list = None + else: + if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count): + host_list = None + + # if no hosts remain, drop out + if not host_list: + if play.force_handlers: + task_errors = True + break + else: + self.callbacks.on_no_hosts_remaining() + return False + + # lift restrictions after each play finishes + self.inventory.lift_also_restriction() + + if task_errors and not play.force_handlers: + # if there were failed tasks and handler execution + # is not forced, quit the play with an error + return False + else: + # no errors, go ahead and execute all handlers + if not self.run_handlers(play): + return False + + return True + + + def run_handlers(self, play): + on_hosts = play._play_hosts + hosts_count = len(on_hosts) + for task in play.tasks(): + if task.meta is not None: + + fired_names = {} + for handler in play.handlers(): + if len(handler.notified_by) > 0: + self.inventory.restrict_to(handler.notified_by) + + # Resolve the variables first + handler_name = template(play.basedir, handler.name, handler.module_vars) + if handler_name not in fired_names: + self._run_task(play, handler, True) + # prevent duplicate handler includes from running more than once + fired_names[handler_name] = 1 + + host_list = self._trim_unavailable_hosts(play._play_hosts) + if 
handler.any_errors_fatal and len(host_list) < hosts_count: + play.max_fail_pct = 0 + if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count): + host_list = None + if not host_list and not play.force_handlers: + self.callbacks.on_no_hosts_remaining() + return False + + self.inventory.lift_restriction() + new_list = handler.notified_by[:] + for host in handler.notified_by: + if host in on_hosts: + while host in new_list: + new_list.remove(host) + handler.notified_by = new_list + + continue + + return True diff --git a/v1/ansible/playbook/play.py b/v1/ansible/playbook/play.py new file mode 100644 index 00000000000000..6ee85e0bf48939 --- /dev/null +++ b/v1/ansible/playbook/play.py @@ -0,0 +1,949 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +############################################# + +from ansible.utils.template import template +from ansible import utils +from ansible import errors +from ansible.playbook.task import Task +from ansible.module_utils.splitter import split_args, unquote +import ansible.constants as C +import pipes +import shlex +import os +import sys +import uuid + + +class Play(object): + + _pb_common = [ + 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become', + 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts', + 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su', + 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt', + 'vault_password', + ] + + __slots__ = _pb_common + [ + '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir', + 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port', + 'role_vars', 'transport', 'vars_file_vars', + ] + + # to catch typos and so forth -- these are userland names + # and don't line up 1:1 with how they are stored + VALID_KEYS = frozenset(_pb_common + [ + 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks', + 'pre_tasks', 'role_names', 'tasks', 'user', + ]) + + # ************************************************* + + def __init__(self, playbook, ds, basedir, vault_password=None): + ''' constructor loads from a play datastructure ''' + + for x in ds.keys(): + if not x in Play.VALID_KEYS: + raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x) + + # allow all playbook keys to be set by --extra-vars + self.vars = ds.get('vars', {}) + self.vars_prompt = ds.get('vars_prompt', {}) + self.playbook = playbook + self.vars = self._get_vars() + self.vars_file_vars = dict() # these are vars read in from vars_files: + self.role_vars = dict() # these are vars read in from vars/main.yml files in roles + self.basedir = basedir + self.roles = ds.get('roles', None) + self.tags 
= ds.get('tags', None) + self.vault_password = vault_password + self.environment = ds.get('environment', {}) + + if self.tags is None: + self.tags = [] + elif type(self.tags) in [ str, unicode ]: + self.tags = self.tags.split(",") + elif type(self.tags) != list: + self.tags = [] + + # make sure we have some special internal variables set, which + # we use later when loading tasks and handlers + load_vars = dict() + load_vars['playbook_dir'] = os.path.abspath(self.basedir) + if self.playbook.inventory.basedir() is not None: + load_vars['inventory_dir'] = self.playbook.inventory.basedir() + if self.playbook.inventory.src() is not None: + load_vars['inventory_file'] = self.playbook.inventory.src() + + # We first load the vars files from the datastructure + # so we have the default variables to pass into the roles + self.vars_files = ds.get('vars_files', []) + if not isinstance(self.vars_files, list): + raise errors.AnsibleError('vars_files must be a list') + processed_vars_files = self._update_vars_files_for_host(None) + + # now we load the roles into the datastructure + self.included_roles = [] + ds = self._load_roles(self.roles, ds) + + # and finally re-process the vars files as they may have been updated + # by the included roles, but exclude any which have been processed + self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files) + if not isinstance(self.vars_files, list): + raise errors.AnsibleError('vars_files must be a list') + + self._update_vars_files_for_host(None) + + # template everything to be efficient, but do not pre-mature template + # tasks/handlers as they may have inventory scope overrides. 
We also + # create a set of temporary variables for templating, so we don't + # trample on the existing vars structures + _tasks = ds.pop('tasks', []) + _handlers = ds.pop('handlers', []) + + temp_vars = utils.combine_vars(self.vars, self.vars_file_vars) + temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars) + + try: + ds = template(basedir, ds, temp_vars) + except errors.AnsibleError, e: + utils.warning("non fatal error while trying to template play variables: %s" % (str(e))) + + ds['tasks'] = _tasks + ds['handlers'] = _handlers + + self._ds = ds + + hosts = ds.get('hosts') + if hosts is None: + raise errors.AnsibleError('hosts declaration is required') + elif isinstance(hosts, list): + try: + hosts = ';'.join(hosts) + except TypeError,e: + raise errors.AnsibleError('improper host declaration: %s' % str(e)) + + self.serial = str(ds.get('serial', 0)) + self.hosts = hosts + self.name = ds.get('name', self.hosts) + self._tasks = ds.get('tasks', []) + self._handlers = ds.get('handlers', []) + self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user)) + self.remote_port = ds.get('port', self.playbook.remote_port) + self.transport = ds.get('connection', self.playbook.transport) + self.remote_port = self.remote_port + self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false')) + self.accelerate = utils.boolean(ds.get('accelerate', 'false')) + self.accelerate_port = ds.get('accelerate_port', None) + self.accelerate_ipv6 = ds.get('accelerate_ipv6', False) + self.max_fail_pct = int(ds.get('max_fail_percentage', 100)) + self.no_log = utils.boolean(ds.get('no_log', 'false')) + self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers)) + + # Fail out if user specifies conflicting privilege escalations + if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')): + raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("sudo", 
"sudo_user") cannot be used together') + if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')): + raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("su", "su_user") cannot be used together') + if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')): + raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together') + + # become settings are inherited and updated normally + self.become = ds.get('become', self.playbook.become) + self.become_method = ds.get('become_method', self.playbook.become_method) + self.become_user = ds.get('become_user', self.playbook.become_user) + + # Make sure current play settings are reflected in become fields + if 'sudo' in ds: + self.become=ds['sudo'] + self.become_method='sudo' + if 'sudo_user' in ds: + self.become_user=ds['sudo_user'] + elif 'su' in ds: + self.become=True + self.become=ds['su'] + self.become_method='su' + if 'su_user' in ds: + self.become_user=ds['su_user'] + + # gather_facts is not a simple boolean, as None means that a 'smart' + # fact gathering mode will be used, so we need to be careful here as + # calling utils.boolean(None) returns False + self.gather_facts = ds.get('gather_facts', None) + if self.gather_facts is not None: + self.gather_facts = utils.boolean(self.gather_facts) + + load_vars['role_names'] = ds.get('role_names', []) + + self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars) + self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars) + + # apply any missing tags to role tasks + self._late_merge_role_tags() + + # place holder for the discovered hosts to be used in this play + self._play_hosts = None + + # ************************************************* + + def _get_role_path(self, role): + """ + Returns the path on disk to the directory containing + the role directories like tasks, templates, etc. 
Also + returns any variables that were included with the role + """ + orig_path = template(self.basedir,role,self.vars) + + role_vars = {} + if type(orig_path) == dict: + # what, not a path? + role_name = orig_path.get('role', None) + if role_name is None: + raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path) + role_vars = orig_path + else: + role_name = utils.role_spec_parse(orig_path)["name"] + + role_path = None + + possible_paths = [ + utils.path_dwim(self.basedir, os.path.join('roles', role_name)), + utils.path_dwim(self.basedir, role_name) + ] + + if C.DEFAULT_ROLES_PATH: + search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep) + for loc in search_locations: + loc = os.path.expanduser(loc) + possible_paths.append(utils.path_dwim(loc, role_name)) + + for path_option in possible_paths: + if os.path.isdir(path_option): + role_path = path_option + break + + if role_path is None: + raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths)) + + return (role_path, role_vars) + + def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0): + # this number is arbitrary, but it seems sane + if level > 20: + raise errors.AnsibleError("too many levels of recursion while resolving role dependencies") + for role in roles: + role_path,role_vars = self._get_role_path(role) + + # save just the role params for this role, which exclude the special + # keywords 'role', 'tags', and 'when'. 
+ role_params = role_vars.copy() + for item in ('role', 'tags', 'when'): + if item in role_params: + del role_params[item] + + role_vars = utils.combine_vars(passed_vars, role_vars) + + vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))) + vars_data = {} + if os.path.isfile(vars): + vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password) + if vars_data: + if not isinstance(vars_data, dict): + raise errors.AnsibleError("vars from '%s' are not a dict" % vars) + role_vars = utils.combine_vars(vars_data, role_vars) + + defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))) + defaults_data = {} + if os.path.isfile(defaults): + defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password) + + # the meta directory contains the yaml that should + # hold the list of dependencies (if any) + meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))) + if os.path.isfile(meta): + data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password) + if data: + dependencies = data.get('dependencies',[]) + if dependencies is None: + dependencies = [] + for dep in dependencies: + allow_dupes = False + (dep_path,dep_vars) = self._get_role_path(dep) + + # save the dep params, just as we did above + dep_params = dep_vars.copy() + for item in ('role', 'tags', 'when'): + if item in dep_params: + del dep_params[item] + + meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta'))) + if os.path.isfile(meta): + meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password) + if meta_data: + allow_dupes = utils.boolean(meta_data.get('allow_duplicates','')) + + # if any tags were specified as role/dep variables, merge + # them into the current dep_vars so they're passed on to any + # further dependencies too, and so we only have one place + # (dep_vars) to look for tags going 
forward + def __merge_tags(var_obj): + old_tags = dep_vars.get('tags', []) + if isinstance(old_tags, basestring): + old_tags = [old_tags, ] + if isinstance(var_obj, dict): + new_tags = var_obj.get('tags', []) + if isinstance(new_tags, basestring): + new_tags = [new_tags, ] + else: + new_tags = [] + return list(set(old_tags).union(set(new_tags))) + + dep_vars['tags'] = __merge_tags(role_vars) + dep_vars['tags'] = __merge_tags(passed_vars) + + # if tags are set from this role, merge them + # into the tags list for the dependent role + if "tags" in passed_vars: + for included_role_dep in dep_stack: + included_dep_name = included_role_dep[0] + included_dep_vars = included_role_dep[2] + if included_dep_name == dep: + if "tags" in included_dep_vars: + included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"]))) + else: + included_dep_vars["tags"] = passed_vars["tags"][:] + + dep_vars = utils.combine_vars(passed_vars, dep_vars) + dep_vars = utils.combine_vars(role_vars, dep_vars) + + vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars'))) + vars_data = {} + if os.path.isfile(vars): + vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password) + if vars_data: + dep_vars = utils.combine_vars(dep_vars, vars_data) + pass + + defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults'))) + dep_defaults_data = {} + if os.path.isfile(defaults): + dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password) + if 'role' in dep_vars: + del dep_vars['role'] + + if not allow_dupes: + if dep in self.included_roles: + # skip back to the top, since we don't want to + # do anything else with this role + continue + else: + self.included_roles.append(dep) + + def _merge_conditional(cur_conditionals, new_conditionals): + if isinstance(new_conditionals, (basestring, bool)): + cur_conditionals.append(new_conditionals) + elif 
isinstance(new_conditionals, list): + cur_conditionals.extend(new_conditionals) + + # pass along conditionals from roles to dep roles + passed_when = passed_vars.get('when') + role_when = role_vars.get('when') + dep_when = dep_vars.get('when') + + tmpcond = [] + _merge_conditional(tmpcond, passed_when) + _merge_conditional(tmpcond, role_when) + _merge_conditional(tmpcond, dep_when) + + if len(tmpcond) > 0: + dep_vars['when'] = tmpcond + + self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1) + dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data]) + + # only add the current role when we're at the top level, + # otherwise we'll end up in a recursive loop + if level == 0: + self.included_roles.append(role) + dep_stack.append([role, role_path, role_vars, role_params, defaults_data]) + return dep_stack + + def _load_role_vars_files(self, vars_files): + # process variables stored in vars/main.yml files + role_vars = {} + for filename in vars_files: + if os.path.exists(filename): + new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password) + if new_vars: + if type(new_vars) != dict: + raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars))) + role_vars = utils.combine_vars(role_vars, new_vars) + + return role_vars + + def _load_role_defaults(self, defaults_files): + # process default variables + default_vars = {} + for filename in defaults_files: + if os.path.exists(filename): + new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password) + if new_default_vars: + if type(new_default_vars) != dict: + raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_default_vars))) + default_vars = utils.combine_vars(default_vars, new_default_vars) + + return default_vars + + def _load_roles(self, roles, ds): + # a role is a name that auto-includes the following if they exist + # /tasks/main.yml + # 
/handlers/main.yml + # /vars/main.yml + # /library + # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found + + if roles is None: + roles = [] + if type(roles) != list: + raise errors.AnsibleError("value of 'roles:' must be a list") + + new_tasks = [] + new_handlers = [] + role_vars_files = [] + defaults_files = [] + + pre_tasks = ds.get('pre_tasks', None) + if type(pre_tasks) != list: + pre_tasks = [] + for x in pre_tasks: + new_tasks.append(x) + + # flush handlers after pre_tasks + new_tasks.append(dict(meta='flush_handlers')) + + roles = self._build_role_dependencies(roles, [], {}) + + # give each role an uuid and + # make role_path available as variable to the task + for idx, val in enumerate(roles): + this_uuid = str(uuid.uuid4()) + roles[idx][-3]['role_uuid'] = this_uuid + roles[idx][-3]['role_path'] = roles[idx][1] + + role_names = [] + + for (role, role_path, role_vars, role_params, default_vars) in roles: + # special vars must be extracted from the dict to the included tasks + special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ] + special_vars = {} + for k in special_keys: + if k in role_vars: + special_vars[k] = role_vars[k] + + task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks')) + handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers')) + vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')) + meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')) + defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')) + + task = self._resolve_main(task_basepath) + handler = self._resolve_main(handler_basepath) + vars_file = self._resolve_main(vars_basepath) + meta_file = self._resolve_main(meta_basepath) + defaults_file = self._resolve_main(defaults_basepath) + + library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library')) + + missing = 
lambda f: not os.path.isfile(f) + if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library): + raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library)) + + if isinstance(role, dict): + role_name = role['role'] + else: + role_name = utils.role_spec_parse(role)["name"] + + role_names.append(role_name) + if os.path.isfile(task): + nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name) + for k in special_keys: + if k in special_vars: + nt[k] = special_vars[k] + new_tasks.append(nt) + if os.path.isfile(handler): + nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name) + for k in special_keys: + if k in special_vars: + nt[k] = special_vars[k] + new_handlers.append(nt) + if os.path.isfile(vars_file): + role_vars_files.append(vars_file) + if os.path.isfile(defaults_file): + defaults_files.append(defaults_file) + if os.path.isdir(library): + utils.plugins.module_finder.add_directory(library) + + tasks = ds.get('tasks', None) + post_tasks = ds.get('post_tasks', None) + handlers = ds.get('handlers', None) + vars_files = ds.get('vars_files', None) + + if type(tasks) != list: + tasks = [] + if type(handlers) != list: + handlers = [] + if type(vars_files) != list: + vars_files = [] + if type(post_tasks) != list: + post_tasks = [] + + new_tasks.extend(tasks) + # flush handlers after tasks + role tasks + new_tasks.append(dict(meta='flush_handlers')) + new_tasks.extend(post_tasks) + # flush handlers after post tasks + new_tasks.append(dict(meta='flush_handlers')) + + new_handlers.extend(handlers) + + ds['tasks'] = new_tasks + ds['handlers'] = new_handlers + ds['role_names'] = role_names + + self.role_vars = self._load_role_vars_files(role_vars_files) + self.default_vars = 
self._load_role_defaults(defaults_files) + + return ds + + # ************************************************* + + def _resolve_main(self, basepath): + ''' flexibly handle variations in main filenames ''' + # these filenames are acceptable: + mains = ( + os.path.join(basepath, 'main'), + os.path.join(basepath, 'main.yml'), + os.path.join(basepath, 'main.yaml'), + os.path.join(basepath, 'main.json'), + ) + if sum([os.path.isfile(x) for x in mains]) > 1: + raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath)) + else: + for m in mains: + if os.path.isfile(m): + return m # exactly one main file + return mains[0] # zero mains (we still need to return something) + + # ************************************************* + + def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None, + additional_conditions=None, original_file=None, role_name=None): + ''' handle task and handler include statements ''' + + results = [] + if tasks is None: + # support empty handler files, and the like. 
+ tasks = [] + if additional_conditions is None: + additional_conditions = [] + if vars is None: + vars = {} + if role_params is None: + role_params = {} + if default_vars is None: + default_vars = {} + if become_vars is None: + become_vars = {} + + old_conditions = list(additional_conditions) + + for x in tasks: + + # prevent assigning the same conditions to each task on an include + included_additional_conditions = list(old_conditions) + + if not isinstance(x, dict): + raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file)) + + # evaluate privilege escalation vars for current and child tasks + included_become_vars = {} + for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]: + if k in x: + included_become_vars[k] = x[k] + elif k in become_vars: + included_become_vars[k] = become_vars[k] + x[k] = become_vars[k] + + task_vars = vars.copy() + if original_file: + task_vars['_original_file'] = original_file + + if 'meta' in x: + if x['meta'] == 'flush_handlers': + if role_name and 'role_name' not in x: + x['role_name'] = role_name + results.append(Task(self, x, module_vars=task_vars, role_name=role_name)) + continue + + if 'include' in x: + tokens = split_args(str(x['include'])) + included_additional_conditions = list(additional_conditions) + include_vars = {} + for k in x: + if k.startswith("with_"): + if original_file: + offender = " (in %s)" % original_file + else: + offender = "" + utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True) + elif k.startswith("when_"): + utils.deprecated("\"when_:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True) + elif k == 'when': + if isinstance(x[k], (basestring, bool)): + included_additional_conditions.append(x[k]) + elif type(x[k]) is list: + included_additional_conditions.extend(x[k]) + elif k in ("include", "vars", "role_params", 
"default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"): + continue + else: + include_vars[k] = x[k] + + # get any role parameters specified + role_params = x.get('role_params', {}) + + # get any role default variables specified + default_vars = x.get('default_vars', {}) + if not default_vars: + default_vars = self.default_vars + else: + default_vars = utils.combine_vars(self.default_vars, default_vars) + + # append the vars defined with the include (from above) + # as well as the old-style 'vars' element. The old-style + # vars are given higher precedence here (just in case) + task_vars = utils.combine_vars(task_vars, include_vars) + if 'vars' in x: + task_vars = utils.combine_vars(task_vars, x['vars']) + + new_role = None + if 'role_name' in x: + new_role = x['role_name'] + + mv = task_vars.copy() + for t in tokens[1:]: + (k,v) = t.split("=", 1) + v = unquote(v) + mv[k] = template(self.basedir, v, mv) + dirname = self.basedir + if original_file: + dirname = os.path.dirname(original_file) + + # temp vars are used here to avoid trampling on the existing vars structures + temp_vars = utils.combine_vars(self.vars, self.vars_file_vars) + temp_vars = utils.combine_vars(temp_vars, mv) + temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars) + include_file = template(dirname, tokens[0], temp_vars) + include_filename = utils.path_dwim(dirname, include_file) + + data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password) + if 'role_name' in x and data is not None: + for y in data: + if isinstance(y, dict) and 'include' in y: + y['role_name'] = new_role + loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role) + results += loaded + elif type(x) == dict: + task = Task( + self, x, + module_vars=task_vars, + play_vars=self.vars, + play_file_vars=self.vars_file_vars, + 
role_vars=self.role_vars, + role_params=role_params, + default_vars=default_vars, + additional_conditions=list(additional_conditions), + role_name=role_name + ) + results.append(task) + else: + raise Exception("unexpected task type") + + for x in results: + if self.tags is not None: + x.tags.extend(self.tags) + + return results + + # ************************************************* + + def tasks(self): + ''' return task objects for this play ''' + return self._tasks + + def handlers(self): + ''' return handler objects for this play ''' + return self._handlers + + # ************************************************* + + def _get_vars(self): + ''' load the vars section from a play, accounting for all sorts of variable features + including loading from yaml files, prompting, and conditional includes of the first + file found in a list. ''' + + if self.vars is None: + self.vars = {} + + if type(self.vars) not in [dict, list]: + raise errors.AnsibleError("'vars' section must contain only key/value pairs") + + vars = {} + + # translate a list of vars into a dict + if type(self.vars) == list: + for item in self.vars: + if getattr(item, 'items', None) is None: + raise errors.AnsibleError("expecting a key-value pair in 'vars' section") + k, v = item.items()[0] + vars[k] = v + else: + vars.update(self.vars) + + if type(self.vars_prompt) == list: + for var in self.vars_prompt: + if not 'name' in var: + raise errors.AnsibleError("'vars_prompt' item is missing 'name:'") + + vname = var['name'] + prompt = var.get("prompt", vname) + default = var.get("default", None) + private = var.get("private", True) + + confirm = var.get("confirm", False) + encrypt = var.get("encrypt", None) + salt_size = var.get("salt_size", None) + salt = var.get("salt", None) + + if vname not in self.playbook.extra_vars: + vars[vname] = self.playbook.callbacks.on_vars_prompt( + vname, private, prompt, encrypt, confirm, salt_size, salt, default + ) + + elif type(self.vars_prompt) == dict: + for (vname, 
prompt) in self.vars_prompt.iteritems(): + prompt_msg = "%s: " % prompt + if vname not in self.playbook.extra_vars: + vars[vname] = self.playbook.callbacks.on_vars_prompt( + varname=vname, private=False, prompt=prompt_msg, default=None + ) + + else: + raise errors.AnsibleError("'vars_prompt' section is malformed, see docs") + + if type(self.playbook.extra_vars) == dict: + vars = utils.combine_vars(vars, self.playbook.extra_vars) + + return vars + + # ************************************************* + + def update_vars_files(self, hosts, vault_password=None): + ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in ''' + + # now loop through all the hosts... + for h in hosts: + self._update_vars_files_for_host(h, vault_password=vault_password) + + # ************************************************* + + def compare_tags(self, tags): + ''' given a list of tags that the user has specified, return two lists: + matched_tags: tags were found within the current play and match those given + by the user + unmatched_tags: tags that were found within the current play but do not match + any provided by the user ''' + + # gather all the tags in all the tasks and handlers into one list + # FIXME: isn't this in self.tags already? 
+ + all_tags = [] + for task in self._tasks: + if not task.meta: + all_tags.extend(task.tags) + for handler in self._handlers: + all_tags.extend(handler.tags) + + # compare the lists of tags using sets and return the matched and unmatched + all_tags_set = set(all_tags) + tags_set = set(tags) + + matched_tags = all_tags_set.intersection(tags_set) + unmatched_tags = all_tags_set.difference(tags_set) + + a = set(['always']) + u = set(['untagged']) + if 'always' in all_tags_set: + matched_tags = matched_tags.union(a) + unmatched_tags = all_tags_set.difference(a) + + if 'all' in tags_set: + matched_tags = matched_tags.union(all_tags_set) + unmatched_tags = set() + + if 'tagged' in tags_set: + matched_tags = all_tags_set.difference(u) + unmatched_tags = u + + if 'untagged' in tags_set and 'untagged' in all_tags_set: + matched_tags = matched_tags.union(u) + unmatched_tags = unmatched_tags.difference(u) + + return matched_tags, unmatched_tags + + # ************************************************* + + def _late_merge_role_tags(self): + # build a local dict of tags for roles + role_tags = {} + for task in self._ds['tasks']: + if 'role_name' in task: + this_role = task['role_name'] + "-" + task['vars']['role_uuid'] + + if this_role not in role_tags: + role_tags[this_role] = [] + + if 'tags' in task['vars']: + if isinstance(task['vars']['tags'], basestring): + role_tags[this_role] += shlex.split(task['vars']['tags']) + else: + role_tags[this_role] += task['vars']['tags'] + + # apply each role's tags to its tasks + for idx, val in enumerate(self._tasks): + if getattr(val, 'role_name', None) is not None: + this_role = val.role_name + "-" + val.module_vars['role_uuid'] + if this_role in role_tags: + self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role])) + + # ************************************************* + + def _update_vars_files_for_host(self, host, vault_password=None): + + def generate_filenames(host, inject, filename): + + """ Render the raw 
filename into 3 forms """ + + # filename2 is the templated version of the filename, which will + # be fully rendered if any variables contained within it are + # non-inventory related + filename2 = template(self.basedir, filename, self.vars) + + # filename3 is the same as filename2, but when the host object is + # available, inventory variables will be expanded as well since the + # name is templated with the injected variables + filename3 = filename2 + if host is not None: + filename3 = template(self.basedir, filename2, inject) + + # filename4 is the dwim'd path, but may also be mixed-scope, so we use + # both play scoped vars and host scoped vars to template the filepath + if utils.contains_vars(filename3) and host is not None: + inject.update(self.vars) + filename4 = template(self.basedir, filename3, inject) + filename4 = utils.path_dwim(self.basedir, filename4) + else: + filename4 = utils.path_dwim(self.basedir, filename3) + + return filename2, filename3, filename4 + + + def update_vars_cache(host, data, target_filename=None): + + """ update a host's varscache with new var data """ + + self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data) + if target_filename: + self.playbook.callbacks.on_import_for_host(host, target_filename) + + def process_files(filename, filename2, filename3, filename4, host=None): + + """ pseudo-algorithm for deciding where new vars should go """ + + data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password) + if data: + if type(data) != dict: + raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4) + if host is not None: + target_filename = None + if utils.contains_vars(filename2): + if not utils.contains_vars(filename3): + target_filename = filename3 + else: + target_filename = filename4 + update_vars_cache(host, data, target_filename=target_filename) + else: + self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data) + # we did process 
this file + return True + # we did not process this file + return False + + # Enforce that vars_files is always a list + if type(self.vars_files) != list: + self.vars_files = [ self.vars_files ] + + # Build an inject if this is a host run started by self.update_vars_files + if host is not None: + inject = {} + inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password)) + inject.update(self.playbook.SETUP_CACHE.get(host, {})) + inject.update(self.playbook.VARS_CACHE.get(host, {})) + else: + inject = None + + processed = [] + for filename in self.vars_files: + if type(filename) == list: + # loop over all filenames, loading the first one, and failing if none found + found = False + sequence = [] + for real_filename in filename: + filename2, filename3, filename4 = generate_filenames(host, inject, real_filename) + sequence.append(filename4) + if os.path.exists(filename4): + found = True + if process_files(filename, filename2, filename3, filename4, host=host): + processed.append(filename) + elif host is not None: + self.playbook.callbacks.on_not_import_for_host(host, filename4) + if found: + break + if not found and host is not None: + raise errors.AnsibleError( + "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence) + ) + else: + # just one filename supplied, load it! 
+ filename2, filename3, filename4 = generate_filenames(host, inject, filename) + if utils.contains_vars(filename4): + continue + if process_files(filename, filename2, filename3, filename4, host=host): + processed.append(filename) + + return processed diff --git a/v1/ansible/playbook/task.py b/v1/ansible/playbook/task.py new file mode 100644 index 00000000000000..70c1bc8df6bb00 --- /dev/null +++ b/v1/ansible/playbook/task.py @@ -0,0 +1,346 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible import errors +from ansible import utils +from ansible.module_utils.splitter import split_args +import os +import ansible.utils.template as template +import sys + +class Task(object): + + _t_common = [ + 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass', + 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when', + 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log', + 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user', + 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when', + ] + + __slots__ = [ + 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file', + 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars', + 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars', + ] + _t_common + + # to prevent typos and such + VALID_KEYS = frozenset([ + 'async', 'connection', 'include', 'poll', + ] + _t_common) + + def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None): + ''' constructor loads from a task or handler datastructure ''' + + # meta directives are used to tell things like ansible/playbook to run + # operations like handler execution. Meta tasks are not executed + # normally. 
+ if 'meta' in ds: + self.meta = ds['meta'] + self.tags = [] + self.module_vars = module_vars + self.role_name = role_name + return + else: + self.meta = None + + + library = os.path.join(play.basedir, 'library') + if os.path.exists(library): + utils.plugins.module_finder.add_directory(library) + + for x in ds.keys(): + + # code to allow for saying "modulename: args" versus "action: modulename args" + if x in utils.plugins.module_finder: + + if 'action' in ds: + raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action']))) + if isinstance(ds[x], dict): + if 'args' in ds: + raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x])))) + ds['args'] = ds[x] + ds[x] = '' + elif ds[x] is None: + ds[x] = '' + if not isinstance(ds[x], basestring): + raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x]))) + ds['action'] = x + " " + ds[x] + ds.pop(x) + + # code to allow "with_glob" and to reference a lookup plugin named glob + elif x.startswith("with_"): + if isinstance(ds[x], basestring): + param = ds[x].strip() + + plugin_name = x.replace("with_","") + if plugin_name in utils.plugins.lookup_loader: + ds['items_lookup_plugin'] = plugin_name + ds['items_lookup_terms'] = ds[x] + ds.pop(x) + else: + raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name)) + + elif x in [ 'changed_when', 'failed_when', 'when']: + if isinstance(ds[x], basestring): + param = ds[x].strip() + # Only a variable, no logic + if (param.startswith('{{') and + param.find('}}') == len(ds[x]) - 2 and + param.find('|') == -1): + utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.") + elif x.startswith("when_"): + utils.deprecated("The 'when_' conditional has been removed. 
Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True) + + if 'when' in ds: + raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action']))) + when_name = x.replace("when_","") + ds['when'] = "%s %s" % (when_name, ds[x]) + ds.pop(x) + elif not x in Task.VALID_KEYS: + raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x) + + self.module_vars = module_vars + self.play_vars = play_vars + self.play_file_vars = play_file_vars + self.role_vars = role_vars + self.role_params = role_params + self.default_vars = default_vars + self.play = play + + # load various attributes + self.name = ds.get('name', None) + self.tags = [ 'untagged' ] + self.register = ds.get('register', None) + self.environment = ds.get('environment', play.environment) + self.role_name = role_name + self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log + self.run_once = utils.boolean(ds.get('run_once', 'false')) + + #Code to allow do until feature in a Task + if 'until' in ds: + if not ds.get('register'): + raise errors.AnsibleError("register keyword is mandatory when using do until feature") + self.module_vars['delay'] = ds.get('delay', 5) + self.module_vars['retries'] = ds.get('retries', 3) + self.module_vars['register'] = ds.get('register', None) + self.until = ds.get('until') + self.module_vars['until'] = self.until + + # rather than simple key=value args on the options line, these represent structured data and the values + # can be hashes and lists, not just scalars + self.args = ds.get('args', {}) + + # get remote_user for task, then play, then playbook + if ds.get('remote_user') is not None: + self.remote_user = ds.get('remote_user') + elif ds.get('remote_user', play.remote_user) is not None: + self.remote_user = ds.get('remote_user', play.remote_user) + else: + self.remote_user = ds.get('remote_user', play.playbook.remote_user) + + # Fail 
out if user specifies privilege escalation params in conflict + if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')): + raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name) + + if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')): + raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "sudo_pass" in task: %s' % self.name) + + if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')): + raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name) + + self.become = utils.boolean(ds.get('become', play.become)) + self.become_method = ds.get('become_method', play.become_method) + self.become_user = ds.get('become_user', play.become_user) + self.become_pass = ds.get('become_pass', play.playbook.become_pass) + + # set only if passed in current task data + if 'sudo' in ds or 'sudo_user' in ds: + self.become_method='sudo' + + if 'sudo' in ds: + self.become=ds['sudo'] + del ds['sudo'] + else: + self.become=True + if 'sudo_user' in ds: + self.become_user = ds['sudo_user'] + del ds['sudo_user'] + if 'sudo_pass' in ds: + self.become_pass = ds['sudo_pass'] + del ds['sudo_pass'] + + elif 'su' in ds or 'su_user' in ds: + self.become_method='su' + + if 'su' in ds: + self.become=ds['su'] + else: + self.become=True + del ds['su'] + if 'su_user' in ds: + self.become_user = ds['su_user'] + del ds['su_user'] + if 'su_pass' in ds: + self.become_pass = ds['su_pass'] + del ds['su_pass'] + + # Both are defined + if ('action' in ds) and ('local_action' in ds): + raise 
errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together") + # Both are NOT defined + elif (not 'action' in ds) and (not 'local_action' in ds): + raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', '')) + # Only one of them is defined + elif 'local_action' in ds: + self.action = ds.get('local_action', '') + self.delegate_to = '127.0.0.1' + else: + self.action = ds.get('action', '') + self.delegate_to = ds.get('delegate_to', None) + self.transport = ds.get('connection', ds.get('transport', play.transport)) + + if isinstance(self.action, dict): + if 'module' not in self.action: + raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action)) + if self.args: + raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action)) + self.args = self.action + self.action = self.args.pop('module') + + # delegate_to can use variables + if not (self.delegate_to is None): + # delegate_to: localhost should use local transport + if self.delegate_to in ['127.0.0.1', 'localhost']: + self.transport = 'local' + + # notified by is used by Playbook code to flag which hosts + # need to run a notifier + self.notified_by = [] + + # if no name is specified, use the action line as the name + if self.name is None: + self.name = self.action + + # load various attributes + self.when = ds.get('when', None) + self.changed_when = ds.get('changed_when', None) + self.failed_when = ds.get('failed_when', None) + + # combine the default and module vars here for use in templating + all_vars = self.default_vars.copy() + all_vars = utils.combine_vars(all_vars, self.play_vars) + all_vars = utils.combine_vars(all_vars, self.play_file_vars) + all_vars = utils.combine_vars(all_vars, self.role_vars) + all_vars = utils.combine_vars(all_vars, self.module_vars) + all_vars = utils.combine_vars(all_vars, 
self.role_params) + + self.async_seconds = ds.get('async', 0) # not async by default + self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars) + self.async_seconds = int(self.async_seconds) + self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds + self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars) + self.async_poll_interval = int(self.async_poll_interval) + self.notify = ds.get('notify', []) + self.first_available_file = ds.get('first_available_file', None) + + self.items_lookup_plugin = ds.get('items_lookup_plugin', None) + self.items_lookup_terms = ds.get('items_lookup_terms', None) + + + self.ignore_errors = ds.get('ignore_errors', False) + self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal) + + self.always_run = ds.get('always_run', False) + + # action should be a string + if not isinstance(self.action, basestring): + raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name)) + + # notify can be a string or a list, store as a list + if isinstance(self.notify, basestring): + self.notify = [ self.notify ] + + # split the action line into a module name + arguments + try: + tokens = split_args(self.action) + except Exception, e: + if "unbalanced" in str(e): + raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \ + "Make sure quotes are matched or escaped properly") + else: + raise + if len(tokens) < 1: + raise errors.AnsibleError("invalid/missing action in task. 
name: %s" % self.name) + self.module_name = tokens[0] + self.module_args = '' + if len(tokens) > 1: + self.module_args = " ".join(tokens[1:]) + + import_tags = self.module_vars.get('tags',[]) + if type(import_tags) in [int,float]: + import_tags = str(import_tags) + elif type(import_tags) in [str,unicode]: + # allow the user to list comma delimited tags + import_tags = import_tags.split(",") + + # handle mutually incompatible options + incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ] + if len(incompatibles) > 1: + raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task") + + # make first_available_file accessible to Runner code + if self.first_available_file: + self.module_vars['first_available_file'] = self.first_available_file + # make sure that the 'item' variable is set when using + # first_available_file (issue #8220) + if 'item' not in self.module_vars: + self.module_vars['item'] = '' + + if self.items_lookup_plugin is not None: + self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin + self.module_vars['items_lookup_terms'] = self.items_lookup_terms + + # allow runner to see delegate_to option + self.module_vars['delegate_to'] = self.delegate_to + + # make some task attributes accessible to Runner code + self.module_vars['ignore_errors'] = self.ignore_errors + self.module_vars['register'] = self.register + self.module_vars['changed_when'] = self.changed_when + self.module_vars['failed_when'] = self.failed_when + self.module_vars['always_run'] = self.always_run + + # tags allow certain parts of a playbook to be run without running the whole playbook + apply_tags = ds.get('tags', None) + if apply_tags is not None: + if type(apply_tags) in [ str, unicode ]: + self.tags.append(apply_tags) + elif type(apply_tags) in [ int, float ]: + self.tags.append(str(apply_tags)) + elif type(apply_tags) == list: + self.tags.extend(apply_tags) + 
self.tags.extend(import_tags) + + if len(self.tags) > 1: + self.tags.remove('untagged') + + if additional_conditions: + new_conditions = additional_conditions[:] + if self.when: + new_conditions.append(self.when) + self.when = new_conditions diff --git a/lib/ansible/runner/__init__.py b/v1/ansible/runner/__init__.py similarity index 100% rename from lib/ansible/runner/__init__.py rename to v1/ansible/runner/__init__.py diff --git a/lib/ansible/runner/lookup_plugins/__init__.py b/v1/ansible/runner/action_plugins/__init__.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/__init__.py rename to v1/ansible/runner/action_plugins/__init__.py diff --git a/lib/ansible/runner/action_plugins/add_host.py b/v1/ansible/runner/action_plugins/add_host.py similarity index 100% rename from lib/ansible/runner/action_plugins/add_host.py rename to v1/ansible/runner/action_plugins/add_host.py diff --git a/lib/ansible/runner/action_plugins/assemble.py b/v1/ansible/runner/action_plugins/assemble.py similarity index 100% rename from lib/ansible/runner/action_plugins/assemble.py rename to v1/ansible/runner/action_plugins/assemble.py diff --git a/lib/ansible/runner/action_plugins/assert.py b/v1/ansible/runner/action_plugins/assert.py similarity index 100% rename from lib/ansible/runner/action_plugins/assert.py rename to v1/ansible/runner/action_plugins/assert.py diff --git a/lib/ansible/runner/action_plugins/async.py b/v1/ansible/runner/action_plugins/async.py similarity index 100% rename from lib/ansible/runner/action_plugins/async.py rename to v1/ansible/runner/action_plugins/async.py diff --git a/lib/ansible/runner/action_plugins/copy.py b/v1/ansible/runner/action_plugins/copy.py similarity index 100% rename from lib/ansible/runner/action_plugins/copy.py rename to v1/ansible/runner/action_plugins/copy.py diff --git a/lib/ansible/runner/action_plugins/debug.py b/v1/ansible/runner/action_plugins/debug.py similarity index 100% rename from 
lib/ansible/runner/action_plugins/debug.py rename to v1/ansible/runner/action_plugins/debug.py diff --git a/lib/ansible/runner/action_plugins/fail.py b/v1/ansible/runner/action_plugins/fail.py similarity index 100% rename from lib/ansible/runner/action_plugins/fail.py rename to v1/ansible/runner/action_plugins/fail.py diff --git a/lib/ansible/runner/action_plugins/fetch.py b/v1/ansible/runner/action_plugins/fetch.py similarity index 100% rename from lib/ansible/runner/action_plugins/fetch.py rename to v1/ansible/runner/action_plugins/fetch.py diff --git a/lib/ansible/runner/action_plugins/group_by.py b/v1/ansible/runner/action_plugins/group_by.py similarity index 100% rename from lib/ansible/runner/action_plugins/group_by.py rename to v1/ansible/runner/action_plugins/group_by.py diff --git a/lib/ansible/runner/action_plugins/include_vars.py b/v1/ansible/runner/action_plugins/include_vars.py similarity index 100% rename from lib/ansible/runner/action_plugins/include_vars.py rename to v1/ansible/runner/action_plugins/include_vars.py diff --git a/lib/ansible/runner/action_plugins/normal.py b/v1/ansible/runner/action_plugins/normal.py similarity index 100% rename from lib/ansible/runner/action_plugins/normal.py rename to v1/ansible/runner/action_plugins/normal.py diff --git a/lib/ansible/runner/action_plugins/patch.py b/v1/ansible/runner/action_plugins/patch.py similarity index 100% rename from lib/ansible/runner/action_plugins/patch.py rename to v1/ansible/runner/action_plugins/patch.py diff --git a/lib/ansible/runner/action_plugins/pause.py b/v1/ansible/runner/action_plugins/pause.py similarity index 100% rename from lib/ansible/runner/action_plugins/pause.py rename to v1/ansible/runner/action_plugins/pause.py diff --git a/lib/ansible/runner/action_plugins/raw.py b/v1/ansible/runner/action_plugins/raw.py similarity index 100% rename from lib/ansible/runner/action_plugins/raw.py rename to v1/ansible/runner/action_plugins/raw.py diff --git 
a/lib/ansible/runner/action_plugins/script.py b/v1/ansible/runner/action_plugins/script.py similarity index 100% rename from lib/ansible/runner/action_plugins/script.py rename to v1/ansible/runner/action_plugins/script.py diff --git a/lib/ansible/runner/action_plugins/set_fact.py b/v1/ansible/runner/action_plugins/set_fact.py similarity index 100% rename from lib/ansible/runner/action_plugins/set_fact.py rename to v1/ansible/runner/action_plugins/set_fact.py diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/v1/ansible/runner/action_plugins/synchronize.py similarity index 100% rename from lib/ansible/runner/action_plugins/synchronize.py rename to v1/ansible/runner/action_plugins/synchronize.py diff --git a/lib/ansible/runner/action_plugins/template.py b/v1/ansible/runner/action_plugins/template.py similarity index 100% rename from lib/ansible/runner/action_plugins/template.py rename to v1/ansible/runner/action_plugins/template.py diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/v1/ansible/runner/action_plugins/unarchive.py similarity index 100% rename from lib/ansible/runner/action_plugins/unarchive.py rename to v1/ansible/runner/action_plugins/unarchive.py diff --git a/lib/ansible/runner/action_plugins/win_copy.py b/v1/ansible/runner/action_plugins/win_copy.py similarity index 100% rename from lib/ansible/runner/action_plugins/win_copy.py rename to v1/ansible/runner/action_plugins/win_copy.py diff --git a/lib/ansible/runner/action_plugins/win_template.py b/v1/ansible/runner/action_plugins/win_template.py similarity index 100% rename from lib/ansible/runner/action_plugins/win_template.py rename to v1/ansible/runner/action_plugins/win_template.py diff --git a/lib/ansible/runner/connection.py b/v1/ansible/runner/connection.py similarity index 100% rename from lib/ansible/runner/connection.py rename to v1/ansible/runner/connection.py diff --git a/lib/ansible/runner/shell_plugins/__init__.py b/v1/ansible/runner/connection_plugins/__init__.py 
similarity index 100% rename from lib/ansible/runner/shell_plugins/__init__.py rename to v1/ansible/runner/connection_plugins/__init__.py diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/v1/ansible/runner/connection_plugins/accelerate.py similarity index 100% rename from lib/ansible/runner/connection_plugins/accelerate.py rename to v1/ansible/runner/connection_plugins/accelerate.py diff --git a/lib/ansible/runner/connection_plugins/chroot.py b/v1/ansible/runner/connection_plugins/chroot.py similarity index 100% rename from lib/ansible/runner/connection_plugins/chroot.py rename to v1/ansible/runner/connection_plugins/chroot.py diff --git a/lib/ansible/runner/connection_plugins/fireball.py b/v1/ansible/runner/connection_plugins/fireball.py similarity index 100% rename from lib/ansible/runner/connection_plugins/fireball.py rename to v1/ansible/runner/connection_plugins/fireball.py diff --git a/lib/ansible/runner/connection_plugins/funcd.py b/v1/ansible/runner/connection_plugins/funcd.py similarity index 100% rename from lib/ansible/runner/connection_plugins/funcd.py rename to v1/ansible/runner/connection_plugins/funcd.py diff --git a/lib/ansible/runner/connection_plugins/jail.py b/v1/ansible/runner/connection_plugins/jail.py similarity index 100% rename from lib/ansible/runner/connection_plugins/jail.py rename to v1/ansible/runner/connection_plugins/jail.py diff --git a/lib/ansible/runner/connection_plugins/libvirt_lxc.py b/v1/ansible/runner/connection_plugins/libvirt_lxc.py similarity index 100% rename from lib/ansible/runner/connection_plugins/libvirt_lxc.py rename to v1/ansible/runner/connection_plugins/libvirt_lxc.py diff --git a/lib/ansible/runner/connection_plugins/local.py b/v1/ansible/runner/connection_plugins/local.py similarity index 100% rename from lib/ansible/runner/connection_plugins/local.py rename to v1/ansible/runner/connection_plugins/local.py diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py 
b/v1/ansible/runner/connection_plugins/paramiko_ssh.py similarity index 100% rename from lib/ansible/runner/connection_plugins/paramiko_ssh.py rename to v1/ansible/runner/connection_plugins/paramiko_ssh.py diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/v1/ansible/runner/connection_plugins/ssh.py similarity index 100% rename from lib/ansible/runner/connection_plugins/ssh.py rename to v1/ansible/runner/connection_plugins/ssh.py diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/v1/ansible/runner/connection_plugins/winrm.py similarity index 100% rename from lib/ansible/runner/connection_plugins/winrm.py rename to v1/ansible/runner/connection_plugins/winrm.py diff --git a/lib/ansible/runner/connection_plugins/zone.py b/v1/ansible/runner/connection_plugins/zone.py similarity index 100% rename from lib/ansible/runner/connection_plugins/zone.py rename to v1/ansible/runner/connection_plugins/zone.py diff --git a/lib/ansible/utils/module_docs_fragments/__init__.py b/v1/ansible/runner/filter_plugins/__init__.py similarity index 100% rename from lib/ansible/utils/module_docs_fragments/__init__.py rename to v1/ansible/runner/filter_plugins/__init__.py diff --git a/lib/ansible/runner/filter_plugins/core.py b/v1/ansible/runner/filter_plugins/core.py similarity index 100% rename from lib/ansible/runner/filter_plugins/core.py rename to v1/ansible/runner/filter_plugins/core.py diff --git a/lib/ansible/runner/filter_plugins/ipaddr.py b/v1/ansible/runner/filter_plugins/ipaddr.py similarity index 100% rename from lib/ansible/runner/filter_plugins/ipaddr.py rename to v1/ansible/runner/filter_plugins/ipaddr.py diff --git a/lib/ansible/runner/filter_plugins/mathstuff.py b/v1/ansible/runner/filter_plugins/mathstuff.py similarity index 100% rename from lib/ansible/runner/filter_plugins/mathstuff.py rename to v1/ansible/runner/filter_plugins/mathstuff.py diff --git a/v2/ansible/inventory/vars_plugins/__init__.py b/v1/ansible/runner/lookup_plugins/__init__.py 
similarity index 100% rename from v2/ansible/inventory/vars_plugins/__init__.py rename to v1/ansible/runner/lookup_plugins/__init__.py diff --git a/lib/ansible/runner/lookup_plugins/cartesian.py b/v1/ansible/runner/lookup_plugins/cartesian.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/cartesian.py rename to v1/ansible/runner/lookup_plugins/cartesian.py diff --git a/lib/ansible/runner/lookup_plugins/consul_kv.py b/v1/ansible/runner/lookup_plugins/consul_kv.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/consul_kv.py rename to v1/ansible/runner/lookup_plugins/consul_kv.py diff --git a/lib/ansible/runner/lookup_plugins/csvfile.py b/v1/ansible/runner/lookup_plugins/csvfile.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/csvfile.py rename to v1/ansible/runner/lookup_plugins/csvfile.py diff --git a/lib/ansible/runner/lookup_plugins/dict.py b/v1/ansible/runner/lookup_plugins/dict.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/dict.py rename to v1/ansible/runner/lookup_plugins/dict.py diff --git a/lib/ansible/runner/lookup_plugins/dig.py b/v1/ansible/runner/lookup_plugins/dig.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/dig.py rename to v1/ansible/runner/lookup_plugins/dig.py diff --git a/lib/ansible/runner/lookup_plugins/dnstxt.py b/v1/ansible/runner/lookup_plugins/dnstxt.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/dnstxt.py rename to v1/ansible/runner/lookup_plugins/dnstxt.py diff --git a/lib/ansible/runner/lookup_plugins/env.py b/v1/ansible/runner/lookup_plugins/env.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/env.py rename to v1/ansible/runner/lookup_plugins/env.py diff --git a/lib/ansible/runner/lookup_plugins/etcd.py b/v1/ansible/runner/lookup_plugins/etcd.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/etcd.py rename to v1/ansible/runner/lookup_plugins/etcd.py diff --git 
a/lib/ansible/runner/lookup_plugins/file.py b/v1/ansible/runner/lookup_plugins/file.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/file.py rename to v1/ansible/runner/lookup_plugins/file.py diff --git a/lib/ansible/runner/lookup_plugins/fileglob.py b/v1/ansible/runner/lookup_plugins/fileglob.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/fileglob.py rename to v1/ansible/runner/lookup_plugins/fileglob.py diff --git a/lib/ansible/runner/lookup_plugins/first_found.py b/v1/ansible/runner/lookup_plugins/first_found.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/first_found.py rename to v1/ansible/runner/lookup_plugins/first_found.py diff --git a/lib/ansible/runner/lookup_plugins/flattened.py b/v1/ansible/runner/lookup_plugins/flattened.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/flattened.py rename to v1/ansible/runner/lookup_plugins/flattened.py diff --git a/lib/ansible/runner/lookup_plugins/indexed_items.py b/v1/ansible/runner/lookup_plugins/indexed_items.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/indexed_items.py rename to v1/ansible/runner/lookup_plugins/indexed_items.py diff --git a/lib/ansible/runner/lookup_plugins/inventory_hostnames.py b/v1/ansible/runner/lookup_plugins/inventory_hostnames.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/inventory_hostnames.py rename to v1/ansible/runner/lookup_plugins/inventory_hostnames.py diff --git a/lib/ansible/runner/lookup_plugins/items.py b/v1/ansible/runner/lookup_plugins/items.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/items.py rename to v1/ansible/runner/lookup_plugins/items.py diff --git a/lib/ansible/runner/lookup_plugins/lines.py b/v1/ansible/runner/lookup_plugins/lines.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/lines.py rename to v1/ansible/runner/lookup_plugins/lines.py diff --git 
a/lib/ansible/runner/lookup_plugins/nested.py b/v1/ansible/runner/lookup_plugins/nested.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/nested.py rename to v1/ansible/runner/lookup_plugins/nested.py diff --git a/lib/ansible/runner/lookup_plugins/password.py b/v1/ansible/runner/lookup_plugins/password.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/password.py rename to v1/ansible/runner/lookup_plugins/password.py diff --git a/lib/ansible/runner/lookup_plugins/pipe.py b/v1/ansible/runner/lookup_plugins/pipe.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/pipe.py rename to v1/ansible/runner/lookup_plugins/pipe.py diff --git a/lib/ansible/runner/lookup_plugins/random_choice.py b/v1/ansible/runner/lookup_plugins/random_choice.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/random_choice.py rename to v1/ansible/runner/lookup_plugins/random_choice.py diff --git a/lib/ansible/runner/lookup_plugins/redis_kv.py b/v1/ansible/runner/lookup_plugins/redis_kv.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/redis_kv.py rename to v1/ansible/runner/lookup_plugins/redis_kv.py diff --git a/lib/ansible/runner/lookup_plugins/sequence.py b/v1/ansible/runner/lookup_plugins/sequence.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/sequence.py rename to v1/ansible/runner/lookup_plugins/sequence.py diff --git a/lib/ansible/runner/lookup_plugins/subelements.py b/v1/ansible/runner/lookup_plugins/subelements.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/subelements.py rename to v1/ansible/runner/lookup_plugins/subelements.py diff --git a/lib/ansible/runner/lookup_plugins/template.py b/v1/ansible/runner/lookup_plugins/template.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/template.py rename to v1/ansible/runner/lookup_plugins/template.py diff --git a/lib/ansible/runner/lookup_plugins/together.py 
b/v1/ansible/runner/lookup_plugins/together.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/together.py rename to v1/ansible/runner/lookup_plugins/together.py diff --git a/lib/ansible/runner/lookup_plugins/url.py b/v1/ansible/runner/lookup_plugins/url.py similarity index 100% rename from lib/ansible/runner/lookup_plugins/url.py rename to v1/ansible/runner/lookup_plugins/url.py diff --git a/lib/ansible/runner/poller.py b/v1/ansible/runner/poller.py similarity index 100% rename from lib/ansible/runner/poller.py rename to v1/ansible/runner/poller.py diff --git a/lib/ansible/runner/return_data.py b/v1/ansible/runner/return_data.py similarity index 100% rename from lib/ansible/runner/return_data.py rename to v1/ansible/runner/return_data.py diff --git a/v2/test/parsing/yaml/__init__.py b/v1/ansible/runner/shell_plugins/__init__.py similarity index 100% rename from v2/test/parsing/yaml/__init__.py rename to v1/ansible/runner/shell_plugins/__init__.py diff --git a/lib/ansible/runner/shell_plugins/csh.py b/v1/ansible/runner/shell_plugins/csh.py similarity index 100% rename from lib/ansible/runner/shell_plugins/csh.py rename to v1/ansible/runner/shell_plugins/csh.py diff --git a/lib/ansible/runner/shell_plugins/fish.py b/v1/ansible/runner/shell_plugins/fish.py similarity index 100% rename from lib/ansible/runner/shell_plugins/fish.py rename to v1/ansible/runner/shell_plugins/fish.py diff --git a/lib/ansible/runner/shell_plugins/powershell.py b/v1/ansible/runner/shell_plugins/powershell.py similarity index 100% rename from lib/ansible/runner/shell_plugins/powershell.py rename to v1/ansible/runner/shell_plugins/powershell.py diff --git a/lib/ansible/runner/shell_plugins/sh.py b/v1/ansible/runner/shell_plugins/sh.py similarity index 100% rename from lib/ansible/runner/shell_plugins/sh.py rename to v1/ansible/runner/shell_plugins/sh.py diff --git a/v1/ansible/utils/__init__.py b/v1/ansible/utils/__init__.py new file mode 100644 index 
00000000000000..7ed07a54c840d3 --- /dev/null +++ b/v1/ansible/utils/__init__.py @@ -0,0 +1,1660 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import errno +import sys +import re +import os +import shlex +import yaml +import copy +import optparse +import operator +from ansible import errors +from ansible import __version__ +from ansible.utils.display_functions import * +from ansible.utils.plugins import * +from ansible.utils.su_prompts import * +from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s +from ansible.callbacks import display +from ansible.module_utils.splitter import split_args, unquote +from ansible.module_utils.basic import heuristic_log_sanitize +from ansible.utils.unicode import to_bytes, to_unicode +import ansible.constants as C +import ast +import time +import StringIO +import stat +import termios +import tty +import pipes +import random +import difflib +import warnings +import traceback +import getpass +import sys +import subprocess +import contextlib + +from vault import VaultLib + +VERBOSITY=0 + +MAX_FILE_SIZE_FOR_DIFF=1*1024*1024 + +# caching the compilation of the regex used +# to check for lookup calls within data +LOOKUP_REGEX = re.compile(r'lookup\s*\(') +PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})') +CODE_REGEX = re.compile(r'(?:{%|%})') + + +try: + # simplejson can be much 
faster if it's available + import simplejson as json +except ImportError: + import json + +try: + from yaml import CSafeLoader as Loader +except ImportError: + from yaml import SafeLoader as Loader + +PASSLIB_AVAILABLE = False +try: + import passlib.hash + PASSLIB_AVAILABLE = True +except: + pass + +try: + import builtin +except ImportError: + import __builtin__ as builtin + +KEYCZAR_AVAILABLE=False +try: + try: + # some versions of pycrypto may not have this? + from Crypto.pct_warnings import PowmInsecureWarning + except ImportError: + PowmInsecureWarning = RuntimeWarning + + with warnings.catch_warnings(record=True) as warning_handler: + warnings.simplefilter("error", PowmInsecureWarning) + try: + import keyczar.errors as key_errors + from keyczar.keys import AesKey + except PowmInsecureWarning: + system_warning( + "The version of gmp you have installed has a known issue regarding " + \ + "timing vulnerabilities when used with pycrypto. " + \ + "If possible, you should update it (i.e. yum update gmp)." 
+ ) + warnings.resetwarnings() + warnings.simplefilter("ignore") + import keyczar.errors as key_errors + from keyczar.keys import AesKey + KEYCZAR_AVAILABLE=True +except ImportError: + pass + + +############################################################### +# Abstractions around keyczar +############################################################### + +def key_for_hostname(hostname): + # fireball mode is an implementation of ansible firing up zeromq via SSH + # to use no persistent daemons or key management + + if not KEYCZAR_AVAILABLE: + raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes") + + key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR) + if not os.path.exists(key_path): + os.makedirs(key_path, mode=0700) + os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8)) + elif not os.path.isdir(key_path): + raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.') + + if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8): + raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))) + + key_path = os.path.join(key_path, hostname) + + # use new AES keys every 2 hours, which means fireball must not allow running for longer either + if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2): + key = AesKey.Generate() + fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)) + fh = os.fdopen(fd, 'w') + fh.write(str(key)) + fh.close() + return key + else: + if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8): + raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' 
% (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path)) + fh = open(key_path) + key = AesKey.Read(fh.read()) + fh.close() + return key + +def encrypt(key, msg): + return key.Encrypt(msg) + +def decrypt(key, msg): + try: + return key.Decrypt(msg) + except key_errors.InvalidSignatureError: + raise errors.AnsibleError("decryption failed") + +############################################################### +# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS +############################################################### + +def read_vault_file(vault_password_file): + """Read a vault password from a file or if executable, execute the script and + retrieve password from STDOUT + """ + if vault_password_file: + this_path = os.path.realpath(os.path.expanduser(vault_password_file)) + if is_executable(this_path): + try: + # STDERR not captured to make it easier for users to prompt for input in their scripts + p = subprocess.Popen(this_path, stdout=subprocess.PIPE) + except OSError, e: + raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e)) + stdout, stderr = p.communicate() + vault_pass = stdout.strip('\r\n') + else: + try: + f = open(this_path, "rb") + vault_pass=f.read().strip() + f.close() + except (OSError, IOError), e: + raise errors.AnsibleError("Could not read %s: %s" % (this_path, e)) + + return vault_pass + else: + return None + +def err(msg): + ''' print an error message to stderr ''' + + print >> sys.stderr, msg + +def exit(msg, rc=1): + ''' quit with an error to stdout and a failure code ''' + + err(msg) + sys.exit(rc) + +def jsonify(result, format=False): + ''' format JSON output (uncompressed or uncompressed) ''' + + if result is None: + return "{}" + result2 = result.copy() + for key, value in result2.items(): + if type(value) is str: + result2[key] = value.decode('utf-8', 'ignore') + + indent = None + if format: + indent = 4 + + try: + return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False) + except UnicodeDecodeError: + 
return json.dumps(result2, sort_keys=True, indent=indent) + +def write_tree_file(tree, hostname, buf): + ''' write something into treedir/hostname ''' + + # TODO: might be nice to append playbook runs per host in a similar way + # in which case, we'd want append mode. + path = os.path.join(tree, hostname) + fd = open(path, "w+") + fd.write(buf) + fd.close() + +def is_failed(result): + ''' is a given JSON result a failed result? ''' + + return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true'])) + +def is_changed(result): + ''' is a given JSON result a changed result? ''' + + return (result.get('changed', False) in [ True, 'True', 'true']) + +def check_conditional(conditional, basedir, inject, fail_on_undefined=False): + from ansible.utils import template + + if conditional is None or conditional == '': + return True + + if isinstance(conditional, list): + for x in conditional: + if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined): + return False + return True + + if not isinstance(conditional, basestring): + return conditional + + conditional = conditional.replace("jinja2_compare ","") + # allow variable names + if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'): + conditional = to_unicode(inject[conditional], nonstring='simplerepr') + conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined) + original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","") + # a Jinja2 evaluation that results in something Python can eval! + presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional + conditional = template.template(basedir, presented, inject) + val = conditional.strip() + if val == presented: + # the templating failed, meaning most likely a + # variable was undefined. 
If we happened to be + # looking for an undefined variable, return True, + # otherwise fail + if "is undefined" in conditional: + return True + elif "is defined" in conditional: + return False + else: + raise errors.AnsibleError("error while evaluating conditional: %s" % original) + elif val == "True": + return True + elif val == "False": + return False + else: + raise errors.AnsibleError("unable to evaluate conditional: %s" % original) + +def is_executable(path): + '''is the given path executable?''' + return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] + or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] + or stat.S_IXOTH & os.stat(path)[stat.ST_MODE]) + +def unfrackpath(path): + ''' + returns a path that is free of symlinks, environment + variables, relative path traversals and symbols (~) + example: + '$HOME/../../var/mail' becomes '/var/spool/mail' + ''' + return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path)))) + +def prepare_writeable_dir(tree,mode=0777): + ''' make sure a directory exists and is writeable ''' + + # modify the mode to ensure the owner at least + # has read/write access to this directory + mode |= 0700 + + # make sure the tree path is always expanded + # and normalized and free of symlinks + tree = unfrackpath(tree) + + if not os.path.exists(tree): + try: + os.makedirs(tree, mode) + except (IOError, OSError), e: + raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e)) + if not os.access(tree, os.W_OK): + raise errors.AnsibleError("Cannot write to path %s" % tree) + return tree + +def path_dwim(basedir, given): + ''' + make relative paths work like folks expect. + ''' + + if given.startswith("'"): + given = given[1:-1] + + if given.startswith("/"): + return os.path.abspath(given) + elif given.startswith("~"): + return os.path.abspath(os.path.expanduser(given)) + else: + if basedir is None: + basedir = "." 
+ return os.path.abspath(os.path.join(basedir, given)) + +def path_dwim_relative(original, dirname, source, playbook_base, check=True): + ''' find one file in a directory one level up in a dir named dirname relative to current ''' + # (used by roles code) + + from ansible.utils import template + + + basedir = os.path.dirname(original) + if os.path.islink(basedir): + basedir = unfrackpath(basedir) + template2 = os.path.join(basedir, dirname, source) + else: + template2 = os.path.join(basedir, '..', dirname, source) + source2 = path_dwim(basedir, template2) + if os.path.exists(source2): + return source2 + obvious_local_path = path_dwim(playbook_base, source) + if os.path.exists(obvious_local_path): + return obvious_local_path + if check: + raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path)) + return source2 # which does not exist + +def repo_url_to_role_name(repo_url): + # gets the role name out of a repo like + # http://git.example.com/repos/repo.git" => "repo" + + if '://' not in repo_url and '@' not in repo_url: + return repo_url + trailing_path = repo_url.split('/')[-1] + if trailing_path.endswith('.git'): + trailing_path = trailing_path[:-4] + if trailing_path.endswith('.tar.gz'): + trailing_path = trailing_path[:-7] + if ',' in trailing_path: + trailing_path = trailing_path.split(',')[0] + return trailing_path + + +def role_spec_parse(role_spec): + # takes a repo and a version like + # git+http://git.example.com/repos/repo.git,v1.0 + # and returns a list of properties such as: + # { + # 'scm': 'git', + # 'src': 'http://git.example.com/repos/repo.git', + # 'version': 'v1.0', + # 'name': 'repo' + # } + + role_spec = role_spec.strip() + role_version = '' + default_role_versions = dict(git='master', hg='tip') + if role_spec == "" or role_spec.startswith("#"): + return (None, None, None, None) + + tokens = [s.strip() for s in role_spec.split(',')] + + # assume https://github.com URLs are git+https:// URLs and not + # 
tarballs unless they end in '.zip' + if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'): + tokens[0] = 'git+' + tokens[0] + + if '+' in tokens[0]: + (scm, role_url) = tokens[0].split('+') + else: + scm = None + role_url = tokens[0] + if len(tokens) >= 2: + role_version = tokens[1] + if len(tokens) == 3: + role_name = tokens[2] + else: + role_name = repo_url_to_role_name(tokens[0]) + if scm and not role_version: + role_version = default_role_versions.get(scm, '') + return dict(scm=scm, src=role_url, version=role_version, name=role_name) + + +def role_yaml_parse(role): + if 'role' in role: + # Old style: {role: "galaxy.role,version,name", other_vars: "here" } + role_info = role_spec_parse(role['role']) + if isinstance(role_info, dict): + # Warning: Slight change in behaviour here. name may be being + # overloaded. Previously, name was only a parameter to the role. + # Now it is both a parameter to the role and the name that + # ansible-galaxy will install under on the local system. 
+ if 'name' in role and 'name' in role_info: + del role_info['name'] + role.update(role_info) + else: + # New style: { src: 'galaxy.role,version,name', other_vars: "here" } + if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'): + role["src"] = "git+" + role["src"] + + if '+' in role["src"]: + (scm, src) = role["src"].split('+') + role["scm"] = scm + role["src"] = src + + if 'name' not in role: + role["name"] = repo_url_to_role_name(role["src"]) + + if 'version' not in role: + role['version'] = '' + + if 'scm' not in role: + role['scm'] = None + + return role + + +def json_loads(data): + ''' parse a JSON string and return a data structure ''' + try: + loaded = json.loads(data) + except ValueError,e: + raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e)) + + return loaded + +def _clean_data(orig_data, from_remote=False, from_inventory=False): + ''' remove jinja2 template tags from a string ''' + + if not isinstance(orig_data, basestring): + return orig_data + + # when the data is marked as having come from a remote, we always + # replace any print blocks (ie. {{var}}), however when marked as coming + # from inventory we only replace print blocks that contain a call to + # a lookup plugin (ie. 
{{lookup('foo','bar'))}}) + replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None) + + regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX + + with contextlib.closing(StringIO.StringIO(orig_data)) as data: + # these variables keep track of opening block locations, as we only + # want to replace matched pairs of print/block tags + print_openings = [] + block_openings = [] + for mo in regex.finditer(orig_data): + token = mo.group(0) + token_start = mo.start(0) + + if token[0] == '{': + if token == '{%': + block_openings.append(token_start) + elif token == '{{': + print_openings.append(token_start) + + elif token[1] == '}': + prev_idx = None + if token == '%}' and block_openings: + prev_idx = block_openings.pop() + elif token == '}}' and print_openings: + prev_idx = print_openings.pop() + + if prev_idx is not None: + # replace the opening + data.seek(prev_idx, os.SEEK_SET) + data.write('{#') + # replace the closing + data.seek(token_start, os.SEEK_SET) + data.write('#}') + + else: + assert False, 'Unhandled regex match' + + return data.getvalue() + +def _clean_data_struct(orig_data, from_remote=False, from_inventory=False): + ''' + walk a complex data structure, and use _clean_data() to + remove any template tags that may exist + ''' + if not from_remote and not from_inventory: + raise errors.AnsibleErrors("when cleaning data, you must specify either from_remote or from_inventory") + if isinstance(orig_data, dict): + data = orig_data.copy() + for key in data: + new_key = _clean_data_struct(key, from_remote, from_inventory) + new_val = _clean_data_struct(data[key], from_remote, from_inventory) + if key != new_key: + del data[key] + data[new_key] = new_val + elif isinstance(orig_data, list): + data = orig_data[:] + for i in range(0, len(data)): + data[i] = _clean_data_struct(data[i], from_remote, from_inventory) + elif isinstance(orig_data, basestring): + data = _clean_data(orig_data, from_remote, 
from_inventory) + else: + data = orig_data + return data + +def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False): + ''' this version for module return data only ''' + + orig_data = raw_data + + # ignore stuff like tcgetattr spewage or other warnings + data = filter_leading_non_json_lines(raw_data) + + try: + results = json.loads(data) + except: + if no_exceptions: + return dict(failed=True, parsed=False, msg=raw_data) + else: + raise + + if from_remote: + results = _clean_data_struct(results, from_remote, from_inventory) + + return results + +def serialize_args(args): + ''' + Flattens a dictionary args to a k=v string + ''' + module_args = "" + for (k,v) in args.iteritems(): + if isinstance(v, basestring): + module_args = "%s=%s %s" % (k, pipes.quote(v), module_args) + elif isinstance(v, bool): + module_args = "%s=%s %s" % (k, str(v), module_args) + return module_args.strip() + +def merge_module_args(current_args, new_args): + ''' + merges either a dictionary or string of k=v pairs with another string of k=v pairs, + and returns a new k=v string without duplicates. + ''' + if not isinstance(current_args, basestring): + raise errors.AnsibleError("expected current_args to be a basestring") + # we use parse_kv to split up the current args into a dictionary + final_args = parse_kv(current_args) + if isinstance(new_args, dict): + final_args.update(new_args) + elif isinstance(new_args, basestring): + new_args_kv = parse_kv(new_args) + final_args.update(new_args_kv) + return serialize_args(final_args) + +def parse_yaml(data, path_hint=None): + ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!''' + + stripped_data = data.lstrip() + loaded = None + if stripped_data.startswith("{") or stripped_data.startswith("["): + # since the line starts with { or [ we can infer this is a JSON document. 
+ try: + loaded = json.loads(data) + except ValueError, ve: + if path_hint: + raise errors.AnsibleError(path_hint + ": " + str(ve)) + else: + raise errors.AnsibleError(str(ve)) + else: + # else this is pretty sure to be a YAML document + loaded = yaml.load(data, Loader=Loader) + + return loaded + +def process_common_errors(msg, probline, column): + replaced = probline.replace(" ","") + + if ":{{" in replaced and "}}" in replaced: + msg = msg + """ +This one looks easy to fix. YAML thought it was looking for the start of a +hash/dictionary and was confused to see a second "{". Most likely this was +meant to be an ansible template evaluation instead, so we have to give the +parser a small hint that we wanted a string instead. The solution here is to +just quote the entire value. + +For instance, if the original line was: + + app_path: {{ base_path }}/foo + +It should be written as: + + app_path: "{{ base_path }}/foo" +""" + return msg + + elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1: + msg = msg + """ +This one looks easy to fix. There seems to be an extra unquoted colon in the line +and this is confusing the parser. It was only expecting to find one free +colon. The solution is just add some quotes around the colon, or quote the +entire line after the first colon. 
+ +For instance, if the original line was: + + copy: src=file.txt dest=/path/filename:with_colon.txt + +It can be written as: + + copy: src=file.txt dest='/path/filename:with_colon.txt' + +Or: + + copy: 'src=file.txt dest=/path/filename:with_colon.txt' + + +""" + return msg + else: + parts = probline.split(":") + if len(parts) > 1: + middle = parts[1].strip() + match = False + unbalanced = False + if middle.startswith("'") and not middle.endswith("'"): + match = True + elif middle.startswith('"') and not middle.endswith('"'): + match = True + if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2: + unbalanced = True + if match: + msg = msg + """ +This one looks easy to fix. It seems that there is a value started +with a quote, and the YAML parser is expecting to see the line ended +with the same kind of quote. For instance: + + when: "ok" in result.stdout + +Could be written as: + + when: '"ok" in result.stdout' + +or equivalently: + + when: "'ok' in result.stdout" + +""" + return msg + + if unbalanced: + msg = msg + """ +We could be wrong, but this one looks like it might be an issue with +unbalanced quotes. If starting a value with a quote, make sure the +line ends with the same set of quotes. 
For instance this arbitrary +example: + + foo: "bad" "wolf" + +Could be written as: + + foo: '"bad" "wolf"' + +""" + return msg + + return msg + +def process_yaml_error(exc, data, path=None, show_content=True): + if hasattr(exc, 'problem_mark'): + mark = exc.problem_mark + if show_content: + if mark.line -1 >= 0: + before_probline = data.split("\n")[mark.line-1] + else: + before_probline = '' + probline = data.split("\n")[mark.line] + arrow = " " * mark.column + "^" + msg = """Syntax Error while loading YAML script, %s +Note: The error may actually appear before this position: line %s, column %s + +%s +%s +%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow) + + unquoted_var = None + if '{{' in probline and '}}' in probline: + if '"{{' not in probline or "'{{" not in probline: + unquoted_var = True + + if not unquoted_var: + msg = process_common_errors(msg, probline, mark.column) + else: + msg = msg + """ +We could be wrong, but this one looks like it might be an issue with +missing quotes. Always quote template expression brackets when they +start a value. For instance: + + with_items: + - {{ foo }} + +Should be written as: + + with_items: + - "{{ foo }}" + +""" + else: + # most likely displaying a file with sensitive content, + # so don't show any of the actual lines of yaml just the + # line number itself + msg = """Syntax error while loading YAML script, %s +The error appears to have been on line %s, column %s, but may actually +be before there depending on the exact syntax problem. +""" % (path, mark.line + 1, mark.column + 1) + + else: + # No problem markers means we have to throw a generic + # "stuff messed up" type message. Sry bud. + if path: + msg = "Could not parse YAML. Check over %s again." % path + else: + msg = "Could not parse YAML." 
+ raise errors.AnsibleYAMLValidationFailed(msg) + + +def parse_yaml_from_file(path, vault_password=None): + ''' convert a yaml file to a data structure ''' + + data = None + show_content = True + + try: + data = open(path).read() + except IOError: + raise errors.AnsibleError("file could not read: %s" % path) + + vault = VaultLib(password=vault_password) + if vault.is_encrypted(data): + # if the file is encrypted and no password was specified, + # the decrypt call would throw an error, but we check first + # since the decrypt function doesn't know the file name + if vault_password is None: + raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path) + data = vault.decrypt(data) + show_content = False + + try: + return parse_yaml(data, path_hint=path) + except yaml.YAMLError, exc: + process_yaml_error(exc, data, path, show_content) + +def parse_kv(args): + ''' convert a string of key/value items to a dict ''' + options = {} + if args is not None: + try: + vargs = split_args(args) + except ValueError, ve: + if 'no closing quotation' in str(ve).lower(): + raise errors.AnsibleError("error parsing argument string, try quoting the entire line.") + else: + raise + for x in vargs: + if "=" in x: + k, v = x.split("=",1) + options[k.strip()] = unquote(v.strip()) + return options + +def _validate_both_dicts(a, b): + + if not (isinstance(a, dict) and isinstance(b, dict)): + raise errors.AnsibleError( + "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__) + ) + +def merge_hash(a, b): + ''' recursively merges hash b into a + keys from b take precedence over keys from a ''' + + result = {} + + # we check here as well as in combine_vars() since this + # function can work recursively with nested dicts + _validate_both_dicts(a, b) + + for dicts in a, b: + # next, iterate over b keys and values + for k, v in dicts.iteritems(): + # if there's already such key in a + # and that key contains dict + if k 
in result and isinstance(result[k], dict): + # merge those dicts recursively + result[k] = merge_hash(a[k], v) + else: + # otherwise, just copy a value from b to a + result[k] = v + + return result + +def default(value, function): + ''' syntactic sugar around lazy evaluation of defaults ''' + if value is None: + return function() + return value + + +def _git_repo_info(repo_path): + ''' returns a string containing git branch, commit id and commit date ''' + result = None + if os.path.exists(repo_path): + # Check if the .git is a file. If it is a file, it means that we are in a submodule structure. + if os.path.isfile(repo_path): + try: + gitdir = yaml.safe_load(open(repo_path)).get('gitdir') + # There is a possibility the .git file to have an absolute path. + if os.path.isabs(gitdir): + repo_path = gitdir + else: + repo_path = os.path.join(repo_path[:-4], gitdir) + except (IOError, AttributeError): + return '' + f = open(os.path.join(repo_path, "HEAD")) + branch = f.readline().split('/')[-1].rstrip("\n") + f.close() + branch_path = os.path.join(repo_path, "refs", "heads", branch) + if os.path.exists(branch_path): + f = open(branch_path) + commit = f.readline()[:10] + f.close() + else: + # detached HEAD + commit = branch[:10] + branch = 'detached HEAD' + branch_path = os.path.join(repo_path, "HEAD") + + date = time.localtime(os.stat(branch_path).st_mtime) + if time.daylight == 0: + offset = time.timezone + else: + offset = time.altzone + result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, + time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36) + else: + result = '' + return result + + +def _gitinfo(): + basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..') + repo_path = os.path.join(basedir, '.git') + result = _git_repo_info(repo_path) + submodules = os.path.join(basedir, '.gitmodules') + if not os.path.exists(submodules): + return result + f = open(submodules) + for line in f: + tokens = line.strip().split(' ') + if tokens[0] 
== 'path': + submodule_path = tokens[2] + submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git')) + if not submodule_info: + submodule_info = ' not found - use git submodule update --init ' + submodule_path + result += "\n {0}: {1}".format(submodule_path, submodule_info) + f.close() + return result + + +def version(prog): + result = "{0} {1}".format(prog, __version__) + gitinfo = _gitinfo() + if gitinfo: + result = result + " {0}".format(gitinfo) + result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH + return result + +def version_info(gitinfo=False): + if gitinfo: + # expensive call, user with care + ansible_version_string = version('') + else: + ansible_version_string = __version__ + ansible_version = ansible_version_string.split()[0] + ansible_versions = ansible_version.split('.') + for counter in range(len(ansible_versions)): + if ansible_versions[counter] == "": + ansible_versions[counter] = 0 + try: + ansible_versions[counter] = int(ansible_versions[counter]) + except: + pass + if len(ansible_versions) < 3: + for counter in range(len(ansible_versions), 3): + ansible_versions.append(0) + return {'string': ansible_version_string.strip(), + 'full': ansible_version, + 'major': ansible_versions[0], + 'minor': ansible_versions[1], + 'revision': ansible_versions[2]} + +def getch(): + ''' read in a single character ''' + fd = sys.stdin.fileno() + old_settings = termios.tcgetattr(fd) + try: + tty.setraw(sys.stdin.fileno()) + ch = sys.stdin.read(1) + finally: + termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) + return ch + +def sanitize_output(arg_string): + ''' strips private info out of a string ''' + + private_keys = ('password', 'login_password') + + output = [] + for part in arg_string.split(): + try: + (k, v) = part.split('=', 1) + except ValueError: + v = heuristic_log_sanitize(part) + output.append(v) + continue + + if k in private_keys: + v = 'VALUE_HIDDEN' + else: + v = heuristic_log_sanitize(v) + 
output.append('%s=%s' % (k, v)) + + output = ' '.join(output) + return output + + +#################################################################### +# option handling code for /usr/bin/ansible and ansible-playbook +# below this line + +class SortedOptParser(optparse.OptionParser): + '''Optparser which sorts the options by opt before outputting --help''' + + def format_help(self, formatter=None): + self.option_list.sort(key=operator.methodcaller('get_opt_string')) + return optparse.OptionParser.format_help(self, formatter=None) + +def increment_debug(option, opt, value, parser): + global VERBOSITY + VERBOSITY += 1 + +def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, + async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False): + ''' create an options parser for any ansible script ''' + + parser = SortedOptParser(usage, version=version("%prog")) + parser.add_option('-v','--verbose', default=False, action="callback", + callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") + + parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int', + help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS) + parser.add_option('-i', '--inventory-file', dest='inventory', + help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST, + default=constants.DEFAULT_HOST_LIST) + parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", + help="set additional variables as key=value or YAML/JSON", default=[]) + parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user', + help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER) + parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', + help='ask for SSH password') + parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, 
dest='private_key_file', + help='use this file to authenticate the connection') + parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', + help='ask for vault password') + parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE, + dest='vault_password_file', help="vault password file") + parser.add_option('--list-hosts', dest='listhosts', action='store_true', + help='outputs a list of matching hosts; does not execute anything else') + parser.add_option('-M', '--module-path', dest='module_path', + help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH, + default=None) + + if subset_opts: + parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset', + help='further limit selected hosts to an additional pattern') + + parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int', + dest='timeout', + help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT) + + if output_opts: + parser.add_option('-o', '--one-line', dest='one_line', action='store_true', + help='condense output') + parser.add_option('-t', '--tree', dest='tree', default=None, + help='log output to this directory') + + if runas_opts: + # priv user defaults to root later on to enable detecting when this option was given here + parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + help='ask for sudo password (deprecated, use become)') + parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', + help='ask for su password (deprecated, use become)') + parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo', + help="run operations with sudo (nopasswd) (deprecated, use become)") + parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, + help='desired sudo user (default=root) (deprecated, use 
become)') + parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true', + help='run operations with su (deprecated, use become)') + parser.add_option('-R', '--su-user', default=None, + help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER) + + # consolidated privilege escalation (become) + parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become', + help="run operations with become (nopasswd implied)") + parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string', + help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS))) + parser.add_option('--become-user', default=None, dest='become_user', type='string', + help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER) + parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', + help='ask for privilege escalation password') + + + if connect_opts: + parser.add_option('-c', '--connection', dest='connection', + default=constants.DEFAULT_TRANSPORT, + help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT) + + if async_opts: + parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int', + dest='poll_interval', + help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL) + parser.add_option('-B', '--background', dest='seconds', type='int', default=0, + help='run asynchronously, failing after X seconds (default=N/A)') + + if check_opts: + parser.add_option("-C", "--check", default=False, dest='check', action='store_true', + help="don't make any changes; instead, try to predict some of the changes that may occur" + ) + + if diff_opts: + parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true', + 
help="when changing (small) files and templates, show the differences in those files; works great with --check" + ) + + return parser + +def parse_extra_vars(extra_vars_opts, vault_pass): + extra_vars = {} + for extra_vars_opt in extra_vars_opts: + extra_vars_opt = to_unicode(extra_vars_opt) + if extra_vars_opt.startswith(u"@"): + # Argument is a YAML file (JSON is a subset of YAML) + extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass)) + elif extra_vars_opt and extra_vars_opt[0] in u'[{': + # Arguments as YAML + extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt)) + else: + # Arguments as Key-value + extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt)) + return extra_vars + +def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False): + + vault_pass = None + new_vault_pass = None + + if ask_vault_pass: + vault_pass = getpass.getpass(prompt="Vault password: ") + + if ask_vault_pass and confirm_vault: + vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ") + if vault_pass != vault_pass2: + raise errors.AnsibleError("Passwords do not match") + + if ask_new_vault_pass: + new_vault_pass = getpass.getpass(prompt="New Vault password: ") + + if ask_new_vault_pass and confirm_new: + new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ") + if new_vault_pass != new_vault_pass2: + raise errors.AnsibleError("Passwords do not match") + + # enforce no newline chars at the end of passwords + if vault_pass: + vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip() + if new_vault_pass: + new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip() + + return vault_pass, new_vault_pass + +def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD): + sshpass = None + becomepass = None + vaultpass = None + 
become_prompt = '' + + if ask_pass: + sshpass = getpass.getpass(prompt="SSH password: ") + become_prompt = "%s password[defaults to SSH password]: " % become_method.upper() + if sshpass: + sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') + else: + become_prompt = "%s password: " % become_method.upper() + + if become_ask_pass: + becomepass = getpass.getpass(prompt=become_prompt) + if ask_pass and becomepass == '': + becomepass = sshpass + if becomepass: + becomepass = to_bytes(becomepass) + + if ask_vault_pass: + vaultpass = getpass.getpass(prompt="Vault password: ") + if vaultpass: + vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip() + + return (sshpass, becomepass, vaultpass) + + +def choose_pass_prompt(options): + + if options.ask_su_pass: + return 'su' + elif options.ask_sudo_pass: + return 'sudo' + + return options.become_method + +def normalize_become_options(options): + + options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS + options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER + + if options.become: + pass + elif options.sudo: + options.become = True + options.become_method = 'sudo' + elif options.su: + options.become = True + options.become_method = 'su' + + +def do_encrypt(result, encrypt, salt_size=None, salt=None): + if PASSLIB_AVAILABLE: + try: + crypt = getattr(passlib.hash, encrypt) + except: + raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt) + + if salt_size: + result = crypt.encrypt(result, salt_size=salt_size) + elif salt: + result = crypt.encrypt(result, salt=salt) + else: + result = crypt.encrypt(result) + else: + raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values") + + return result + +def last_non_blank_line(buf): + + all_lines = buf.splitlines() + all_lines.reverse() + for line in all_lines: + if (len(line) 
> 0): + return line + # shouldn't occur unless there's no output + return "" + +def filter_leading_non_json_lines(buf): + ''' + used to avoid random output from SSH at the top of JSON output, like messages from + tcagetattr, or where dropbear spews MOTD on every single command (which is nuts). + + need to filter anything which starts not with '{', '[', ', '=' or is an empty line. + filter only leading lines since multiline JSON is valid. + ''' + + filtered_lines = StringIO.StringIO() + stop_filtering = False + for line in buf.splitlines(): + if stop_filtering or line.startswith('{') or line.startswith('['): + stop_filtering = True + filtered_lines.write(line + '\n') + return filtered_lines.getvalue() + +def boolean(value): + val = str(value) + if val.lower() in [ "true", "t", "y", "1", "yes" ]: + return True + else: + return False + +def make_become_cmd(cmd, user, shell, method, flags=None, exe=None): + """ + helper function for connection plugins to create privilege escalation commands + """ + + randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) + success_key = 'BECOME-SUCCESS-%s' % randbits + prompt = None + becomecmd = None + + shell = shell or '$SHELL' + + if method == 'sudo': + # Rather than detect if sudo wants a password this time, -k makes sudo always ask for + # a password if one is required. Passing a quoted compound command to sudo (or sudo -s) + # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted + # string to the user's shell. We loop reading output until we see the randomly-generated + # sudo prompt set with the -p option. 
+ prompt = '[sudo via ansible, key=%s] password: ' % randbits + exe = exe or C.DEFAULT_SUDO_EXE + becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \ + (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd))) + + elif method == 'su': + exe = exe or C.DEFAULT_SU_EXE + flags = flags or C.DEFAULT_SU_FLAGS + becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd))) + + elif method == 'pbrun': + prompt = 'assword:' + exe = exe or 'pbrun' + flags = flags or '' + becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd))) + + elif method == 'pfexec': + exe = exe or 'pfexec' + flags = flags or '' + # No user as it uses it's own exec_attr to figure it out + becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd))) + + if becomecmd is None: + raise errors.AnsibleError("Privilege escalation method not found: %s" % method) + + return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key) + + +def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd): + """ + helper function for connection plugins to create sudo commands + """ + return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe) + + +def make_su_cmd(su_user, executable, cmd): + """ + Helper function for connection plugins to create direct su commands + """ + return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE) + +def get_diff(diff): + # called by --diff usage in playbook and runner via callbacks + # include names in diffs 'before' and 'after' and do diff -U 10 + + try: + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + ret = [] + if 'dst_binary' in diff: + ret.append("diff skipped: destination file appears to be binary\n") + if 'src_binary' in diff: + ret.append("diff skipped: source file appears to be binary\n") + if 'dst_larger' 
in diff: + ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger']) + if 'src_larger' in diff: + ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger']) + if 'before' in diff and 'after' in diff: + if 'before_header' in diff: + before_header = "before: %s" % diff['before_header'] + else: + before_header = 'before' + if 'after_header' in diff: + after_header = "after: %s" % diff['after_header'] + else: + after_header = 'after' + differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10) + for line in list(differ): + ret.append(line) + return u"".join(ret) + except UnicodeDecodeError: + return ">> the files are different, but the diff library cannot compare unicode strings" + +def is_list_of_strings(items): + for x in items: + if not isinstance(x, basestring): + return False + return True + +def list_union(a, b): + result = [] + for x in a: + if x not in result: + result.append(x) + for x in b: + if x not in result: + result.append(x) + return result + +def list_intersection(a, b): + result = [] + for x in a: + if x in b and x not in result: + result.append(x) + return result + +def list_difference(a, b): + result = [] + for x in a: + if x not in b and x not in result: + result.append(x) + for x in b: + if x not in a and x not in result: + result.append(x) + return result + +def contains_vars(data): + ''' + returns True if the data contains a variable pattern + ''' + return "$" in data or "{{" in data + +def safe_eval(expr, locals={}, include_exceptions=False): + ''' + This is intended for allowing things like: + with_items: a_list_variable + + Where Jinja2 would return a string but we do not want to allow it to + call functions (outside of Jinja2, where the env is constrained). 
If + the input data to this function came from an untrusted (remote) source, + it should first be run through _clean_data_struct() to ensure the data + is further sanitized prior to evaluation. + + Based on: + http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe + ''' + + # this is the whitelist of AST nodes we are going to + # allow in the evaluation. Any node type other than + # those listed here will raise an exception in our custom + # visitor class defined below. + SAFE_NODES = set( + ( + ast.Add, + ast.BinOp, + ast.Call, + ast.Compare, + ast.Dict, + ast.Div, + ast.Expression, + ast.List, + ast.Load, + ast.Mult, + ast.Num, + ast.Name, + ast.Str, + ast.Sub, + ast.Tuple, + ast.UnaryOp, + ) + ) + + # AST node types were expanded after 2.6 + if not sys.version.startswith('2.6'): + SAFE_NODES.union( + set( + (ast.Set,) + ) + ) + + filter_list = [] + for filter in filter_loader.all(): + filter_list.extend(filter.filters().keys()) + + CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + + class CleansingNodeVisitor(ast.NodeVisitor): + def generic_visit(self, node, inside_call=False): + if type(node) not in SAFE_NODES: + raise Exception("invalid expression (%s)" % expr) + elif isinstance(node, ast.Call): + inside_call = True + elif isinstance(node, ast.Name) and inside_call: + if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST: + raise Exception("invalid function: %s" % node.id) + # iterate over all child nodes + for child_node in ast.iter_child_nodes(node): + self.generic_visit(child_node, inside_call) + + if not isinstance(expr, basestring): + # already templated to a datastructure, perhaps? 
+ if include_exceptions: + return (expr, None) + return expr + + cnv = CleansingNodeVisitor() + try: + parsed_tree = ast.parse(expr, mode='eval') + cnv.visit(parsed_tree) + compiled = compile(parsed_tree, expr, 'eval') + result = eval(compiled, {}, locals) + + if include_exceptions: + return (result, None) + else: + return result + except SyntaxError, e: + # special handling for syntax errors, we just return + # the expression string back as-is + if include_exceptions: + return (expr, None) + return expr + except Exception, e: + if include_exceptions: + return (expr, e) + return expr + + +def listify_lookup_plugin_terms(terms, basedir, inject): + + from ansible.utils import template + + if isinstance(terms, basestring): + # someone did: + # with_items: alist + # OR + # with_items: {{ alist }} + + stripped = terms.strip() + if not (stripped.startswith('{') or stripped.startswith('[')) and \ + not stripped.startswith("/") and \ + not stripped.startswith('set([') and \ + not LOOKUP_REGEX.search(terms): + # if not already a list, get ready to evaluate with Jinja2 + # not sure why the "/" is in above code :) + try: + new_terms = template.template(basedir, "{{ %s }}" % terms, inject) + if isinstance(new_terms, basestring) and "{{" in new_terms: + pass + else: + terms = new_terms + except: + pass + + if '{' in terms or '[' in terms: + # Jinja2 already evaluated a variable to a list. 
+ # Jinja2-ified list needs to be converted back to a real type + # TODO: something a bit less heavy than eval + return safe_eval(terms) + + if isinstance(terms, basestring): + terms = [ terms ] + + return terms + +def combine_vars(a, b): + + _validate_both_dicts(a, b) + + if C.DEFAULT_HASH_BEHAVIOUR == "merge": + return merge_hash(a, b) + else: + return dict(a.items() + b.items()) + +def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS): + '''Return a random password string of length containing only chars.''' + + password = [] + while len(password) < length: + new_char = os.urandom(1) + if new_char in chars: + password.append(new_char) + + return ''.join(password) + +def before_comment(msg): + ''' what's the part of a string before a comment? ''' + msg = msg.replace("\#","**NOT_A_COMMENT**") + msg = msg.split("#")[0] + msg = msg.replace("**NOT_A_COMMENT**","#") + return msg + +def load_vars(basepath, results, vault_password=None): + """ + Load variables from any potential yaml filename combinations of basepath, + returning result. + """ + + paths_to_check = [ "".join([basepath, ext]) + for ext in C.YAML_FILENAME_EXTENSIONS ] + + found_paths = [] + + for path in paths_to_check: + found, results = _load_vars_from_path(path, results, vault_password=vault_password) + if found: + found_paths.append(path) + + + # disallow the potentially confusing situation that there are multiple + # variable files for the same name. For example if both group_vars/all.yml + # and group_vars/all.yaml + if len(found_paths) > 1: + raise errors.AnsibleError("Multiple variable files found. " + "There should only be one. %s" % ( found_paths, )) + + return results + +## load variables from yaml files/dirs +# e.g. host/group_vars +# +def _load_vars_from_path(path, results, vault_password=None): + """ + Robustly access the file at path and load variables, carefully reporting + errors in a friendly/informative way. 
+ + Return the tuple (found, new_results, ) + """ + + try: + # in the case of a symbolic link, we want the stat of the link itself, + # not its target + pathstat = os.lstat(path) + except os.error, err: + # most common case is that nothing exists at that path. + if err.errno == errno.ENOENT: + return False, results + # otherwise this is a condition we should report to the user + raise errors.AnsibleError( + "%s is not accessible: %s." + " Please check its permissions." % ( path, err.strerror)) + + # symbolic link + if stat.S_ISLNK(pathstat.st_mode): + try: + target = os.path.realpath(path) + except os.error, err2: + raise errors.AnsibleError("The symbolic link at %s " + "is not readable: %s. Please check its permissions." + % (path, err2.strerror, )) + # follow symbolic link chains by recursing, so we repeat the same + # permissions checks above and provide useful errors. + return _load_vars_from_path(target, results, vault_password) + + # directory + if stat.S_ISDIR(pathstat.st_mode): + + # support organizing variables across multiple files in a directory + return True, _load_vars_from_folder(path, results, vault_password=vault_password) + + # regular file + elif stat.S_ISREG(pathstat.st_mode): + data = parse_yaml_from_file(path, vault_password=vault_password) + if data and type(data) != dict: + raise errors.AnsibleError( + "%s must be stored as a dictionary/hash" % path) + elif data is None: + data = {} + + # combine vars overrides by default but can be configured to do a + # hash merge in settings + results = combine_vars(results, data) + return True, results + + # something else? could be a fifo, socket, device, etc. + else: + raise errors.AnsibleError("Expected a variable file or directory " + "but found a non-file object at path %s" % (path, )) + +def _load_vars_from_folder(folder_path, results, vault_password=None): + """ + Load all variables within a folder recursively. 
+ """ + + # this function and _load_vars_from_path are mutually recursive + + try: + names = os.listdir(folder_path) + except os.error, err: + raise errors.AnsibleError( + "This folder cannot be listed: %s: %s." + % ( folder_path, err.strerror)) + + # evaluate files in a stable order rather than whatever order the + # filesystem lists them. + names.sort() + + # do not parse hidden files or dirs, e.g. .svn/ + paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')] + for path in paths: + _found, results = _load_vars_from_path(path, results, vault_password=vault_password) + return results + +def update_hash(hash, key, new_value): + ''' used to avoid nested .update calls on the parent ''' + + value = hash.get(key, {}) + value.update(new_value) + hash[key] = value + +def censor_unlogged_data(data): + ''' + used when the no_log: True attribute is passed to a task to keep data from a callback. + NOT intended to prevent variable registration, but only things from showing up on + screen + ''' + new_data = {} + for (x,y) in data.iteritems(): + if x in [ 'skipped', 'changed', 'failed', 'rc' ]: + new_data[x] = y + new_data['censored'] = 'results hidden due to no_log parameter' + return new_data + +def check_mutually_exclusive_privilege(options, parser): + + # privilege escalation command line arguments need to be mutually exclusive + if (options.su or options.su_user or options.ask_su_pass) and \ + (options.sudo or options.sudo_user or options.ask_sudo_pass) or \ + (options.su or options.su_user or options.ask_su_pass) and \ + (options.become or options.become_user or options.become_ask_pass) or \ + (options.sudo or options.sudo_user or options.ask_sudo_pass) and \ + (options.become or options.become_user or options.become_ask_pass): + + parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " + "and su arguments ('-su', '--su-user', and '--ask-su-pass') " + "and become arguments ('--become', '--become-user', and 
'--ask-become-pass')" + " are exclusive of each other") + + diff --git a/lib/ansible/utils/cmd_functions.py b/v1/ansible/utils/cmd_functions.py similarity index 100% rename from lib/ansible/utils/cmd_functions.py rename to v1/ansible/utils/cmd_functions.py diff --git a/lib/ansible/utils/display_functions.py b/v1/ansible/utils/display_functions.py similarity index 100% rename from lib/ansible/utils/display_functions.py rename to v1/ansible/utils/display_functions.py diff --git a/v2/ansible/utils/hashing.py b/v1/ansible/utils/hashing.py similarity index 92% rename from v2/ansible/utils/hashing.py rename to v1/ansible/utils/hashing.py index 5e378db79f49c5..a7d142e5bd4ba2 100644 --- a/v2/ansible/utils/hashing.py +++ b/v1/ansible/utils/hashing.py @@ -20,7 +20,6 @@ __metaclass__ = type import os -from ansible.errors import AnsibleError # Note, sha1 is the only hash algorithm compatible with python2.4 and with # FIPS-140 mode (as of 11-2014) @@ -44,8 +43,6 @@ def secure_hash_s(data, hash_func=sha1): digest = hash_func() try: - if not isinstance(data, basestring): - data = "%s" % data digest.update(data) except UnicodeEncodeError: digest.update(data.encode('utf-8')) @@ -65,8 +62,8 @@ def secure_hash(filename, hash_func=sha1): digest.update(block) block = infile.read(blocksize) infile.close() - except IOError as e: - raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) + except IOError, e: + raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e)) return digest.hexdigest() # The checksum algorithm must match with the algorithm in ShellModule.checksum() method diff --git a/v2/ansible/utils/module_docs.py b/v1/ansible/utils/module_docs.py similarity index 96% rename from v2/ansible/utils/module_docs.py rename to v1/ansible/utils/module_docs.py index 632b4a00c2a36a..ee99af2cb54dba 100644 --- a/v2/ansible/utils/module_docs.py +++ b/v1/ansible/utils/module_docs.py @@ -23,7 +23,7 @@ import yaml import 
traceback -from ansible.plugins import fragment_loader +from ansible import utils # modules that are ok that they do not have documentation strings BLACKLIST_MODULES = [ @@ -66,7 +66,7 @@ def get_docstring(filename, verbose=False): if fragment_slug != 'doesnotexist': - fragment_class = fragment_loader.get(fragment_name) + fragment_class = utils.plugins.fragment_loader.get(fragment_name) assert fragment_class is not None fragment_yaml = getattr(fragment_class, fragment_var, '{}') diff --git a/v1/ansible/utils/module_docs_fragments/__init__.py b/v1/ansible/utils/module_docs_fragments/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/lib/ansible/utils/module_docs_fragments/aws.py b/v1/ansible/utils/module_docs_fragments/aws.py similarity index 100% rename from lib/ansible/utils/module_docs_fragments/aws.py rename to v1/ansible/utils/module_docs_fragments/aws.py diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/v1/ansible/utils/module_docs_fragments/cloudstack.py similarity index 100% rename from lib/ansible/utils/module_docs_fragments/cloudstack.py rename to v1/ansible/utils/module_docs_fragments/cloudstack.py diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/v1/ansible/utils/module_docs_fragments/files.py similarity index 100% rename from lib/ansible/utils/module_docs_fragments/files.py rename to v1/ansible/utils/module_docs_fragments/files.py diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/v1/ansible/utils/module_docs_fragments/openstack.py similarity index 100% rename from lib/ansible/utils/module_docs_fragments/openstack.py rename to v1/ansible/utils/module_docs_fragments/openstack.py diff --git a/lib/ansible/utils/module_docs_fragments/rackspace.py b/v1/ansible/utils/module_docs_fragments/rackspace.py similarity index 100% rename from lib/ansible/utils/module_docs_fragments/rackspace.py rename to v1/ansible/utils/module_docs_fragments/rackspace.py diff --git 
a/lib/ansible/utils/plugins.py b/v1/ansible/utils/plugins.py similarity index 100% rename from lib/ansible/utils/plugins.py rename to v1/ansible/utils/plugins.py diff --git a/lib/ansible/utils/string_functions.py b/v1/ansible/utils/string_functions.py similarity index 100% rename from lib/ansible/utils/string_functions.py rename to v1/ansible/utils/string_functions.py diff --git a/lib/ansible/utils/su_prompts.py b/v1/ansible/utils/su_prompts.py similarity index 100% rename from lib/ansible/utils/su_prompts.py rename to v1/ansible/utils/su_prompts.py diff --git a/lib/ansible/utils/template.py b/v1/ansible/utils/template.py similarity index 100% rename from lib/ansible/utils/template.py rename to v1/ansible/utils/template.py diff --git a/v2/ansible/utils/unicode.py b/v1/ansible/utils/unicode.py similarity index 93% rename from v2/ansible/utils/unicode.py rename to v1/ansible/utils/unicode.py index 2cff2e5e45c76d..7bd035c0075609 100644 --- a/v2/ansible/utils/unicode.py +++ b/v1/ansible/utils/unicode.py @@ -19,8 +19,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from six import string_types, text_type, binary_type, PY3 - # to_bytes and to_unicode were written by Toshio Kuratomi for the # python-kitchen library https://pypi.python.org/pypi/kitchen # They are licensed in kitchen under the terms of the GPLv2+ @@ -37,9 +35,6 @@ # EXCEPTION_CONVERTERS is defined below due to using to_unicode -if PY3: - basestring = (str, bytes) - def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): '''Convert an object into a :class:`unicode` string @@ -94,12 +89,12 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): # Could use isbasestring/isunicode here but we want this code to be as # fast as possible if isinstance(obj, basestring): - if isinstance(obj, text_type): + if isinstance(obj, unicode): return obj if encoding in _UTF8_ALIASES: - return text_type(obj, 'utf-8', errors) + return unicode(obj, 
'utf-8', errors) if encoding in _LATIN1_ALIASES: - return text_type(obj, 'latin-1', errors) + return unicode(obj, 'latin-1', errors) return obj.decode(encoding, errors) if not nonstring: @@ -115,19 +110,19 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None): simple = None if not simple: try: - simple = text_type(obj) + simple = str(obj) except UnicodeError: try: simple = obj.__str__() except (UnicodeError, AttributeError): simple = u'' - if isinstance(simple, binary_type): - return text_type(simple, encoding, errors) + if isinstance(simple, str): + return unicode(simple, encoding, errors) return simple elif nonstring in ('repr', 'strict'): obj_repr = repr(obj) - if isinstance(obj_repr, binary_type): - obj_repr = text_type(obj_repr, encoding, errors) + if isinstance(obj_repr, str): + obj_repr = unicode(obj_repr, encoding, errors) if nonstring == 'repr': return obj_repr raise TypeError('to_unicode was given "%(obj)s" which is neither' @@ -203,19 +198,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): # Could use isbasestring, isbytestring here but we want this to be as fast # as possible if isinstance(obj, basestring): - if isinstance(obj, binary_type): + if isinstance(obj, str): return obj return obj.encode(encoding, errors) if not nonstring: nonstring = 'simplerepr' if nonstring == 'empty': - return b'' + return '' elif nonstring == 'passthru': return obj elif nonstring == 'simplerepr': try: - simple = binary_type(obj) + simple = str(obj) except UnicodeError: try: simple = obj.__str__() @@ -225,19 +220,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None): try: simple = obj.__unicode__() except (AttributeError, UnicodeError): - simple = b'' - if isinstance(simple, text_type): + simple = '' + if isinstance(simple, unicode): simple = simple.encode(encoding, 'replace') return simple elif nonstring in ('repr', 'strict'): try: obj_repr = obj.__repr__() except (AttributeError, UnicodeError): - 
obj_repr = b'' - if isinstance(obj_repr, text_type): + obj_repr = '' + if isinstance(obj_repr, unicode): obj_repr = obj_repr.encode(encoding, errors) else: - obj_repr = binary_type(obj_repr) + obj_repr = str(obj_repr) if nonstring == 'repr': return obj_repr raise TypeError('to_bytes was given "%(obj)s" which is neither' diff --git a/v1/ansible/utils/vault.py b/v1/ansible/utils/vault.py new file mode 100644 index 00000000000000..842688a2c18fce --- /dev/null +++ b/v1/ansible/utils/vault.py @@ -0,0 +1,585 @@ +# (c) 2014, James Tanner +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# ansible-pull is a script that runs ansible in local mode +# after checking out a playbooks directory from source repo. There is an +# example playbook to bootstrap this script in the examples/ dir which +# installs ansible and sets it up to run on cron. + +import os +import shlex +import shutil +import tempfile +from io import BytesIO +from subprocess import call +from ansible import errors +from hashlib import sha256 + +# Note: Only used for loading obsolete VaultAES files. All files are written +# using the newer VaultAES256 which does not require md5 +try: + from hashlib import md5 +except ImportError: + try: + from md5 import md5 + except ImportError: + # MD5 unavailable. 
Possibly FIPS mode + md5 = None + +from binascii import hexlify +from binascii import unhexlify +from ansible import constants as C + +try: + from Crypto.Hash import SHA256, HMAC + HAS_HASH = True +except ImportError: + HAS_HASH = False + +# Counter import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Util import Counter + HAS_COUNTER = True +except ImportError: + HAS_COUNTER = False + +# KDF import fails for 2.0.1, requires >= 2.6.1 from pip +try: + from Crypto.Protocol.KDF import PBKDF2 + HAS_PBKDF2 = True +except ImportError: + HAS_PBKDF2 = False + +# AES IMPORTS +try: + from Crypto.Cipher import AES as AES + HAS_AES = True +except ImportError: + HAS_AES = False + +CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto" + +HEADER='$ANSIBLE_VAULT' +CIPHER_WHITELIST=['AES', 'AES256'] + +class VaultLib(object): + + def __init__(self, password): + self.password = password + self.cipher_name = None + self.version = '1.1' + + def is_encrypted(self, data): + if data.startswith(HEADER): + return True + else: + return False + + def encrypt(self, data): + + if self.is_encrypted(data): + raise errors.AnsibleError("data is already encrypted") + + if not self.cipher_name: + self.cipher_name = "AES256" + #raise errors.AnsibleError("the cipher must be set before encrypting data") + + if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: + cipher = globals()['Vault' + self.cipher_name] + this_cipher = cipher() + else: + raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name) + + """ + # combine sha + data + this_sha = sha256(data).hexdigest() + tmp_data = this_sha + "\n" + data + """ + + # encrypt sha + data + enc_data = this_cipher.encrypt(data, self.password) + + # add header + tmp_data = self._add_header(enc_data) + return 
tmp_data + + def decrypt(self, data): + if self.password is None: + raise errors.AnsibleError("A vault password must be specified to decrypt data") + + if not self.is_encrypted(data): + raise errors.AnsibleError("data is not encrypted") + + # clean out header + data = self._split_header(data) + + # create the cipher object + if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: + cipher = globals()['Vault' + self.cipher_name] + this_cipher = cipher() + else: + raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name) + + # try to unencrypt data + data = this_cipher.decrypt(data, self.password) + if data is None: + raise errors.AnsibleError("Decryption failed") + + return data + + def _add_header(self, data): + # combine header and encrypted data in 80 char columns + + #tmpdata = hexlify(data) + tmpdata = [data[i:i+80] for i in range(0, len(data), 80)] + + if not self.cipher_name: + raise errors.AnsibleError("the cipher must be set before adding a header") + + dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n" + + for l in tmpdata: + dirty_data += l + '\n' + + return dirty_data + + + def _split_header(self, data): + # used by decrypt + + tmpdata = data.split('\n') + tmpheader = tmpdata[0].strip().split(';') + + self.version = str(tmpheader[1].strip()) + self.cipher_name = str(tmpheader[2].strip()) + clean_data = '\n'.join(tmpdata[1:]) + + """ + # strip out newline, join, unhex + clean_data = [ x.strip() for x in clean_data ] + clean_data = unhexlify(''.join(clean_data)) + """ + + return clean_data + + def __enter__(self): + return self + + def __exit__(self, *err): + pass + +class VaultEditor(object): + # uses helper methods for write_file(self, filename, data) + # to write a file so that code isn't duplicated for simple + # file I/O, ditto read_file(self, filename) and launch_editor(self, filename) + # ... "Don't Repeat Yourself", etc. 
+ + def __init__(self, cipher_name, password, filename): + # instantiates a member variable for VaultLib + self.cipher_name = cipher_name + self.password = password + self.filename = filename + + def _edit_file_helper(self, existing_data=None, cipher=None): + # make sure the umask is set to a sane value + old_umask = os.umask(0o077) + + # Create a tempfile + _, tmp_path = tempfile.mkstemp() + + if existing_data: + self.write_data(existing_data, tmp_path) + + # drop the user into an editor on the tmp file + try: + call(self._editor_shell_command(tmp_path)) + except OSError, e: + raise Exception("Failed to open editor (%s): %s" % (self._editor_shell_command(tmp_path)[0],str(e))) + tmpdata = self.read_data(tmp_path) + + # create new vault + this_vault = VaultLib(self.password) + if cipher: + this_vault.cipher_name = cipher + + # encrypt new data and write out to tmp + enc_data = this_vault.encrypt(tmpdata) + self.write_data(enc_data, tmp_path) + + # shuffle tmp file into place + self.shuffle_files(tmp_path, self.filename) + + # and restore umask + os.umask(old_umask) + + def create_file(self): + """ create a new encrypted file """ + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + if os.path.isfile(self.filename): + raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) + + # Let the user specify contents and save file + self._edit_file_helper(cipher=self.cipher_name) + + def decrypt_file(self): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + if not os.path.isfile(self.filename): + raise errors.AnsibleError("%s does not exist" % self.filename) + + tmpdata = self.read_data(self.filename) + this_vault = VaultLib(self.password) + if this_vault.is_encrypted(tmpdata): + dec_data = this_vault.decrypt(tmpdata) + if dec_data is None: + raise errors.AnsibleError("Decryption failed") + else: + 
self.write_data(dec_data, self.filename) + else: + raise errors.AnsibleError("%s is not encrypted" % self.filename) + + def edit_file(self): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + # decrypt to tmpfile + tmpdata = self.read_data(self.filename) + this_vault = VaultLib(self.password) + dec_data = this_vault.decrypt(tmpdata) + + # let the user edit the data and save + self._edit_file_helper(existing_data=dec_data) + ###we want the cipher to default to AES256 (get rid of files + # encrypted with the AES cipher) + #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name) + + + def view_file(self): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + # decrypt to tmpfile + tmpdata = self.read_data(self.filename) + this_vault = VaultLib(self.password) + dec_data = this_vault.decrypt(tmpdata) + old_umask = os.umask(0o077) + _, tmp_path = tempfile.mkstemp() + self.write_data(dec_data, tmp_path) + os.umask(old_umask) + + # drop the user into pager on the tmp file + call(self._pager_shell_command(tmp_path)) + os.remove(tmp_path) + + def encrypt_file(self): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + if not os.path.isfile(self.filename): + raise errors.AnsibleError("%s does not exist" % self.filename) + + tmpdata = self.read_data(self.filename) + this_vault = VaultLib(self.password) + this_vault.cipher_name = self.cipher_name + if not this_vault.is_encrypted(tmpdata): + enc_data = this_vault.encrypt(tmpdata) + self.write_data(enc_data, self.filename) + else: + raise errors.AnsibleError("%s is already encrypted" % self.filename) + + def rekey_file(self, new_password): + + if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + # decrypt + tmpdata = 
self.read_data(self.filename) + this_vault = VaultLib(self.password) + dec_data = this_vault.decrypt(tmpdata) + + # create new vault + new_vault = VaultLib(new_password) + + # we want to force cipher to the default + #new_vault.cipher_name = this_vault.cipher_name + + # re-encrypt data and re-write file + enc_data = new_vault.encrypt(dec_data) + self.write_data(enc_data, self.filename) + + def read_data(self, filename): + f = open(filename, "rb") + tmpdata = f.read() + f.close() + return tmpdata + + def write_data(self, data, filename): + if os.path.isfile(filename): + os.remove(filename) + f = open(filename, "wb") + f.write(data) + f.close() + + def shuffle_files(self, src, dest): + # overwrite dest with src + if os.path.isfile(dest): + os.remove(dest) + shutil.move(src, dest) + + def _editor_shell_command(self, filename): + EDITOR = os.environ.get('EDITOR','vim') + editor = shlex.split(EDITOR) + editor.append(filename) + + return editor + + def _pager_shell_command(self, filename): + PAGER = os.environ.get('PAGER','less') + pager = shlex.split(PAGER) + pager.append(filename) + + return pager + +######################################## +# CIPHERS # +######################################## + +class VaultAES(object): + + # this version has been obsoleted by the VaultAES256 class + # which uses encrypt-then-mac (fixing order) and also improving the KDF used + # code remains for upgrade purposes only + # http://stackoverflow.com/a/16761459 + + def __init__(self): + if not md5: + raise errors.AnsibleError('md5 hash is unavailable (Could be due to FIPS mode). 
Legacy VaultAES format is unavailable.') + if not HAS_AES: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + def aes_derive_key_and_iv(self, password, salt, key_length, iv_length): + + """ Create a key and an initialization vector """ + + d = d_i = '' + while len(d) < key_length + iv_length: + d_i = md5(d_i + password + salt).digest() + d += d_i + + key = d[:key_length] + iv = d[key_length:key_length+iv_length] + + return key, iv + + def encrypt(self, data, password, key_length=32): + + """ Read plaintext data from in_file and write encrypted to out_file """ + + + # combine sha + data + this_sha = sha256(data).hexdigest() + tmp_data = this_sha + "\n" + data + + in_file = BytesIO(tmp_data) + in_file.seek(0) + out_file = BytesIO() + + bs = AES.block_size + + # Get a block of random data. EL does not have Crypto.Random.new() + # so os.urandom is used for cross platform purposes + salt = os.urandom(bs - len('Salted__')) + + key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs) + cipher = AES.new(key, AES.MODE_CBC, iv) + out_file.write('Salted__' + salt) + finished = False + while not finished: + chunk = in_file.read(1024 * bs) + if len(chunk) == 0 or len(chunk) % bs != 0: + padding_length = (bs - len(chunk) % bs) or bs + chunk += padding_length * chr(padding_length) + finished = True + out_file.write(cipher.encrypt(chunk)) + + out_file.seek(0) + enc_data = out_file.read() + tmp_data = hexlify(enc_data) + + return tmp_data + + + def decrypt(self, data, password, key_length=32): + + """ Read encrypted data from in_file and write decrypted to out_file """ + + # http://stackoverflow.com/a/14989032 + + data = ''.join(data.split('\n')) + data = unhexlify(data) + + in_file = BytesIO(data) + in_file.seek(0) + out_file = BytesIO() + + bs = AES.block_size + salt = in_file.read(bs)[len('Salted__'):] + key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs) + cipher = AES.new(key, AES.MODE_CBC, iv) + next_chunk = '' + finished = False + + while not 
finished: + chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs)) + if len(next_chunk) == 0: + padding_length = ord(chunk[-1]) + chunk = chunk[:-padding_length] + finished = True + out_file.write(chunk) + + # reset the stream pointer to the beginning + out_file.seek(0) + new_data = out_file.read() + + # split out sha and verify decryption + split_data = new_data.split("\n") + this_sha = split_data[0] + this_data = '\n'.join(split_data[1:]) + test_sha = sha256(this_data).hexdigest() + + if this_sha != test_sha: + raise errors.AnsibleError("Decryption failed") + + #return out_file.read() + return this_data + + +class VaultAES256(object): + + """ + Vault implementation using AES-CTR with an HMAC-SHA256 authentication code. + Keys are derived using PBKDF2 + """ + + # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html + + def __init__(self): + + if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH: + raise errors.AnsibleError(CRYPTO_UPGRADE) + + def gen_key_initctr(self, password, salt): + # 16 for AES 128, 32 for AES256 + keylength = 32 + + # match the size used for counter.new to avoid extra work + ivlength = 16 + + hash_function = SHA256 + + # make two keys and one iv + pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest() + + + derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength, + count=10000, prf=pbkdf2_prf) + + key1 = derivedkey[:keylength] + key2 = derivedkey[keylength:(keylength * 2)] + iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength] + + return key1, key2, hexlify(iv) + + + def encrypt(self, data, password): + + salt = os.urandom(32) + key1, key2, iv = self.gen_key_initctr(password, salt) + + # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3 + bs = AES.block_size + padding_length = (bs - len(data) % bs) or bs + data += padding_length * chr(padding_length) + + # COUNTER.new PARAMETERS + # 1) nbits (integer) - Length of the counter, in bits. 
+ # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr + + ctr = Counter.new(128, initial_value=long(iv, 16)) + + # AES.new PARAMETERS + # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr + # 2) MODE_CTR, is the recommended mode + # 3) counter= + + cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) + + # ENCRYPT PADDED DATA + cryptedData = cipher.encrypt(data) + + # COMBINE SALT, DIGEST AND DATA + hmac = HMAC.new(key2, cryptedData, SHA256) + message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) ) + message = hexlify(message) + return message + + def decrypt(self, data, password): + + # SPLIT SALT, DIGEST, AND DATA + data = ''.join(data.split("\n")) + data = unhexlify(data) + salt, cryptedHmac, cryptedData = data.split("\n", 2) + salt = unhexlify(salt) + cryptedData = unhexlify(cryptedData) + + key1, key2, iv = self.gen_key_initctr(password, salt) + + # EXIT EARLY IF DIGEST DOESN'T MATCH + hmacDecrypt = HMAC.new(key2, cryptedData, SHA256) + if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()): + return None + + # SET THE COUNTER AND THE CIPHER + ctr = Counter.new(128, initial_value=long(iv, 16)) + cipher = AES.new(key1, AES.MODE_CTR, counter=ctr) + + # DECRYPT PADDED DATA + decryptedData = cipher.decrypt(cryptedData) + + # UNPAD DATA + padding_length = ord(decryptedData[-1]) + decryptedData = decryptedData[:-padding_length] + + return decryptedData + + def is_equal(self, a, b): + # http://codahale.com/a-lesson-in-timing-attacks/ + if len(a) != len(b): + return False + + result = 0 + for x, y in zip(a, b): + result |= ord(x) ^ ord(y) + return result == 0 + + diff --git a/v1/bin/ansible b/v1/bin/ansible new file mode 100755 index 00000000000000..7fec34ec81e9c6 --- /dev/null +++ b/v1/bin/ansible @@ -0,0 +1,207 @@ +#!/usr/bin/env python + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# 
it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +######################################################## + +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass + +import os +import sys + +from ansible.runner import Runner +import ansible.constants as C +from ansible import utils +from ansible import errors +from ansible import callbacks +from ansible import inventory +######################################################## + +class Cli(object): + ''' code behind bin/ansible ''' + + # ---------------------------------------------- + + def __init__(self): + self.stats = callbacks.AggregateStats() + self.callbacks = callbacks.CliRunnerCallbacks() + if C.DEFAULT_LOAD_CALLBACK_PLUGINS: + callbacks.load_callback_plugins() + + # ---------------------------------------------- + + def parse(self): + ''' create an options parser for bin/ansible ''' + + parser = utils.base_parser( + constants=C, + runas_opts=True, + subset_opts=True, + async_opts=True, + output_opts=True, + connect_opts=True, + check_opts=True, + diff_opts=False, + usage='%prog [options]' + ) + + parser.add_option('-a', '--args', dest='module_args', + help="module arguments", 
default=C.DEFAULT_MODULE_ARGS) + parser.add_option('-m', '--module-name', dest='module_name', + help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, + default=C.DEFAULT_MODULE_NAME) + + options, args = parser.parse_args() + self.callbacks.options = options + + if len(args) == 0 or len(args) > 1: + parser.print_help() + sys.exit(1) + + # privilege escalation command line arguments need to be mutually exclusive + utils.check_mutually_exclusive_privilege(options, parser) + + if (options.ask_vault_pass and options.vault_password_file): + parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") + + return (options, args) + + # ---------------------------------------------- + + def run(self, options, args): + ''' use Runner lib to do SSH things ''' + + pattern = args[0] + + sshpass = becomepass = vault_pass = become_method = None + + # Never ask for an SSH password when we run with local connection + if options.connection == "local": + options.ask_pass = False + else: + options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS + + options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS + + # become + utils.normalize_become_options(options) + prompt_method = utils.choose_pass_prompt(options) + (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method) + + # read vault_pass from a file + if not options.ask_vault_pass and options.vault_password_file: + vault_pass = utils.read_vault_file(options.vault_password_file) + + extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) + + inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass) + if options.subset: + inventory_manager.subset(options.subset) + hosts = inventory_manager.list_hosts(pattern) + + if len(hosts) == 0: + callbacks.display("No hosts matched", stderr=True) + sys.exit(0) + + if 
options.listhosts: + for host in hosts: + callbacks.display(' %s' % host) + sys.exit(0) + + if options.module_name in ['command','shell'] and not options.module_args: + callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True) + sys.exit(1) + + if options.tree: + utils.prepare_writeable_dir(options.tree) + + runner = Runner( + module_name=options.module_name, + module_path=options.module_path, + module_args=options.module_args, + remote_user=options.remote_user, + remote_pass=sshpass, + inventory=inventory_manager, + timeout=options.timeout, + private_key_file=options.private_key_file, + forks=options.forks, + pattern=pattern, + callbacks=self.callbacks, + transport=options.connection, + subset=options.subset, + check=options.check, + diff=options.check, + vault_pass=vault_pass, + become=options.become, + become_method=options.become_method, + become_pass=becomepass, + become_user=options.become_user, + extra_vars=extra_vars, + ) + + if options.seconds: + callbacks.display("background launch...\n\n", color='cyan') + results, poller = runner.run_async(options.seconds) + results = self.poll_while_needed(poller, options) + else: + results = runner.run() + + return (runner, results) + + # ---------------------------------------------- + + def poll_while_needed(self, poller, options): + ''' summarize results from Runner ''' + + # BACKGROUND POLL LOGIC when -B and -P are specified + if options.seconds and options.poll_interval > 0: + poller.wait(options.seconds, options.poll_interval) + + return poller.results + + +######################################################## + +if __name__ == '__main__': + callbacks.display("", log_only=True) + callbacks.display(" ".join(sys.argv), log_only=True) + callbacks.display("", log_only=True) + + cli = Cli() + (options, args) = cli.parse() + try: + (runner, results) = cli.run(options, args) + for result in results['contacted'].values(): + if 'failed' in result or result.get('rc', 0) != 0: + 
sys.exit(2) + if results['dark']: + sys.exit(3) + except errors.AnsibleError, e: + # Generic handler for ansible specific errors + callbacks.display("ERROR: %s" % str(e), stderr=True, color='red') + sys.exit(1) + diff --git a/v1/bin/ansible-doc b/v1/bin/ansible-doc new file mode 100755 index 00000000000000..dff7cecce7903a --- /dev/null +++ b/v1/bin/ansible-doc @@ -0,0 +1,337 @@ +#!/usr/bin/env python + +# (c) 2012, Jan-Piet Mens +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +import os +import sys +import textwrap +import re +import optparse +import datetime +import subprocess +import fcntl +import termios +import struct + +from ansible import utils +from ansible.utils import module_docs +import ansible.constants as C +from ansible.utils import version +import traceback + +MODULEDIR = C.DEFAULT_MODULE_PATH + +BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm') +IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"] + +_ITALIC = re.compile(r"I\(([^)]+)\)") +_BOLD = re.compile(r"B\(([^)]+)\)") +_MODULE = re.compile(r"M\(([^)]+)\)") +_URL = re.compile(r"U\(([^)]+)\)") +_CONST = re.compile(r"C\(([^)]+)\)") +PAGER = 'less' +LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) + # -S (chop long lines) -X (disable termcap init and de-init) + +def pager_print(text): + ''' just print text ''' + print text + +def pager_pipe(text, cmd): + ''' pipe text through a pager ''' + if 'LESS' not in os.environ: + os.environ['LESS'] = LESS_OPTS + try: + cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout) + cmd.communicate(input=text) + except IOError: + pass + except KeyboardInterrupt: + pass + +def pager(text): + ''' find reasonable way to display text ''' + # this is a much simpler form of what is in pydoc.py + if not sys.stdout.isatty(): + pager_print(text) + elif 'PAGER' in os.environ: + if sys.platform == 'win32': + pager_print(text) + else: + pager_pipe(text, os.environ['PAGER']) + elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0: + pager_pipe(text, 'less') + else: + pager_print(text) + +def tty_ify(text): + + t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word' + t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word* + t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] + t = _URL.sub(r"\1", t) # U(word) => word + t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word' + + return t + +def get_man_text(doc): + + opt_indent=" " 
+ text = [] + text.append("> %s\n" % doc['module'].upper()) + + desc = " ".join(doc['description']) + + text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" ")) + + if 'option_keys' in doc and len(doc['option_keys']) > 0: + text.append("Options (= is mandatory):\n") + + for o in sorted(doc['option_keys']): + opt = doc['options'][o] + + if opt.get('required', False): + opt_leadin = "=" + else: + opt_leadin = "-" + + text.append("%s %s" % (opt_leadin, o)) + + desc = " ".join(opt['description']) + + if 'choices' in opt: + choices = ", ".join(str(i) for i in opt['choices']) + desc = desc + " (Choices: " + choices + ")" + if 'default' in opt: + default = str(opt['default']) + desc = desc + " [Default: " + default + "]" + text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent, + subsequent_indent=opt_indent)) + + if 'notes' in doc and len(doc['notes']) > 0: + notes = " ".join(doc['notes']) + text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ", + subsequent_indent=opt_indent)) + + + if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0: + req = ", ".join(doc['requirements']) + text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ", + subsequent_indent=opt_indent)) + + if 'examples' in doc and len(doc['examples']) > 0: + text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's')) + for ex in doc['examples']: + text.append("%s\n" % (ex['code'])) + + if 'plainexamples' in doc and doc['plainexamples'] is not None: + text.append("EXAMPLES:") + text.append(doc['plainexamples']) + if 'returndocs' in doc and doc['returndocs'] is not None: + text.append("RETURN VALUES:") + text.append(doc['returndocs']) + text.append('') + + return "\n".join(text) + + +def get_snippet_text(doc): + + text = [] + desc = tty_ify(" ".join(doc['short_description'])) + text.append("- name: %s" % (desc)) + text.append(" action: %s" % 
(doc['module'])) + + for o in sorted(doc['options'].keys()): + opt = doc['options'][o] + desc = tty_ify(" ".join(opt['description'])) + + if opt.get('required', False): + s = o + "=" + else: + s = o + + text.append(" %-20s # %s" % (s, desc)) + text.append('') + + return "\n".join(text) + +def get_module_list_text(module_list): + tty_size = 0 + if os.isatty(0): + tty_size = struct.unpack('HHHH', + fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1] + columns = max(60, tty_size) + displace = max(len(x) for x in module_list) + linelimit = columns - displace - 5 + text = [] + deprecated = [] + for module in sorted(set(module_list)): + + if module in module_docs.BLACKLIST_MODULES: + continue + + filename = utils.plugins.module_finder.find_plugin(module) + + if filename is None: + continue + if filename.endswith(".ps1"): + continue + if os.path.isdir(filename): + continue + + try: + doc, plainexamples, returndocs = module_docs.get_docstring(filename) + desc = tty_ify(doc.get('short_description', '?')).strip() + if len(desc) > linelimit: + desc = desc[:linelimit] + '...' 
+ + if module.startswith('_'): # Handle deprecated + deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) + else: + text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) + except: + traceback.print_exc() + sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) + + if len(deprecated) > 0: + text.append("\nDEPRECATED:") + text.extend(deprecated) + return "\n".join(text) + +def find_modules(path, module_list): + + if os.path.isdir(path): + for module in os.listdir(path): + if module.startswith('.'): + continue + elif os.path.isdir(module): + find_modules(module, module_list) + elif any(module.endswith(x) for x in BLACKLIST_EXTS): + continue + elif module.startswith('__'): + continue + elif module in IGNORE_FILES: + continue + elif module.startswith('_'): + fullpath = '/'.join([path,module]) + if os.path.islink(fullpath): # avoids aliases + continue + + module = os.path.splitext(module)[0] # removes the extension + module_list.append(module) + +def main(): + + p = optparse.OptionParser( + version=version("%prog"), + usage='usage: %prog [options] [module...]', + description='Show Ansible module documentation', + ) + + p.add_option("-M", "--module-path", + action="store", + dest="module_path", + default=MODULEDIR, + help="Ansible modules/ directory") + p.add_option("-l", "--list", + action="store_true", + default=False, + dest='list_dir', + help='List available modules') + p.add_option("-s", "--snippet", + action="store_true", + default=False, + dest='show_snippet', + help='Show playbook snippet for specified module(s)') + p.add_option('-v', action='version', help='Show version number and exit') + + (options, args) = p.parse_args() + + if options.module_path is not None: + for i in options.module_path.split(os.pathsep): + utils.plugins.module_finder.add_directory(i) + + if options.list_dir: + # list modules + paths = utils.plugins.module_finder._get_paths() + 
module_list = [] + for path in paths: + find_modules(path, module_list) + + pager(get_module_list_text(module_list)) + sys.exit() + + if len(args) == 0: + p.print_help() + + def print_paths(finder): + ''' Returns a string suitable for printing of the search path ''' + + # Uses a list to get the order right + ret = [] + for i in finder._get_paths(): + if i not in ret: + ret.append(i) + return os.pathsep.join(ret) + + text = '' + for module in args: + + filename = utils.plugins.module_finder.find_plugin(module) + if filename is None: + sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder))) + continue + + if any(filename.endswith(x) for x in BLACKLIST_EXTS): + continue + + try: + doc, plainexamples, returndocs = module_docs.get_docstring(filename) + except: + traceback.print_exc() + sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) + continue + + if doc is not None: + + all_keys = [] + for (k,v) in doc['options'].iteritems(): + all_keys.append(k) + all_keys = sorted(all_keys) + doc['option_keys'] = all_keys + + doc['filename'] = filename + doc['docuri'] = doc['module'].replace('_', '-') + doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') + doc['plainexamples'] = plainexamples + doc['returndocs'] = returndocs + + if options.show_snippet: + text += get_snippet_text(doc) + else: + text += get_man_text(doc) + else: + # this typically means we couldn't even parse the docstring, not just that the YAML is busted, + # probably a quoting issue. 
+ sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module) + pager(text) + +if __name__ == '__main__': + main() diff --git a/v1/bin/ansible-galaxy b/v1/bin/ansible-galaxy new file mode 100755 index 00000000000000..a6d625671ec548 --- /dev/null +++ b/v1/bin/ansible-galaxy @@ -0,0 +1,957 @@ +#!/usr/bin/env python + +######################################################################## +# +# (C) 2013, James Cammarata +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +######################################################################## + +import datetime +import json +import os +import os.path +import shutil +import subprocess +import sys +import tarfile +import tempfile +import urllib +import urllib2 +import yaml + +from collections import defaultdict +from distutils.version import LooseVersion +from jinja2 import Environment +from optparse import OptionParser + +import ansible.constants as C +import ansible.utils +from ansible.errors import AnsibleError + +default_meta_template = """--- +galaxy_info: + author: {{ author }} + description: {{description}} + company: {{ company }} + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: {{ issue_tracker_url }} + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: {{ license }} + min_ansible_version: {{ min_ansible_version }} + # + # Below are all platforms currently available. Just uncomment + # the ones that apply to your role. If you don't see your + # platform on this list, let us know and we'll get it added! + # + #platforms: + {%- for platform,versions in platforms.iteritems() %} + #- name: {{ platform }} + # versions: + # - all + {%- for version in versions %} + # - {{ version }} + {%- endfor %} + {%- endfor %} + # + # Below are all categories currently available. Just as with + # the platforms above, uncomment those that apply to your role. + # + #categories: + {%- for category in categories %} + #- {{ category.name }} + {%- endfor %} +dependencies: [] + # List your role dependencies here, one per line. + # Be sure to remove the '[]' above if you add dependencies + # to this list. + {% for dependency in dependencies %} + #- {{ dependency }} + {% endfor %} + +""" + +default_readme_template = """Role Name +========= + +A brief description of the role goes here. 
+ +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. + +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). +""" + +#------------------------------------------------------------------------------------- +# Utility functions for parsing actions/options +#------------------------------------------------------------------------------------- + +VALID_ACTIONS = ("init", "info", "install", "list", "remove") +SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) + +def get_action(args): + """ + Get the action the user wants to execute from the + sys argv list. 
+ """ + for i in range(0,len(args)): + arg = args[i] + if arg in VALID_ACTIONS: + del args[i] + return arg + return None + +def build_option_parser(action): + """ + Builds an option parser object based on the action + the user wants to execute. + """ + + usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS) + epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) + OptionParser.format_epilog = lambda self, formatter: self.epilog + parser = OptionParser(usage=usage, epilog=epilog) + + if not action: + parser.print_help() + sys.exit() + + # options for all actions + # - none yet + + # options specific to actions + if action == "info": + parser.set_usage("usage: %prog info [options] role_name[,version]") + elif action == "init": + parser.set_usage("usage: %prog init [options] role_name") + parser.add_option( + '-p', '--init-path', dest='init_path', default="./", + help='The path in which the skeleton role will be created. 
' + 'The default is the current working directory.') + parser.add_option( + '--offline', dest='offline', default=False, action='store_true', + help="Don't query the galaxy API when creating roles") + elif action == "install": + parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") + parser.add_option( + '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, + help='Ignore errors and continue with the next specified role.') + parser.add_option( + '-n', '--no-deps', dest='no_deps', action='store_true', default=False, + help='Don\'t download roles listed as dependencies') + parser.add_option( + '-r', '--role-file', dest='role_file', + help='A file containing a list of roles to be imported') + elif action == "remove": + parser.set_usage("usage: %prog remove role1 role2 ...") + elif action == "list": + parser.set_usage("usage: %prog list [role_name]") + + # options that apply to more than one action + if action != "init": + parser.add_option( + '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, + help='The path to the directory containing your roles. ' + 'The default is the roles_path configured in your ' + 'ansible.cfg file (/etc/ansible/roles if not configured)') + + if action in ("info","init","install"): + parser.add_option( + '-s', '--server', dest='api_server', default="galaxy.ansible.com", + help='The API server destination') + + if action in ("init","install"): + parser.add_option( + '-f', '--force', dest='force', action='store_true', default=False, + help='Force overwriting an existing role') + # done, return the parser + return parser + +def get_opt(options, k, defval=""): + """ + Returns an option from an Optparse values instance. 
+ """ + try: + data = getattr(options, k) + except: + return defval + if k == "roles_path": + if os.pathsep in data: + data = data.split(os.pathsep)[0] + return data + +def exit_without_ignore(options, rc=1): + """ + Exits with the specified return code unless the + option --ignore-errors was specified + """ + + if not get_opt(options, "ignore_errors", False): + print '- you can use --ignore-errors to skip failed roles.' + sys.exit(rc) + + +#------------------------------------------------------------------------------------- +# Galaxy API functions +#------------------------------------------------------------------------------------- + +def api_get_config(api_server): + """ + Fetches the Galaxy API current version to ensure + the API server is up and reachable. + """ + + try: + url = 'https://%s/api/' % api_server + data = json.load(urllib2.urlopen(url)) + if not data.get("current_version",None): + return None + else: + return data + except: + return None + +def api_lookup_role_by_name(api_server, role_name, notify=True): + """ + Uses the Galaxy API to do a lookup on the role owner/name. + """ + + role_name = urllib.quote(role_name) + + try: + parts = role_name.split(".") + user_name = ".".join(parts[0:-1]) + role_name = parts[-1] + if notify: + print "- downloading role '%s', owned by %s" % (role_name, user_name) + except: + parser.print_help() + print "- invalid role name (%s). Specify role as format: username.rolename" % role_name + sys.exit(1) + + url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name) + try: + data = json.load(urllib2.urlopen(url)) + if len(data["results"]) == 0: + return None + else: + return data["results"][0] + except: + return None + +def api_fetch_role_related(api_server, related, role_id): + """ + Uses the Galaxy API to fetch the list of related items for + the given role. The url comes from the 'related' field of + the role. 
+ """ + + try: + url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related) + data = json.load(urllib2.urlopen(url)) + results = data['results'] + done = (data.get('next', None) == None) + while not done: + url = 'https://%s%s' % (api_server, data['next']) + print url + data = json.load(urllib2.urlopen(url)) + results += data['results'] + done = (data.get('next', None) == None) + return results + except: + return None + +def api_get_list(api_server, what): + """ + Uses the Galaxy API to fetch the list of items specified. + """ + + try: + url = 'https://%s/api/v1/%s/?page_size' % (api_server, what) + data = json.load(urllib2.urlopen(url)) + if "results" in data: + results = data['results'] + else: + results = data + done = True + if "next" in data: + done = (data.get('next', None) == None) + while not done: + url = 'https://%s%s' % (api_server, data['next']) + print url + data = json.load(urllib2.urlopen(url)) + results += data['results'] + done = (data.get('next', None) == None) + return results + except: + print "- failed to download the %s list" % what + return None + +#------------------------------------------------------------------------------------- +# scm repo utility functions +#------------------------------------------------------------------------------------- + +def scm_archive_role(scm, role_url, role_version, role_name): + if scm not in ['hg', 'git']: + print "- scm %s is not currently supported" % scm + return False + tempdir = tempfile.mkdtemp() + clone_cmd = [scm, 'clone', role_url, role_name] + with open('/dev/null', 'w') as devnull: + try: + print "- executing: %s" % " ".join(clone_cmd) + popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull) + except: + raise AnsibleError("error executing: %s" % " ".join(clone_cmd)) + rc = popen.wait() + if rc != 0: + print "- command %s failed" % ' '.join(clone_cmd) + print " in directory %s" % tempdir + return False + + temp_file = 
tempfile.NamedTemporaryFile(delete=False, suffix='.tar') + if scm == 'hg': + archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name] + if role_version: + archive_cmd.extend(['-r', role_version]) + archive_cmd.append(temp_file.name) + if scm == 'git': + archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name] + if role_version: + archive_cmd.append(role_version) + else: + archive_cmd.append('HEAD') + + with open('/dev/null', 'w') as devnull: + print "- executing: %s" % " ".join(archive_cmd) + popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name), + stderr=devnull, stdout=devnull) + rc = popen.wait() + if rc != 0: + print "- command %s failed" % ' '.join(archive_cmd) + print " in directory %s" % tempdir + return False + + shutil.rmtree(tempdir, ignore_errors=True) + + return temp_file.name + + +#------------------------------------------------------------------------------------- +# Role utility functions +#------------------------------------------------------------------------------------- + +def get_role_path(role_name, options): + """ + Returns the role path based on the roles_path option + and the role name. + """ + roles_path = get_opt(options,'roles_path') + roles_path = os.path.join(roles_path, role_name) + roles_path = os.path.expanduser(roles_path) + return roles_path + +def get_role_metadata(role_name, options): + """ + Returns the metadata as YAML, if the file 'meta/main.yml' + exists in the specified role_path + """ + role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml') + try: + if os.path.isfile(role_path): + f = open(role_path, 'r') + meta_data = yaml.safe_load(f) + f.close() + return meta_data + else: + return None + except: + return None + +def get_galaxy_install_info(role_name, options): + """ + Returns the YAML data contained in 'meta/.galaxy_install_info', + if it exists. 
+ """ + + try: + info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info') + if os.path.isfile(info_path): + f = open(info_path, 'r') + info_data = yaml.safe_load(f) + f.close() + return info_data + else: + return None + except: + return None + +def write_galaxy_install_info(role_name, role_version, options): + """ + Writes a YAML-formatted file to the role's meta/ directory + (named .galaxy_install_info) which contains some information + we can use later for commands like 'list' and 'info'. + """ + + info = dict( + version = role_version, + install_date = datetime.datetime.utcnow().strftime("%c"), + ) + try: + info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info') + f = open(info_path, 'w+') + info_data = yaml.safe_dump(info, f) + f.close() + except: + return False + return True + + +def remove_role(role_name, options): + """ + Removes the specified role from the roles path. There is a + sanity check to make sure there's a meta/main.yml file at this + path so the user doesn't blow away random directories + """ + if get_role_metadata(role_name, options): + role_path = get_role_path(role_name, options) + shutil.rmtree(role_path) + return True + else: + return False + +def fetch_role(role_name, target, role_data, options): + """ + Downloads the archived role from github to a temp location, extracts + it, and then copies the extracted role to the role library path. 
+ """ + + # first grab the file and save it to a temp location + if '://' in role_name: + archive_url = role_name + else: + archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target) + print "- downloading role from %s" % archive_url + + try: + url_file = urllib2.urlopen(archive_url) + temp_file = tempfile.NamedTemporaryFile(delete=False) + data = url_file.read() + while data: + temp_file.write(data) + data = url_file.read() + temp_file.close() + return temp_file.name + except Exception, e: + # TODO: better urllib2 error handling for error + # messages that are more exact + print "- error: failed to download the file." + return False + +def install_role(role_name, role_version, role_filename, options): + # the file is a tar, so open it that way and extract it + # to the specified (or default) roles directory + + if not tarfile.is_tarfile(role_filename): + print "- error: the file downloaded was not a tar.gz" + return False + else: + if role_filename.endswith('.gz'): + role_tar_file = tarfile.open(role_filename, "r:gz") + else: + role_tar_file = tarfile.open(role_filename, "r") + # verify the role's meta file + meta_file = None + members = role_tar_file.getmembers() + # next find the metadata file + for member in members: + if "/meta/main.yml" in member.name: + meta_file = member + break + if not meta_file: + print "- error: this role does not appear to have a meta/main.yml file." + return False + else: + try: + meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file)) + except: + print "- error: this role does not appear to have a valid meta/main.yml file." 
+ return False + + # we strip off the top-level directory for all of the files contained within + # the tar file here, since the default is 'github_repo-target', and change it + # to the specified role's name + role_path = os.path.join(get_opt(options, 'roles_path'), role_name) + role_path = os.path.expanduser(role_path) + print "- extracting %s to %s" % (role_name, role_path) + try: + if os.path.exists(role_path): + if not os.path.isdir(role_path): + print "- error: the specified roles path exists and is not a directory." + return False + elif not get_opt(options, "force", False): + print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name + return False + else: + # using --force, remove the old path + if not remove_role(role_name, options): + print "- error: %s doesn't appear to contain a role." % role_path + print " please remove this directory manually if you really want to put the role here." + return False + else: + os.makedirs(role_path) + + # now we do the actual extraction to the role_path + for member in members: + # we only extract files, and remove any relative path + # bits that might be in the file for security purposes + # and drop the leading directory, as mentioned above + if member.isreg() or member.issym(): + parts = member.name.split("/")[1:] + final_parts = [] + for part in parts: + if part != '..' 
and '~' not in part and '$' not in part: + final_parts.append(part) + member.name = os.path.join(*final_parts) + role_tar_file.extract(member, role_path) + + # write out the install info file for later use + write_galaxy_install_info(role_name, role_version, options) + except OSError, e: + print "- error: you do not have permission to modify files in %s" % role_path + return False + + # return the parsed yaml metadata + print "- %s was installed successfully" % role_name + return meta_file_data + +#------------------------------------------------------------------------------------- +# Action functions +#------------------------------------------------------------------------------------- + +def execute_init(args, options, parser): + """ + Executes the init action, which creates the skeleton framework + of a role that complies with the galaxy metadata format. + """ + + init_path = get_opt(options, 'init_path', './') + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + force = get_opt(options, 'force', False) + offline = get_opt(options, 'offline', False) + + if not offline: + api_config = api_get_config(api_server) + if not api_config: + print "- the API server (%s) is not responding, please try again later." % api_server + sys.exit(1) + + try: + role_name = args.pop(0).strip() + if role_name == "": + raise Exception("") + role_path = os.path.join(init_path, role_name) + if os.path.exists(role_path): + if os.path.isfile(role_path): + print "- the path %s already exists, but is a file - aborting" % role_path + sys.exit(1) + elif not force: + print "- the directory %s already exists." % role_path + print " you can use --force to re-initialize this directory,\n" + \ + " however it will reset any main.yml files that may have\n" + \ + " been modified there already." 
+ sys.exit(1) + except Exception, e: + parser.print_help() + print "- no role name specified for init" + sys.exit(1) + + ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') + + # create the default README.md + if not os.path.exists(role_path): + os.makedirs(role_path) + readme_path = os.path.join(role_path, "README.md") + f = open(readme_path, "wb") + f.write(default_readme_template) + f.close + + for dir in ROLE_DIRS: + dir_path = os.path.join(init_path, role_name, dir) + main_yml_path = os.path.join(dir_path, 'main.yml') + # create the directory if it doesn't exist already + if not os.path.exists(dir_path): + os.makedirs(dir_path) + + # now create the main.yml file for that directory + if dir == "meta": + # create a skeleton meta/main.yml with a valid galaxy_info + # datastructure in place, plus with all of the available + # tags/platforms included (but commented out) and the + # dependencies section + platforms = [] + if not offline: + platforms = api_get_list(api_server, "platforms") or [] + categories = [] + if not offline: + categories = api_get_list(api_server, "categories") or [] + + # group the list of platforms from the api based + # on their names, with the release field being + # appended to a list of versions + platform_groups = defaultdict(list) + for platform in platforms: + platform_groups[platform['name']].append(platform['release']) + platform_groups[platform['name']].sort() + + inject = dict( + author = 'your name', + company = 'your company (optional)', + license = 'license (GPLv2, CC-BY, etc)', + issue_tracker_url = 'http://example.com/issue/tracker', + min_ansible_version = '1.2', + platforms = platform_groups, + categories = categories, + ) + rendered_meta = Environment().from_string(default_meta_template).render(inject) + f = open(main_yml_path, 'w') + f.write(rendered_meta) + f.close() + pass + elif dir not in ('files','templates'): + # just write a (mostly) empty YAML file for main.yml + f = open(main_yml_path, 
'w') + f.write('---\n# %s file for %s\n' % (dir,role_name)) + f.close() + print "- %s was created successfully" % role_name + +def execute_info(args, options, parser): + """ + Executes the info action. This action prints out detailed + information about an installed role as well as info available + from the galaxy API. + """ + + if len(args) == 0: + # the user needs to specify a role + parser.print_help() + print "- you must specify a user/role name" + sys.exit(1) + + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + api_config = api_get_config(api_server) + roles_path = get_opt(options, "roles_path") + + for role in args: + + role_info = {} + + install_info = get_galaxy_install_info(role, options) + if install_info: + if 'version' in install_info: + install_info['intalled_version'] = install_info['version'] + del install_info['version'] + role_info.update(install_info) + + remote_data = api_lookup_role_by_name(api_server, role, False) + if remote_data: + role_info.update(remote_data) + + metadata = get_role_metadata(role, options) + if metadata: + role_info.update(metadata) + + role_spec = ansible.utils.role_spec_parse(role) + if role_spec: + role_info.update(role_spec) + + if role_info: + print "- %s:" % (role) + for k in sorted(role_info.keys()): + + if k in SKIP_INFO_KEYS: + continue + + if isinstance(role_info[k], dict): + print "\t%s: " % (k) + for key in sorted(role_info[k].keys()): + if key in SKIP_INFO_KEYS: + continue + print "\t\t%s: %s" % (key, role_info[k][key]) + else: + print "\t%s: %s" % (k, role_info[k]) + else: + print "- the role %s was not found" % role + +def execute_install(args, options, parser): + """ + Executes the installation action. The args list contains the + roles to be installed, unless -f was specified. The list of roles + can be a name (which will be downloaded via the galaxy API and github), + or it can be a local .tar.gz file. 
+ """ + + role_file = get_opt(options, "role_file", None) + + if len(args) == 0 and role_file is None: + # the user needs to specify one of either --role-file + # or specify a single user/role name + parser.print_help() + print "- you must specify a user/role name or a roles file" + sys.exit() + elif len(args) == 1 and not role_file is None: + # using a role file is mutually exclusive of specifying + # the role name on the command line + parser.print_help() + print "- please specify a user/role name, or a roles file, but not both" + sys.exit(1) + + api_server = get_opt(options, "api_server", "galaxy.ansible.com") + no_deps = get_opt(options, "no_deps", False) + roles_path = get_opt(options, "roles_path") + + roles_done = [] + if role_file: + f = open(role_file, 'r') + if role_file.endswith('.yaml') or role_file.endswith('.yml'): + roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f)) + else: + # roles listed in a file, one per line + roles_left = map(ansible.utils.role_spec_parse, f.readlines()) + f.close() + else: + # roles were specified directly, so we'll just go out grab them + # (and their dependencies, unless the user doesn't want us to). 
+ roles_left = map(ansible.utils.role_spec_parse, args) + + while len(roles_left) > 0: + # query the galaxy API for the role data + role_data = None + role = roles_left.pop(0) + role_src = role.get("src") + role_scm = role.get("scm") + role_path = role.get("path") + + if role_path: + options.roles_path = role_path + else: + options.roles_path = roles_path + + if os.path.isfile(role_src): + # installing a local tar.gz + tmp_file = role_src + else: + if role_scm: + # create tar file from scm url + tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name")) + elif '://' in role_src: + # just download a URL - version will probably be in the URL + tmp_file = fetch_role(role_src, None, None, options) + else: + # installing from galaxy + api_config = api_get_config(api_server) + if not api_config: + print "- the API server (%s) is not responding, please try again later." % api_server + sys.exit(1) + + role_data = api_lookup_role_by_name(api_server, role_src) + if not role_data: + print "- sorry, %s was not found on %s." % (role_src, api_server) + exit_without_ignore(options) + continue + + role_versions = api_fetch_role_related(api_server, 'versions', role_data['id']) + if "version" not in role or role['version'] == '': + # convert the version names to LooseVersion objects + # and sort them to get the latest version. If there + # are no versions in the list, we'll grab the head + # of the master branch + if len(role_versions) > 0: + loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] + loose_versions.sort() + role["version"] = str(loose_versions[-1]) + else: + role["version"] = 'master' + elif role['version'] != 'master': + if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]: + print 'role is %s' % role + print "- the specified version (%s) was not found in the list of available versions (%s)." 
% (role['version'], role_versions) + exit_without_ignore(options) + continue + + # download the role. if --no-deps was specified, we stop here, + # otherwise we recursively grab roles and all of their deps. + tmp_file = fetch_role(role_src, role["version"], role_data, options) + installed = False + if tmp_file: + installed = install_role(role.get("name"), role.get("version"), tmp_file, options) + # we're done with the temp file, clean it up + if tmp_file != role_src: + os.unlink(tmp_file) + # install dependencies, if we want them + if not no_deps and installed: + if not role_data: + role_data = get_role_metadata(role.get("name"), options) + role_dependencies = role_data['dependencies'] + else: + role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id']) + for dep in role_dependencies: + if isinstance(dep, basestring): + dep = ansible.utils.role_spec_parse(dep) + else: + dep = ansible.utils.role_yaml_parse(dep) + if not get_role_metadata(dep["name"], options): + if dep not in roles_left: + print '- adding dependency: %s' % dep["name"] + roles_left.append(dep) + else: + print '- dependency %s already pending installation.' % dep["name"] + else: + print '- dependency %s is already installed, skipping.' % dep["name"] + if not tmp_file or not installed: + print "- %s was NOT installed successfully." % role.get("name") + exit_without_ignore(options) + sys.exit(0) + +def execute_remove(args, options, parser): + """ + Executes the remove action. The args list contains the list + of roles to be removed. This list can contain more than one role. + """ + + if len(args) == 0: + parser.print_help() + print '- you must specify at least one role to remove.' + sys.exit() + + for role in args: + if get_role_metadata(role, options): + if remove_role(role, options): + print '- successfully removed %s' % role + else: + print "- failed to remove role: %s" % role + else: + print '- %s is not installed, skipping.' 
% role + sys.exit(0) + +def execute_list(args, options, parser): + """ + Executes the list action. The args list can contain zero + or one role. If one is specified, only that role will be + shown, otherwise all roles in the specified directory will + be shown. + """ + + if len(args) > 1: + print "- please specify only one role to list, or specify no roles to see a full list" + sys.exit(1) + + if len(args) == 1: + # show only the request role, if it exists + role_name = args[0] + metadata = get_role_metadata(role_name, options) + if metadata: + install_info = get_galaxy_install_info(role_name, options) + version = None + if install_info: + version = install_info.get("version", None) + if not version: + version = "(unknown version)" + # show some more info about single roles here + print "- %s, %s" % (role_name, version) + else: + print "- the role %s was not found" % role_name + else: + # show all valid roles in the roles_path directory + roles_path = get_opt(options, 'roles_path') + roles_path = os.path.expanduser(roles_path) + if not os.path.exists(roles_path): + parser.print_help() + print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path + sys.exit(1) + elif not os.path.isdir(roles_path): + print "- %s exists, but it is not a directory. 
Please specify a valid path with --roles-path" % roles_path + parser.print_help() + sys.exit(1) + path_files = os.listdir(roles_path) + for path_file in path_files: + if get_role_metadata(path_file, options): + install_info = get_galaxy_install_info(path_file, options) + version = None + if install_info: + version = install_info.get("version", None) + if not version: + version = "(unknown version)" + print "- %s, %s" % (path_file, version) + sys.exit(0) + +#------------------------------------------------------------------------------------- +# The main entry point +#------------------------------------------------------------------------------------- + +def main(): + # parse the CLI options + action = get_action(sys.argv) + parser = build_option_parser(action) + (options, args) = parser.parse_args() + + # execute the desired action + if 1: #try: + fn = globals()["execute_%s" % action] + fn(args, options, parser) + #except KeyError, e: + # print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS)) + # sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/v1/bin/ansible-playbook b/v1/bin/ansible-playbook new file mode 100755 index 00000000000000..3d6e1f9f4029de --- /dev/null +++ b/v1/bin/ansible-playbook @@ -0,0 +1,330 @@ +#!/usr/bin/env python +# (C) 2012, Michael DeHaan, + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +####################################################### + +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass + +import sys +import os +import stat + +# Augment PYTHONPATH to find Python modules relative to this file path +# This is so that we can find the modules when running from a local checkout +# installed as editable with `pip install -e ...` or `python setup.py develop` +local_module_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), '..', 'lib') +) +sys.path.append(local_module_path) + +import ansible.playbook +import ansible.constants as C +import ansible.utils.template +from ansible import errors +from ansible import callbacks +from ansible import utils +from ansible.color import ANSIBLE_COLOR, stringc +from ansible.callbacks import display + +def colorize(lead, num, color): + """ Print 'lead' = 'num' in 'color' """ + if num != 0 and ANSIBLE_COLOR and color is not None: + return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color)) + else: + return "%s=%-4s" % (lead, str(num)) + +def hostcolor(host, stats, color=True): + if ANSIBLE_COLOR and color: + if stats['failures'] != 0 or stats['unreachable'] != 0: + return "%-37s" % stringc(host, 'red') + elif stats['changed'] != 0: + return "%-37s" % stringc(host, 'yellow') + else: + return "%-37s" % stringc(host, 'green') + return "%-26s" % host + + +def main(args): + ''' run ansible-playbook operations ''' + + # create parser for CLI options + parser = utils.base_parser( + constants=C, + usage = "%prog playbook.yml", + connect_opts=True, + runas_opts=True, + subset_opts=True, + check_opts=True, + diff_opts=True + ) + 
 #parser.add_option('--vault-password', dest="vault_password", + # help="password for vault encrypted files") + parser.add_option('-t', '--tags', dest='tags', default='all', + help="only run plays and tasks tagged with these values") + parser.add_option('--skip-tags', dest='skip_tags', + help="only run plays and tasks whose tags do not match these values") + parser.add_option('--syntax-check', dest='syntax', action='store_true', + help="perform a syntax check on the playbook, but do not execute it") + parser.add_option('--list-tasks', dest='listtasks', action='store_true', + help="list all tasks that would be executed") + parser.add_option('--list-tags', dest='listtags', action='store_true', + help="list all available tags") + parser.add_option('--step', dest='step', action='store_true', + help="one-step-at-a-time: confirm each task before running") + parser.add_option('--start-at-task', dest='start_at', + help="start the playbook at the task matching this name") + parser.add_option('--force-handlers', dest='force_handlers', + default=C.DEFAULT_FORCE_HANDLERS, action='store_true', + help="run handlers even if a task fails") + parser.add_option('--flush-cache', dest='flush_cache', action='store_true', + help="clear the fact cache") + + options, args = parser.parse_args(args) + + if len(args) == 0: + parser.print_help(file=sys.stderr) + return 1 + + # privilege escalation command line arguments need to be mutually exclusive + utils.check_mutually_exclusive_privilege(options, parser) + + if (options.ask_vault_pass and options.vault_password_file): + parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") + + sshpass = None + becomepass = None + vault_pass = None + + options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS + + if options.listhosts or options.syntax or options.listtasks or options.listtags: + (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass) + else: + options.ask_pass = 
options.ask_pass or C.DEFAULT_ASK_PASS + # Never ask for an SSH password when we run with local connection + if options.connection == "local": + options.ask_pass = False + + # set pe options + utils.normalize_become_options(options) + prompt_method = utils.choose_pass_prompt(options) + (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, + become_ask_pass=options.become_ask_pass, + ask_vault_pass=options.ask_vault_pass, + become_method=prompt_method) + + # read vault_pass from a file + if not options.ask_vault_pass and options.vault_password_file: + vault_pass = utils.read_vault_file(options.vault_password_file) + + extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) + + only_tags = options.tags.split(",") + skip_tags = options.skip_tags + if options.skip_tags is not None: + skip_tags = options.skip_tags.split(",") + + for playbook in args: + if not os.path.exists(playbook): + raise errors.AnsibleError("the playbook: %s could not be found" % playbook) + if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)): + raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook) + + inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass) + + # Note: slightly wrong, this is written so that implicit localhost + # (which is not returned in list_hosts()) is taken into account for + # warning if inventory is empty. But it can't be taken into account for + # checking if limit doesn't match any hosts. Instead we don't worry about + # limit if only implicit localhost was in inventory to start with. 
+ # + # Fix this in v2 + no_hosts = False + if len(inventory.list_hosts()) == 0: + # Empty inventory + utils.warning("provided hosts list is empty, only localhost is available") + no_hosts = True + inventory.subset(options.subset) + if len(inventory.list_hosts()) == 0 and no_hosts is False: + # Invalid limit + raise errors.AnsibleError("Specified --limit does not match any hosts") + + # run all playbooks specified on the command line + for playbook in args: + + stats = callbacks.AggregateStats() + playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY) + if options.step: + playbook_cb.step = options.step + if options.start_at: + playbook_cb.start_at = options.start_at + runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY) + + pb = ansible.playbook.PlayBook( + playbook=playbook, + module_path=options.module_path, + inventory=inventory, + forks=options.forks, + remote_user=options.remote_user, + remote_pass=sshpass, + callbacks=playbook_cb, + runner_callbacks=runner_cb, + stats=stats, + timeout=options.timeout, + transport=options.connection, + become=options.become, + become_method=options.become_method, + become_user=options.become_user, + become_pass=becomepass, + extra_vars=extra_vars, + private_key_file=options.private_key_file, + only_tags=only_tags, + skip_tags=skip_tags, + check=options.check, + diff=options.diff, + vault_password=vault_pass, + force_handlers=options.force_handlers, + ) + + if options.flush_cache: + display(callbacks.banner("FLUSHING FACT CACHE")) + pb.SETUP_CACHE.flush() + + if options.listhosts or options.listtasks or options.syntax or options.listtags: + print '' + print 'playbook: %s' % playbook + print '' + playnum = 0 + for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs): + playnum += 1 + play = ansible.playbook.Play(pb, play_ds, play_basedir, + vault_password=pb.vault_password) + label = play.name + hosts = pb.inventory.list_hosts(play.hosts) + + if options.listhosts: + print ' play #%d 
(%s): host count=%d' % (playnum, label, len(hosts)) + for host in hosts: + print ' %s' % host + + if options.listtags or options.listtasks: + print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags)))) + + if options.listtags: + tags = [] + for task in pb.tasks_to_run_in_play(play): + tags.extend(task.tags) + print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged'])))) + + if options.listtasks: + + for task in pb.tasks_to_run_in_play(play): + if getattr(task, 'name', None) is not None: + # meta tasks have no names + print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged'])))) + + if options.listhosts or options.listtasks or options.listtags: + print '' + continue + + if options.syntax: + # if we've not exited by now then we are fine. + print 'Playbook Syntax is fine' + return 0 + + failed_hosts = [] + unreachable_hosts = [] + + try: + + pb.run() + + hosts = sorted(pb.stats.processed.keys()) + display(callbacks.banner("PLAY RECAP")) + playbook_cb.on_stats(pb.stats) + + for h in hosts: + t = pb.stats.summarize(h) + if t['failures'] > 0: + failed_hosts.append(h) + if t['unreachable'] > 0: + unreachable_hosts.append(h) + + retries = failed_hosts + unreachable_hosts + + if C.RETRY_FILES_ENABLED and len(retries) > 0: + filename = pb.generate_retry_inventory(retries) + if filename: + display(" to retry, use: --limit @%s\n" % filename) + + for h in hosts: + t = pb.stats.summarize(h) + + display("%s : %s %s %s %s" % ( + hostcolor(h, t), + colorize('ok', t['ok'], 'green'), + colorize('changed', t['changed'], 'yellow'), + colorize('unreachable', t['unreachable'], 'red'), + colorize('failed', t['failures'], 'red')), + screen_only=True + ) + + display("%s : %s %s %s %s" % ( + hostcolor(h, t, False), + colorize('ok', t['ok'], None), + colorize('changed', t['changed'], None), + colorize('unreachable', t['unreachable'], None), + colorize('failed', t['failures'], None)), + log_only=True + ) + + + 
print "" + if len(failed_hosts) > 0: + return 2 + if len(unreachable_hosts) > 0: + return 3 + + except errors.AnsibleError, e: + display("ERROR: %s" % e, color='red') + return 1 + + return 0 + + +if __name__ == "__main__": + display(" ", log_only=True) + display(" ".join(sys.argv), log_only=True) + display(" ", log_only=True) + try: + sys.exit(main(sys.argv[1:])) + except errors.AnsibleError, e: + display("ERROR: %s" % e, color='red', stderr=True) + sys.exit(1) + except KeyboardInterrupt, ke: + display("ERROR: interrupted", color='red', stderr=True) + sys.exit(1) diff --git a/v1/bin/ansible-pull b/v1/bin/ansible-pull new file mode 100755 index 00000000000000..d4887631e0fdfb --- /dev/null +++ b/v1/bin/ansible-pull @@ -0,0 +1,257 @@ +#!/usr/bin/env python + +# (c) 2012, Stephen Fromm +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# ansible-pull is a script that runs ansible in local mode +# after checking out a playbooks directory from source repo. There is an +# example playbook to bootstrap this script in the examples/ dir which +# installs ansible and sets it up to run on cron. + +# usage: +# ansible-pull -d /var/lib/ansible \ +# -U http://example.net/content.git [-C production] \ +# [path/playbook.yml] +# +# the -d and -U arguments are required; the -C argument is optional. 
+# +# ansible-pull accepts an optional argument to specify a playbook +# location underneath the workdir and then searches the source repo +# for playbooks in the following order, stopping at the first match: +# +# 1. $workdir/path/playbook.yml, if specified +# 2. $workdir/$fqdn.yml +# 3. $workdir/$hostname.yml +# 4. $workdir/local.yml +# +# the source repo must contain at least one of these playbooks. + +import os +import shutil +import sys +import datetime +import socket +import random +import time +from ansible import utils +from ansible.utils import cmd_functions +from ansible import errors +from ansible import inventory + +DEFAULT_REPO_TYPE = 'git' +DEFAULT_PLAYBOOK = 'local.yml' +PLAYBOOK_ERRORS = {1: 'File does not exist', + 2: 'File is not readable'} + +VERBOSITY=0 + +def increment_debug(option, opt, value, parser): + global VERBOSITY + VERBOSITY += 1 + +def try_playbook(path): + if not os.path.exists(path): + return 1 + if not os.access(path, os.R_OK): + return 2 + return 0 + + +def select_playbook(path, args): + playbook = None + if len(args) > 0 and args[0] is not None: + playbook = "%s/%s" % (path, args[0]) + rc = try_playbook(playbook) + if rc != 0: + print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc]) + return None + return playbook + else: + fqdn = socket.getfqdn() + hostpb = "%s/%s.yml" % (path, fqdn) + shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0]) + localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK) + errors = [] + for pb in [hostpb, shorthostpb, localpb]: + rc = try_playbook(pb) + if rc == 0: + playbook = pb + break + else: + errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc])) + if playbook is None: + print >>sys.stderr, "\n".join(errors) + return playbook + + +def main(args): + """ Set up and run a local playbook """ + usage = "%prog [options] [playbook.yml]" + parser = utils.SortedOptParser(usage=usage) + parser.add_option('--purge', default=False, action='store_true', + help='purge checkout after playbook run') + 
parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', + help='only run the playbook if the repository has been updated') + parser.add_option('-s', '--sleep', dest='sleep', default=None, + help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests') + parser.add_option('-f', '--force', dest='force', default=False, + action='store_true', + help='run the playbook even if the repository could ' + 'not be updated') + parser.add_option('-d', '--directory', dest='dest', default=None, + help='directory to checkout repository to') + #parser.add_option('-l', '--live', default=True, action='store_live', + # help='Print the ansible-playbook output while running') + parser.add_option('-U', '--url', dest='url', default=None, + help='URL of the playbook repository') + parser.add_option('-C', '--checkout', dest='checkout', + help='branch/tag/commit to checkout. ' + 'Defaults to behavior of repository module.') + parser.add_option('-i', '--inventory-file', dest='inventory', + help="location of the inventory host file") + parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", + help="set additional variables as key=value or YAML/JSON", default=[]) + parser.add_option('-v', '--verbose', default=False, action="callback", + callback=increment_debug, + help='Pass -vvvv to ansible-playbook') + parser.add_option('-m', '--module-name', dest='module_name', + default=DEFAULT_REPO_TYPE, + help='Module name used to check out repository. ' + 'Default is %s.' 
% DEFAULT_REPO_TYPE) + parser.add_option('--vault-password-file', dest='vault_password_file', + help="vault password file") + parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + help='ask for sudo password') + parser.add_option('-t', '--tags', dest='tags', default=False, + help='only run plays and tasks tagged with these values') + parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', + help='adds the hostkey for the repo url if not already added') + parser.add_option('--key-file', dest='key_file', + help="Pass '-i ' to the SSH arguments used by git.") + options, args = parser.parse_args(args) + + hostname = socket.getfqdn() + if not options.dest: + # use a hostname dependent directory, in case of $HOME on nfs + options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname) + + options.dest = os.path.abspath(options.dest) + + if not options.url: + parser.error("URL for repository not specified, use -h for help") + return 1 + + now = datetime.datetime.now() + print now.strftime("Starting ansible-pull at %F %T") + + # Attempt to use the inventory passed in as an argument + # It might not yet have been downloaded so use localhost if not + if not options.inventory or not os.path.exists(options.inventory): + inv_opts = 'localhost,' + else: + inv_opts = options.inventory + limit_opts = 'localhost:%s:127.0.0.1' % hostname + repo_opts = "name=%s dest=%s" % (options.url, options.dest) + + if VERBOSITY == 0: + base_opts = '-c local --limit "%s"' % limit_opts + elif VERBOSITY > 0: + debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ]) + base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts) + + if options.checkout: + repo_opts += ' version=%s' % options.checkout + + # Only git module is supported + if options.module_name == DEFAULT_REPO_TYPE: + if options.accept_host_key: + repo_opts += ' accept_hostkey=yes' + + if options.key_file: + repo_opts += 
' key_file=%s' % options.key_file + + path = utils.plugins.module_finder.find_plugin(options.module_name) + if path is None: + sys.stderr.write("module '%s' not found.\n" % options.module_name) + return 1 + + bin_path = os.path.dirname(os.path.abspath(__file__)) + cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % ( + bin_path, inv_opts, base_opts, options.module_name, repo_opts + ) + + for ev in options.extra_vars: + cmd += ' -e "%s"' % ev + + if options.sleep: + try: + secs = random.randint(0,int(options.sleep)); + except ValueError: + parser.error("%s is not a number." % options.sleep) + return 1 + + print >>sys.stderr, "Sleeping for %d seconds..." % secs + time.sleep(secs); + + + # RUN THE CHECKOUT COMMAND + rc, out, err = cmd_functions.run_cmd(cmd, live=True) + + if rc != 0: + if options.force: + print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook." + else: + return rc + elif options.ifchanged and '"changed": true' not in out: + print "Repository has not changed, quitting." + return 0 + + playbook = select_playbook(options.dest, args) + + if playbook is None: + print >>sys.stderr, "Could not find a playbook to run." 
+ return 1 + + cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook) + if options.vault_password_file: + cmd += " --vault-password-file=%s" % options.vault_password_file + if options.inventory: + cmd += ' -i "%s"' % options.inventory + for ev in options.extra_vars: + cmd += ' -e "%s"' % ev + if options.ask_sudo_pass: + cmd += ' -K' + if options.tags: + cmd += ' -t "%s"' % options.tags + os.chdir(options.dest) + + # RUN THE PLAYBOOK COMMAND + rc, out, err = cmd_functions.run_cmd(cmd, live=True) + + if options.purge: + os.chdir('/') + try: + shutil.rmtree(options.dest) + except Exception, e: + print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e)) + + return rc + +if __name__ == '__main__': + try: + sys.exit(main(sys.argv[1:])) + except KeyboardInterrupt, e: + print >>sys.stderr, "Exit on user request.\n" + sys.exit(1) diff --git a/v1/bin/ansible-vault b/v1/bin/ansible-vault new file mode 100755 index 00000000000000..22cfc0e14877af --- /dev/null +++ b/v1/bin/ansible-vault @@ -0,0 +1,241 @@ +#!/usr/bin/env python + +# (c) 2014, James Tanner +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# ansible-vault is a script that encrypts/decrypts YAML files. See +# http://docs.ansible.com/playbooks_vault.html for more details. 
+ +__requires__ = ['ansible'] +try: + import pkg_resources +except Exception: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. But we + # have code that better expresses the errors in the places where the code + # is actually used (the deps are optional for many code paths) so we don't + # want to fail here. + pass + +import os +import sys +import traceback + +import ansible.constants as C + +from ansible import utils +from ansible import errors +from ansible.utils.vault import VaultEditor + +from optparse import OptionParser + +#------------------------------------------------------------------------------------- +# Utility functions for parsing actions/options +#------------------------------------------------------------------------------------- + +VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view") + +def build_option_parser(action): + """ + Builds an option parser object based on the action + the user wants to execute. 
+ """ + + usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS) + epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) + OptionParser.format_epilog = lambda self, formatter: self.epilog + parser = OptionParser(usage=usage, epilog=epilog) + + if not action: + parser.print_help() + sys.exit() + + # options for all actions + #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use") + parser.add_option('--debug', dest='debug', action="store_true", help="debug") + parser.add_option('--vault-password-file', dest='password_file', + help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE) + + # options specific to actions + if action == "create": + parser.set_usage("usage: %prog create [options] file_name") + elif action == "decrypt": + parser.set_usage("usage: %prog decrypt [options] file_name") + elif action == "edit": + parser.set_usage("usage: %prog edit [options] file_name") + elif action == "view": + parser.set_usage("usage: %prog view [options] file_name") + elif action == "encrypt": + parser.set_usage("usage: %prog encrypt [options] file_name") + elif action == "rekey": + parser.set_usage("usage: %prog rekey [options] file_name") + + # done, return the parser + return parser + +def get_action(args): + """ + Get the action the user wants to execute from the + sys argv list. + """ + for i in range(0,len(args)): + arg = args[i] + if arg in VALID_ACTIONS: + del args[i] + return arg + return None + +def get_opt(options, k, defval=""): + """ + Returns an option from an Optparse values instance. 
+ """ + try: + data = getattr(options, k) + except: + return defval + if k == "roles_path": + if os.pathsep in data: + data = data.split(os.pathsep)[0] + return data + +#------------------------------------------------------------------------------------- +# Command functions +#------------------------------------------------------------------------------------- + +def execute_create(args, options, parser): + if len(args) > 1: + raise errors.AnsibleError("'create' does not accept more than one filename") + + if not options.password_file: + password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True) + else: + password = utils.read_vault_file(options.password_file) + + cipher = 'AES256' + if hasattr(options, 'cipher'): + cipher = options.cipher + + this_editor = VaultEditor(cipher, password, args[0]) + this_editor.create_file() + +def execute_decrypt(args, options, parser): + + if not options.password_file: + password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) + else: + password = utils.read_vault_file(options.password_file) + + cipher = 'AES256' + if hasattr(options, 'cipher'): + cipher = options.cipher + + for f in args: + this_editor = VaultEditor(cipher, password, f) + this_editor.decrypt_file() + + print "Decryption successful" + +def execute_edit(args, options, parser): + + if len(args) > 1: + raise errors.AnsibleError("edit does not accept more than one filename") + + if not options.password_file: + password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) + else: + password = utils.read_vault_file(options.password_file) + + cipher = None + + for f in args: + this_editor = VaultEditor(cipher, password, f) + this_editor.edit_file() + +def execute_view(args, options, parser): + + if len(args) > 1: + raise errors.AnsibleError("view does not accept more than one filename") + + if not options.password_file: + password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) + else: + password = 
utils.read_vault_file(options.password_file) + + cipher = None + + for f in args: + this_editor = VaultEditor(cipher, password, f) + this_editor.view_file() + +def execute_encrypt(args, options, parser): + + if not options.password_file: + password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True) + else: + password = utils.read_vault_file(options.password_file) + + cipher = 'AES256' + if hasattr(options, 'cipher'): + cipher = options.cipher + + for f in args: + this_editor = VaultEditor(cipher, password, f) + this_editor.encrypt_file() + + print "Encryption successful" + +def execute_rekey(args, options, parser): + + if not options.password_file: + password, __ = utils.ask_vault_passwords(ask_vault_pass=True) + else: + password = utils.read_vault_file(options.password_file) + + __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True) + + cipher = None + for f in args: + this_editor = VaultEditor(cipher, password, f) + this_editor.rekey_file(new_password) + + print "Rekey successful" + +#------------------------------------------------------------------------------------- +# MAIN +#------------------------------------------------------------------------------------- + +def main(): + + action = get_action(sys.argv) + parser = build_option_parser(action) + (options, args) = parser.parse_args() + + if not len(args): + raise errors.AnsibleError( + "The '%s' command requires a filename as the first argument" % action + ) + + # execute the desired action + try: + fn = globals()["execute_%s" % action] + fn(args, options, parser) + except Exception, err: + if options.debug: + print traceback.format_exc() + print "ERROR:",err + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/test/units/README.md b/v1/tests/README.md similarity index 100% rename from test/units/README.md rename to v1/tests/README.md diff --git a/test/units/TestConstants.py b/v1/tests/TestConstants.py 
similarity index 100% rename from test/units/TestConstants.py rename to v1/tests/TestConstants.py diff --git a/test/units/TestFilters.py b/v1/tests/TestFilters.py similarity index 100% rename from test/units/TestFilters.py rename to v1/tests/TestFilters.py diff --git a/test/units/TestInventory.py b/v1/tests/TestInventory.py similarity index 100% rename from test/units/TestInventory.py rename to v1/tests/TestInventory.py diff --git a/test/units/TestModuleUtilsBasic.py b/v1/tests/TestModuleUtilsBasic.py similarity index 100% rename from test/units/TestModuleUtilsBasic.py rename to v1/tests/TestModuleUtilsBasic.py diff --git a/test/units/TestModuleUtilsDatabase.py b/v1/tests/TestModuleUtilsDatabase.py similarity index 100% rename from test/units/TestModuleUtilsDatabase.py rename to v1/tests/TestModuleUtilsDatabase.py diff --git a/test/units/TestModules.py b/v1/tests/TestModules.py similarity index 100% rename from test/units/TestModules.py rename to v1/tests/TestModules.py diff --git a/test/units/TestPlayVarsFiles.py b/v1/tests/TestPlayVarsFiles.py similarity index 100% rename from test/units/TestPlayVarsFiles.py rename to v1/tests/TestPlayVarsFiles.py diff --git a/test/units/TestSynchronize.py b/v1/tests/TestSynchronize.py similarity index 100% rename from test/units/TestSynchronize.py rename to v1/tests/TestSynchronize.py diff --git a/test/units/TestUtils.py b/v1/tests/TestUtils.py similarity index 100% rename from test/units/TestUtils.py rename to v1/tests/TestUtils.py diff --git a/test/units/TestUtilsStringFunctions.py b/v1/tests/TestUtilsStringFunctions.py similarity index 100% rename from test/units/TestUtilsStringFunctions.py rename to v1/tests/TestUtilsStringFunctions.py diff --git a/test/units/TestVault.py b/v1/tests/TestVault.py similarity index 100% rename from test/units/TestVault.py rename to v1/tests/TestVault.py diff --git a/test/units/TestVaultEditor.py b/v1/tests/TestVaultEditor.py similarity index 100% rename from test/units/TestVaultEditor.py rename 
to v1/tests/TestVaultEditor.py diff --git a/test/units/ansible.cfg b/v1/tests/ansible.cfg similarity index 100% rename from test/units/ansible.cfg rename to v1/tests/ansible.cfg diff --git a/test/units/inventory_test_data/ansible_hosts b/v1/tests/inventory_test_data/ansible_hosts similarity index 100% rename from test/units/inventory_test_data/ansible_hosts rename to v1/tests/inventory_test_data/ansible_hosts diff --git a/test/units/inventory_test_data/broken.yml b/v1/tests/inventory_test_data/broken.yml similarity index 100% rename from test/units/inventory_test_data/broken.yml rename to v1/tests/inventory_test_data/broken.yml diff --git a/test/units/inventory_test_data/common_vars.yml b/v1/tests/inventory_test_data/common_vars.yml similarity index 100% rename from test/units/inventory_test_data/common_vars.yml rename to v1/tests/inventory_test_data/common_vars.yml diff --git a/test/units/inventory_test_data/complex_hosts b/v1/tests/inventory_test_data/complex_hosts similarity index 100% rename from test/units/inventory_test_data/complex_hosts rename to v1/tests/inventory_test_data/complex_hosts diff --git a/test/units/inventory_test_data/encrypted.yml b/v1/tests/inventory_test_data/encrypted.yml similarity index 100% rename from test/units/inventory_test_data/encrypted.yml rename to v1/tests/inventory_test_data/encrypted.yml diff --git a/test/units/inventory_test_data/hosts_list.yml b/v1/tests/inventory_test_data/hosts_list.yml similarity index 100% rename from test/units/inventory_test_data/hosts_list.yml rename to v1/tests/inventory_test_data/hosts_list.yml diff --git a/test/units/inventory_test_data/inventory/test_alpha_end_before_beg b/v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg similarity index 100% rename from test/units/inventory_test_data/inventory/test_alpha_end_before_beg rename to v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg diff --git a/test/units/inventory_test_data/inventory/test_combined_range 
b/v1/tests/inventory_test_data/inventory/test_combined_range similarity index 100% rename from test/units/inventory_test_data/inventory/test_combined_range rename to v1/tests/inventory_test_data/inventory/test_combined_range diff --git a/test/units/inventory_test_data/inventory/test_incorrect_format b/v1/tests/inventory_test_data/inventory/test_incorrect_format similarity index 100% rename from test/units/inventory_test_data/inventory/test_incorrect_format rename to v1/tests/inventory_test_data/inventory/test_incorrect_format diff --git a/test/units/inventory_test_data/inventory/test_incorrect_range b/v1/tests/inventory_test_data/inventory/test_incorrect_range similarity index 100% rename from test/units/inventory_test_data/inventory/test_incorrect_range rename to v1/tests/inventory_test_data/inventory/test_incorrect_range diff --git a/test/units/inventory_test_data/inventory/test_leading_range b/v1/tests/inventory_test_data/inventory/test_leading_range similarity index 100% rename from test/units/inventory_test_data/inventory/test_leading_range rename to v1/tests/inventory_test_data/inventory/test_leading_range diff --git a/test/units/inventory_test_data/inventory/test_missing_end b/v1/tests/inventory_test_data/inventory/test_missing_end similarity index 100% rename from test/units/inventory_test_data/inventory/test_missing_end rename to v1/tests/inventory_test_data/inventory/test_missing_end diff --git a/test/units/inventory_test_data/inventory_api.py b/v1/tests/inventory_test_data/inventory_api.py similarity index 100% rename from test/units/inventory_test_data/inventory_api.py rename to v1/tests/inventory_test_data/inventory_api.py diff --git a/test/units/inventory_test_data/inventory_dir/0hosts b/v1/tests/inventory_test_data/inventory_dir/0hosts similarity index 100% rename from test/units/inventory_test_data/inventory_dir/0hosts rename to v1/tests/inventory_test_data/inventory_dir/0hosts diff --git a/test/units/inventory_test_data/inventory_dir/1mythology 
b/v1/tests/inventory_test_data/inventory_dir/1mythology similarity index 100% rename from test/units/inventory_test_data/inventory_dir/1mythology rename to v1/tests/inventory_test_data/inventory_dir/1mythology diff --git a/test/units/inventory_test_data/inventory_dir/2levels b/v1/tests/inventory_test_data/inventory_dir/2levels similarity index 100% rename from test/units/inventory_test_data/inventory_dir/2levels rename to v1/tests/inventory_test_data/inventory_dir/2levels diff --git a/test/units/inventory_test_data/inventory_dir/3comments b/v1/tests/inventory_test_data/inventory_dir/3comments similarity index 100% rename from test/units/inventory_test_data/inventory_dir/3comments rename to v1/tests/inventory_test_data/inventory_dir/3comments diff --git a/test/units/inventory_test_data/inventory_dir/4skip_extensions.ini b/v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini similarity index 100% rename from test/units/inventory_test_data/inventory_dir/4skip_extensions.ini rename to v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini diff --git a/test/units/inventory_test_data/large_range b/v1/tests/inventory_test_data/large_range similarity index 100% rename from test/units/inventory_test_data/large_range rename to v1/tests/inventory_test_data/large_range diff --git a/test/units/inventory_test_data/restrict_pattern b/v1/tests/inventory_test_data/restrict_pattern similarity index 100% rename from test/units/inventory_test_data/restrict_pattern rename to v1/tests/inventory_test_data/restrict_pattern diff --git a/test/units/inventory_test_data/simple_hosts b/v1/tests/inventory_test_data/simple_hosts similarity index 100% rename from test/units/inventory_test_data/simple_hosts rename to v1/tests/inventory_test_data/simple_hosts diff --git a/test/units/module_tests/TestApt.py b/v1/tests/module_tests/TestApt.py similarity index 100% rename from test/units/module_tests/TestApt.py rename to v1/tests/module_tests/TestApt.py diff --git 
a/test/units/module_tests/TestDocker.py b/v1/tests/module_tests/TestDocker.py similarity index 100% rename from test/units/module_tests/TestDocker.py rename to v1/tests/module_tests/TestDocker.py diff --git a/test/units/vault_test_data/foo-ansible-1.0.yml b/v1/tests/vault_test_data/foo-ansible-1.0.yml similarity index 100% rename from test/units/vault_test_data/foo-ansible-1.0.yml rename to v1/tests/vault_test_data/foo-ansible-1.0.yml diff --git a/test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml b/v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml similarity index 100% rename from test/units/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml rename to v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml diff --git a/test/units/vault_test_data/foo-ansible-1.1.yml b/v1/tests/vault_test_data/foo-ansible-1.1.yml similarity index 100% rename from test/units/vault_test_data/foo-ansible-1.1.yml rename to v1/tests/vault_test_data/foo-ansible-1.1.yml diff --git a/v2/README-tests.md b/v2/README-tests.md deleted file mode 100644 index 956160b653a0ee..00000000000000 --- a/v2/README-tests.md +++ /dev/null @@ -1,33 +0,0 @@ -Ansible Test System -=================== - -Folders -======= - -test ----- - -Unit tests that test small pieces of code not suited for the integration test -layer, usually very API based, and should leverage mock interfaces rather than -producing side effects. - -Playbook engine code is better suited for integration tests. - -Requirements: sudo pip install paramiko PyYAML jinja2 httplib2 passlib unittest2 mock - -integration ------------ - -Integration test layer, constructed using playbooks. - -Some tests may require cloud credentials, others will not, and destructive -tests are separated from non-destructive so a subset can be run on development -machines. - -learn more ----------- - -hop into a subdirectory and see the associated README.md for more info. 
- - - diff --git a/v2/ansible/__init__.py b/v2/ansible/__init__.py deleted file mode 100644 index 8637adb54d6c16..00000000000000 --- a/v2/ansible/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -__version__ = '2.0' diff --git a/v2/ansible/inventory/host.py b/v2/ansible/inventory/host.py deleted file mode 100644 index 29d6afd991208a..00000000000000 --- a/v2/ansible/inventory/host.py +++ /dev/null @@ -1,130 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible import constants as C -from ansible.inventory.group import Group -from ansible.utils.vars import combine_vars - -__all__ = ['Host'] - -class Host: - ''' a single ansible host ''' - - #__slots__ = [ 'name', 'vars', 'groups' ] - - def __getstate__(self): - return self.serialize() - - def __setstate__(self, data): - return self.deserialize(data) - - def __eq__(self, other): - return self.name == other.name - - def serialize(self): - groups = [] - for group in self.groups: - groups.append(group.serialize()) - - return dict( - name=self.name, - vars=self.vars.copy(), - ipv4_address=self.ipv4_address, - ipv6_address=self.ipv6_address, - port=self.port, - gathered_facts=self._gathered_facts, - groups=groups, - ) - - def deserialize(self, data): - self.__init__() - - self.name = data.get('name') - self.vars = data.get('vars', dict()) - self.ipv4_address = data.get('ipv4_address', '') - self.ipv6_address = data.get('ipv6_address', '') - self.port = data.get('port') - - groups = data.get('groups', []) - for group_data in groups: - g = Group() - g.deserialize(group_data) - self.groups.append(g) - - def __init__(self, name=None, port=None): - - self.name = name - self.vars = {} - self.groups = [] - - self.ipv4_address = name - self.ipv6_address = name - - if port and port != C.DEFAULT_REMOTE_PORT: - self.port = int(port) - else: - self.port = C.DEFAULT_REMOTE_PORT - - self._gathered_facts = False - - def __repr__(self): - return self.get_name() - - def get_name(self): - return self.name - - @property - def gathered_facts(self): - return self._gathered_facts - - def set_gathered_facts(self, gathered): - self._gathered_facts = gathered - - def add_group(self, group): - - self.groups.append(group) - - def set_variable(self, key, value): - - self.vars[key]=value - - def get_groups(self): - - groups = {} - for g in self.groups: - groups[g.name] 
= g - ancestors = g.get_ancestors() - for a in ancestors: - groups[a.name] = a - return groups.values() - - def get_vars(self): - - results = {} - groups = self.get_groups() - for group in sorted(groups, key=lambda g: g.depth): - results = combine_vars(results, group.get_vars()) - results = combine_vars(results, self.vars) - results['inventory_hostname'] = self.name - results['inventory_hostname_short'] = self.name.split('.')[0] - results['group_names'] = sorted([ g.name for g in groups if g.name != 'all']) - return results - diff --git a/v2/ansible/modules/core b/v2/ansible/modules/core deleted file mode 160000 index 0341ddd35ed5ff..00000000000000 --- a/v2/ansible/modules/core +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 0341ddd35ed5ff477ad5de2488d947255ce86259 diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras deleted file mode 160000 index dd80fa221ce0ad..00000000000000 --- a/v2/ansible/modules/extras +++ /dev/null @@ -1 +0,0 @@ -Subproject commit dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6dc diff --git a/v2/ansible/playbook/__init__.py b/v2/ansible/playbook/__init__.py deleted file mode 100644 index 40e6638f23921e..00000000000000 --- a/v2/ansible/playbook/__init__.py +++ /dev/null @@ -1,85 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os - -from ansible.errors import AnsibleError, AnsibleParserError -from ansible.parsing import DataLoader -from ansible.playbook.attribute import Attribute, FieldAttribute -from ansible.playbook.play import Play -from ansible.playbook.playbook_include import PlaybookInclude -from ansible.plugins import push_basedir - - -__all__ = ['Playbook'] - - -class Playbook: - - def __init__(self, loader): - # Entries in the datastructure of a playbook may - # be either a play or an include statement - self._entries = [] - self._basedir = os.getcwd() - self._loader = loader - - @staticmethod - def load(file_name, variable_manager=None, loader=None): - pb = Playbook(loader=loader) - pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager) - return pb - - def _load_playbook_data(self, file_name, variable_manager): - - if os.path.isabs(file_name): - self._basedir = os.path.dirname(file_name) - else: - self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name))) - - # set the loaders basedir - self._loader.set_basedir(self._basedir) - - # also add the basedir to the list of module directories - push_basedir(self._basedir) - - ds = self._loader.load_from_file(os.path.basename(file_name)) - if not isinstance(ds, list): - raise AnsibleParserError("playbooks must be a list of plays", obj=ds) - - # Parse the playbook entries. 
For plays, we simply parse them - # using the Play() object, and includes are parsed using the - # PlaybookInclude() object - for entry in ds: - if not isinstance(entry, dict): - raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry) - - if 'include' in entry: - pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader) - self._entries.extend(pb._entries) - else: - entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader) - self._entries.append(entry_obj) - - def get_loader(self): - return self._loader - - def get_plays(self): - return self._entries[:] diff --git a/v2/ansible/playbook/play.py b/v2/ansible/playbook/play.py deleted file mode 100644 index b99c01fdf74e63..00000000000000 --- a/v2/ansible/playbook/play.py +++ /dev/null @@ -1,263 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleError, AnsibleParserError - -from ansible.playbook.attribute import Attribute, FieldAttribute -from ansible.playbook.base import Base -from ansible.playbook.become import Become -from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles -from ansible.playbook.role import Role -from ansible.playbook.taggable import Taggable -from ansible.playbook.block import Block - -from ansible.utils.vars import combine_vars - - -__all__ = ['Play'] - - -class Play(Base, Taggable, Become): - - """ - A play is a language feature that represents a list of roles and/or - task/handler blocks to execute on a given set of hosts. - - Usage: - - Play.load(datastructure) -> Play - Play.something(...) - """ - - # ================================================================================= - # Connection-Related Attributes - - # TODO: generalize connection - _accelerate = FieldAttribute(isa='bool', default=False) - _accelerate_ipv6 = FieldAttribute(isa='bool', default=False) - _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port - - # Connection - _gather_facts = FieldAttribute(isa='string', default='smart') - _hosts = FieldAttribute(isa='list', default=[], required=True) - _name = FieldAttribute(isa='string', default='') - - # Variable Attributes - _vars_files = FieldAttribute(isa='list', default=[]) - _vars_prompt = FieldAttribute(isa='dict', default=dict()) - _vault_password = FieldAttribute(isa='string') - - # Block (Task) Lists Attributes - _handlers = FieldAttribute(isa='list', default=[]) - _pre_tasks = FieldAttribute(isa='list', default=[]) - _post_tasks = FieldAttribute(isa='list', default=[]) - _tasks = FieldAttribute(isa='list', default=[]) - - # Role Attributes - _roles = FieldAttribute(isa='list', default=[]) - - # Flag/Setting Attributes - _any_errors_fatal = 
FieldAttribute(isa='bool', default=False) - _max_fail_percentage = FieldAttribute(isa='string', default='0') - _serial = FieldAttribute(isa='int', default=0) - _strategy = FieldAttribute(isa='string', default='linear') - - # ================================================================================= - - def __init__(self): - super(Play, self).__init__() - - def __repr__(self): - return self.get_name() - - def get_name(self): - ''' return the name of the Play ''' - return "PLAY: %s" % self._attributes.get('name') - - @staticmethod - def load(data, variable_manager=None, loader=None): - p = Play() - return p.load_data(data, variable_manager=variable_manager, loader=loader) - - def preprocess_data(self, ds): - ''' - Adjusts play datastructure to cleanup old/legacy items - ''' - - assert isinstance(ds, dict) - - # The use of 'user' in the Play datastructure was deprecated to - # line up with the same change for Tasks, due to the fact that - # 'user' conflicted with the user module. - if 'user' in ds: - # this should never happen, but error out with a helpful message - # to the user if it does... - if 'remote_user' in ds: - raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds) - - ds['remote_user'] = ds['user'] - del ds['user'] - - return super(Play, self).preprocess_data(ds) - - def _load_vars(self, attr, ds): - ''' - Vars in a play can be specified either as a dictionary directly, or - as a list of dictionaries. If the later, this method will turn the - list into a single dictionary. 
- ''' - - try: - if isinstance(ds, dict): - return ds - elif isinstance(ds, list): - all_vars = dict() - for item in ds: - if not isinstance(item, dict): - raise ValueError - all_vars = combine_vars(all_vars, item) - return all_vars - else: - raise ValueError - except ValueError: - raise AnsibleParserError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds) - - def _load_tasks(self, attr, ds): - ''' - Loads a list of blocks from a list which may be mixed tasks/blocks. - Bare tasks outside of a block are given an implicit block. - ''' - return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) - - def _load_pre_tasks(self, attr, ds): - ''' - Loads a list of blocks from a list which may be mixed tasks/blocks. - Bare tasks outside of a block are given an implicit block. - ''' - return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) - - def _load_post_tasks(self, attr, ds): - ''' - Loads a list of blocks from a list which may be mixed tasks/blocks. - Bare tasks outside of a block are given an implicit block. - ''' - return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader) - - def _load_handlers(self, attr, ds): - ''' - Loads a list of blocks from a list which may be mixed handlers/blocks. - Bare handlers outside of a block are given an implicit block. 
- ''' - return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader) - - def _load_roles(self, attr, ds): - ''' - Loads and returns a list of RoleInclude objects from the datastructure - list of role definitions and creates the Role from those objects - ''' - - role_includes = load_list_of_roles(ds, variable_manager=self._variable_manager, loader=self._loader) - - roles = [] - for ri in role_includes: - roles.append(Role.load(ri)) - return roles - - # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set - - def _compile_roles(self): - ''' - Handles the role compilation step, returning a flat list of tasks - with the lowest level dependencies first. For example, if a role R - has a dependency D1, which also has a dependency D2, the tasks from - D2 are merged first, followed by D1, and lastly by the tasks from - the parent role R last. This is done for all roles in the Play. - ''' - - block_list = [] - - if len(self.roles) > 0: - for r in self.roles: - block_list.extend(r.compile(play=self)) - - return block_list - - def compile(self): - ''' - Compiles and returns the task list for this play, compiled from the - roles (which are themselves compiled recursively) and/or the list of - tasks specified in the play. 
- ''' - - block_list = [] - - block_list.extend(self.pre_tasks) - block_list.extend(self._compile_roles()) - block_list.extend(self.tasks) - block_list.extend(self.post_tasks) - - return block_list - - def get_vars(self): - return self.vars.copy() - - def get_vars_files(self): - return self.vars_files - - def get_handlers(self): - return self.handlers[:] - - def get_roles(self): - return self.roles[:] - - def get_tasks(self): - tasklist = [] - for task in self.pre_tasks + self.tasks + self.post_tasks: - if isinstance(task, Block): - tasklist.append(task.block + task.rescue + task.always) - else: - tasklist.append(task) - return tasklist - - def serialize(self): - data = super(Play, self).serialize() - - roles = [] - for role in self.get_roles(): - roles.append(role.serialize()) - data['roles'] = roles - - return data - - def deserialize(self, data): - super(Play, self).deserialize(data) - - if 'roles' in data: - role_data = data.get('roles', []) - roles = [] - for role in role_data: - r = Role() - r.deserialize(role) - roles.append(r) - - setattr(self, 'roles', roles) - del data['roles'] - diff --git a/v2/ansible/playbook/task.py b/v2/ansible/playbook/task.py deleted file mode 100644 index 060602579851d3..00000000000000 --- a/v2/ansible/playbook/task.py +++ /dev/null @@ -1,310 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleError - -from ansible.parsing.mod_args import ModuleArgsParser -from ansible.parsing.splitter import parse_kv -from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping - -from ansible.plugins import module_loader, lookup_loader - -from ansible.playbook.attribute import Attribute, FieldAttribute -from ansible.playbook.base import Base -from ansible.playbook.become import Become -from ansible.playbook.block import Block -from ansible.playbook.conditional import Conditional -from ansible.playbook.role import Role -from ansible.playbook.taggable import Taggable - -__all__ = ['Task'] - -class Task(Base, Conditional, Taggable, Become): - - """ - A task is a language feature that represents a call to a module, with given arguments and other parameters. - A handler is a subclass of a task. - - Usage: - - Task.load(datastructure) -> Task - Task.something(...) 
- """ - - # ================================================================================= - # ATTRIBUTES - # load_ and - # validate_ - # will be used if defined - # might be possible to define others - - _args = FieldAttribute(isa='dict', default=dict()) - _action = FieldAttribute(isa='string') - - _always_run = FieldAttribute(isa='bool') - _any_errors_fatal = FieldAttribute(isa='bool') - _async = FieldAttribute(isa='int', default=0) - _changed_when = FieldAttribute(isa='string') - _delay = FieldAttribute(isa='int', default=5) - _delegate_to = FieldAttribute(isa='string') - _failed_when = FieldAttribute(isa='string') - _first_available_file = FieldAttribute(isa='list') - _ignore_errors = FieldAttribute(isa='bool') - - _loop = FieldAttribute(isa='string', private=True) - _loop_args = FieldAttribute(isa='list', private=True) - _local_action = FieldAttribute(isa='string') - - # FIXME: this should not be a Task - _meta = FieldAttribute(isa='string') - - _name = FieldAttribute(isa='string', default='') - - _notify = FieldAttribute(isa='list') - _poll = FieldAttribute(isa='int') - _register = FieldAttribute(isa='string') - _retries = FieldAttribute(isa='int', default=1) - _run_once = FieldAttribute(isa='bool') - _until = FieldAttribute(isa='list') # ? 
- - def __init__(self, block=None, role=None, task_include=None): - ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' - - self._block = block - self._role = role - self._task_include = task_include - - super(Task, self).__init__() - - def get_name(self): - ''' return the name of the task ''' - - if self._role and self.name: - return "%s : %s" % (self._role.get_name(), self.name) - elif self.name: - return self.name - else: - flattened_args = self._merge_kv(self.args) - if self._role: - return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args) - else: - return "%s %s" % (self.action, flattened_args) - - def _merge_kv(self, ds): - if ds is None: - return "" - elif isinstance(ds, basestring): - return ds - elif isinstance(ds, dict): - buf = "" - for (k,v) in ds.iteritems(): - if k.startswith('_'): - continue - buf = buf + "%s=%s " % (k,v) - buf = buf.strip() - return buf - - @staticmethod - def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None): - t = Task(block=block, role=role, task_include=task_include) - return t.load_data(data, variable_manager=variable_manager, loader=loader) - - def __repr__(self): - ''' returns a human readable representation of the task ''' - return "TASK: %s" % self.get_name() - - def _preprocess_loop(self, ds, new_ds, k, v): - ''' take a lookup plugin name and store it correctly ''' - - loop_name = k.replace("with_", "") - if new_ds.get('loop') is not None: - raise AnsibleError("duplicate loop in task: %s" % loop_name) - new_ds['loop'] = loop_name - new_ds['loop_args'] = v - - def preprocess_data(self, ds): - ''' - tasks are especially complex arguments so need pre-processing. - keep it short. 
- ''' - - assert isinstance(ds, dict) - - # the new, cleaned datastructure, which will have legacy - # items reduced to a standard structure suitable for the - # attributes of the task class - new_ds = AnsibleMapping() - if isinstance(ds, AnsibleBaseYAMLObject): - new_ds.ansible_pos = ds.ansible_pos - - # use the args parsing class to determine the action, args, - # and the delegate_to value from the various possible forms - # supported as legacy - args_parser = ModuleArgsParser(task_ds=ds) - (action, args, delegate_to) = args_parser.parse() - - new_ds['action'] = action - new_ds['args'] = args - new_ds['delegate_to'] = delegate_to - - for (k,v) in ds.iteritems(): - if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell': - # we don't want to re-assign these values, which were - # determined by the ModuleArgsParser() above - continue - elif k.replace("with_", "") in lookup_loader: - self._preprocess_loop(ds, new_ds, k, v) - else: - new_ds[k] = v - - return super(Task, self).preprocess_data(new_ds) - - def post_validate(self, templar): - ''' - Override of base class post_validate, to also do final validation on - the block and task include (if any) to which this task belongs. 
- ''' - - if self._block: - self._block.post_validate(templar) - if self._task_include: - self._task_include.post_validate(templar) - - super(Task, self).post_validate(templar) - - def get_vars(self): - all_vars = self.vars.copy() - if self._block: - all_vars.update(self._block.get_vars()) - if self._task_include: - all_vars.update(self._task_include.get_vars()) - - all_vars.update(self.serialize()) - - if 'tags' in all_vars: - del all_vars['tags'] - if 'when' in all_vars: - del all_vars['when'] - return all_vars - - def copy(self, exclude_block=False): - new_me = super(Task, self).copy() - - new_me._block = None - if self._block and not exclude_block: - new_me._block = self._block.copy() - - new_me._role = None - if self._role: - new_me._role = self._role - - new_me._task_include = None - if self._task_include: - new_me._task_include = self._task_include.copy() - - return new_me - - def serialize(self): - data = super(Task, self).serialize() - - if self._block: - data['block'] = self._block.serialize() - - if self._role: - data['role'] = self._role.serialize() - - if self._task_include: - data['task_include'] = self._task_include.serialize() - - return data - - def deserialize(self, data): - - # import is here to avoid import loops - #from ansible.playbook.task_include import TaskInclude - - block_data = data.get('block') - - if block_data: - b = Block() - b.deserialize(block_data) - self._block = b - del data['block'] - - role_data = data.get('role') - if role_data: - r = Role() - r.deserialize(role_data) - self._role = r - del data['role'] - - ti_data = data.get('task_include') - if ti_data: - #ti = TaskInclude() - ti = Task() - ti.deserialize(ti_data) - self._task_include = ti - del data['task_include'] - - super(Task, self).deserialize(data) - - def evaluate_conditional(self, all_vars): - if self._block is not None: - if not self._block.evaluate_conditional(all_vars): - return False - if self._task_include is not None: - if not 
self._task_include.evaluate_conditional(all_vars): - return False - return super(Task, self).evaluate_conditional(all_vars) - - def set_loader(self, loader): - ''' - Sets the loader on this object and recursively on parent, child objects. - This is used primarily after the Task has been serialized/deserialized, which - does not preserve the loader. - ''' - - self._loader = loader - - if self._block: - self._block.set_loader(loader) - if self._task_include: - self._task_include.set_loader(loader) - - def _get_parent_attribute(self, attr, extend=False): - ''' - Generic logic to get the attribute or parent attribute for a task value. - ''' - value = self._attributes[attr] - if self._block and (not value or extend): - parent_value = getattr(self._block, attr) - if extend: - value = self._extend_value(value, parent_value) - else: - value = parent_value - if self._task_include and (not value or extend): - parent_value = getattr(self._task_include, attr) - if extend: - value = self._extend_value(value, parent_value) - else: - value = parent_value - return value - diff --git a/v2/ansible/utils/vault.py b/v2/ansible/utils/vault.py deleted file mode 100644 index 5c704afac59b2b..00000000000000 --- a/v2/ansible/utils/vault.py +++ /dev/null @@ -1,56 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import subprocess - -from ansible import constants as C -from ansible.errors import AnsibleError -from ansible.utils.path import is_executable - -def read_vault_file(vault_password_file): - """ - Read a vault password from a file or if executable, execute the script and - retrieve password from STDOUT - """ - - this_path = os.path.realpath(os.path.expanduser(vault_password_file)) - if not os.path.exists(this_path): - raise AnsibleError("The vault password file %s was not found" % this_path) - - if is_executable(this_path): - try: - # STDERR not captured to make it easier for users to prompt for input in their scripts - p = subprocess.Popen(this_path, stdout=subprocess.PIPE) - except OSError as e: - raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e)) - stdout, stderr = p.communicate() - vault_pass = stdout.strip('\r\n') - else: - try: - f = open(this_path, "rb") - vault_pass=f.read().strip() - f.close() - except (OSError, IOError) as e: - raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e)) - - return vault_pass - diff --git a/v2/bin/ansible b/v2/bin/ansible deleted file mode 100755 index 467dd505a2e17a..00000000000000 --- a/v2/bin/ansible +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -######################################################## -from __future__ import (absolute_import) -__metaclass__ = type - -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. - pass - -import os -import sys - -from ansible.errors import AnsibleError, AnsibleOptionsError -from ansible.utils.display import Display - -######################################################## - -if __name__ == '__main__': - - cli = None - display = Display() - me = os.path.basename(__file__) - - try: - if me == 'ansible-playbook': - from ansible.cli.playbook import PlaybookCLI as mycli - elif me == 'ansible': - from ansible.cli.adhoc import AdHocCLI as mycli - elif me == 'ansible-pull': - from ansible.cli.pull import PullCLI as mycli - elif me == 'ansible-doc': - from ansible.cli.doc import DocCLI as mycli - elif me == 'ansible-vault': - from ansible.cli.vault import VaultCLI as mycli - elif me == 'ansible-galaxy': - from ansible.cli.galaxy import GalaxyCLI as mycli - - cli = mycli(sys.argv, display=display) - if cli: - cli.parse() - sys.exit(cli.run()) - else: - raise AnsibleError("Program not implemented: %s" % me) - - except AnsibleOptionsError as e: - cli.parser.print_help() - display.display(str(e), stderr=True, color='red') - sys.exit(1) - except AnsibleError as e: - display.display(str(e), stderr=True, color='red') - sys.exit(2) - except KeyboardInterrupt: - display.error("interrupted") - sys.exit(4) diff --git a/v2/bin/ansible-doc b/v2/bin/ansible-doc deleted file mode 120000 
index cabb1f519aad06..00000000000000 --- a/v2/bin/ansible-doc +++ /dev/null @@ -1 +0,0 @@ -ansible \ No newline at end of file diff --git a/v2/bin/ansible-galaxy b/v2/bin/ansible-galaxy deleted file mode 120000 index cabb1f519aad06..00000000000000 --- a/v2/bin/ansible-galaxy +++ /dev/null @@ -1 +0,0 @@ -ansible \ No newline at end of file diff --git a/v2/bin/ansible-playbook b/v2/bin/ansible-playbook deleted file mode 120000 index cabb1f519aad06..00000000000000 --- a/v2/bin/ansible-playbook +++ /dev/null @@ -1 +0,0 @@ -ansible \ No newline at end of file diff --git a/v2/bin/ansible-pull b/v2/bin/ansible-pull deleted file mode 120000 index cabb1f519aad06..00000000000000 --- a/v2/bin/ansible-pull +++ /dev/null @@ -1 +0,0 @@ -ansible \ No newline at end of file diff --git a/v2/bin/ansible-vault b/v2/bin/ansible-vault deleted file mode 120000 index cabb1f519aad06..00000000000000 --- a/v2/bin/ansible-vault +++ /dev/null @@ -1 +0,0 @@ -ansible \ No newline at end of file diff --git a/v2/hacking/README.md b/v2/hacking/README.md deleted file mode 100644 index 6d65464eee83bc..00000000000000 --- a/v2/hacking/README.md +++ /dev/null @@ -1,48 +0,0 @@ -'Hacking' directory tools -========================= - -Env-setup ---------- - -The 'env-setup' script modifies your environment to allow you to run -ansible from a git checkout using python 2.6+. (You may not use -python 3 at this time). - -First, set up your environment to run from the checkout: - - $ source ./hacking/env-setup - -You will need some basic prerequisites installed. If you do not already have them -and do not wish to install them from your operating system package manager, you -can install them from pip - - $ easy_install pip # if pip is not already available - $ pip install pyyaml jinja2 nose passlib pycrypto - -From there, follow ansible instructions on docs.ansible.com as normal. 
- -Test-module ------------ - -'test-module' is a simple program that allows module developers (or testers) to run -a module outside of the ansible program, locally, on the current machine. - -Example: - - $ ./hacking/test-module -m library/commands/shell -a "echo hi" - -This is a good way to insert a breakpoint into a module, for instance. - -Module-formatter ----------------- - -The module formatter is a script used to generate manpages and online -module documentation. This is used by the system makefiles and rarely -needs to be run directly. - -Authors -------- -'authors' is a simple script that generates a list of everyone who has -contributed code to the ansible repository. - - diff --git a/v2/hacking/authors.sh b/v2/hacking/authors.sh deleted file mode 100755 index 7c97840b2fbc83..00000000000000 --- a/v2/hacking/authors.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -# script from http://stackoverflow.com/questions/12133583 -set -e - -# Get a list of authors ordered by number of commits -# and remove the commit count column -AUTHORS=$(git --no-pager shortlog -nse | cut -f 2- | sort -f) -if [ -z "$AUTHORS" ] ; then - echo "Authors list was empty" - exit 1 -fi - -# Display the authors list and write it to the file -echo "$AUTHORS" | tee "$(git rev-parse --show-toplevel)/AUTHORS.TXT" diff --git a/v2/hacking/env-setup b/v2/hacking/env-setup deleted file mode 100644 index 8f2c331fe46927..00000000000000 --- a/v2/hacking/env-setup +++ /dev/null @@ -1,78 +0,0 @@ -# usage: source hacking/env-setup [-q] -# modifies environment for running Ansible from checkout - -# Default values for shell variables we use -PYTHONPATH=${PYTHONPATH-""} -PATH=${PATH-""} -MANPATH=${MANPATH-""} -verbosity=${1-info} # Defaults to `info' if unspecified - -if [ "$verbosity" = -q ]; then - verbosity=silent -fi - -# When run using source as directed, $0 gets set to bash, so we must use $BASH_SOURCE -if [ -n "$BASH_SOURCE" ] ; then - HACKING_DIR=$(dirname "$BASH_SOURCE") -elif [ $(basename -- 
"$0") = "env-setup" ]; then - HACKING_DIR=$(dirname "$0") -# Works with ksh93 but not pdksh -elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then - HACKING_DIR=$(dirname "${.sh.file}") -else - HACKING_DIR="$PWD/hacking" -fi -# The below is an alternative to readlink -fn which doesn't exist on OS X -# Source: http://stackoverflow.com/a/1678636 -FULL_PATH=$(python -c "import os; print(os.path.realpath('$HACKING_DIR'))") -ANSIBLE_HOME=$(dirname "$FULL_PATH") - -PREFIX_PYTHONPATH="$ANSIBLE_HOME" -PREFIX_PATH="$ANSIBLE_HOME/bin" -PREFIX_MANPATH="$ANSIBLE_HOME/docs/man" - -expr "$PYTHONPATH" : "${PREFIX_PYTHONPATH}.*" > /dev/null || export PYTHONPATH="$PREFIX_PYTHONPATH:$PYTHONPATH" -expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || export PATH="$PREFIX_PATH:$PATH" -expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_MANPATH:$MANPATH" - -# -# Generate egg_info so that pkg_resources works -# - -# Do the work in a function so we don't repeat ourselves later -gen_egg_info() -{ - if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then - rm -r "$PREFIX_PYTHONPATH/ansible.egg-info" - fi - python setup.py egg_info -} - -if [ "$ANSIBLE_HOME" != "$PWD" ] ; then - current_dir="$PWD" -else - current_dir="$ANSIBLE_HOME" -fi -cd "$ANSIBLE_HOME" -#if [ "$verbosity" = silent ] ; then -# gen_egg_info > /dev/null 2>&1 -#else -# gen_egg_info -#fi -cd "$current_dir" - -if [ "$verbosity" != silent ] ; then - cat <<- EOF - - Setting up Ansible to run out of checkout... - - PATH=$PATH - PYTHONPATH=$PYTHONPATH - MANPATH=$MANPATH - - Remember, you may wish to specify your host file with -i - - Done! - - EOF -fi diff --git a/v2/hacking/env-setup.fish b/v2/hacking/env-setup.fish deleted file mode 100644 index 05fb60672d1c00..00000000000000 --- a/v2/hacking/env-setup.fish +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env fish -# usage: . 
./hacking/env-setup [-q] -# modifies environment for running Ansible from checkout -set HACKING_DIR (dirname (status -f)) -set FULL_PATH (python -c "import os; print(os.path.realpath('$HACKING_DIR'))") -set ANSIBLE_HOME (dirname $FULL_PATH) -set PREFIX_PYTHONPATH $ANSIBLE_HOME/lib -set PREFIX_PATH $ANSIBLE_HOME/bin -set PREFIX_MANPATH $ANSIBLE_HOME/docs/man - -# Set PYTHONPATH -if not set -q PYTHONPATH - set -gx PYTHONPATH $PREFIX_PYTHONPATH -else - switch PYTHONPATH - case "$PREFIX_PYTHONPATH*" - case "*" - echo "Appending PYTHONPATH" - set -gx PYTHONPATH "$PREFIX_PYTHONPATH:$PYTHONPATH" - end -end - -# Set PATH -if not contains $PREFIX_PATH $PATH - set -gx PATH $PREFIX_PATH $PATH -end - -# Set MANPATH -if not contains $PREFIX_MANPATH $MANPATH - if not set -q MANPATH - set -gx MANPATH $PREFIX_MANPATH - else - set -gx MANPATH $PREFIX_MANPATH $MANPATH - end -end - -set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library - -if set -q argv - switch $argv - case '-q' '--quiet' - case '*' - echo "" - echo "Setting up Ansible to run out of checkout..." - echo "" - echo "PATH=$PATH" - echo "PYTHONPATH=$PYTHONPATH" - echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY" - echo "MANPATH=$MANPATH" - echo "" - - echo "Remember, you may wish to specify your host file with -i" - echo "" - echo "Done!" - echo "" - end -end diff --git a/v2/hacking/get_library.py b/v2/hacking/get_library.py deleted file mode 100755 index 571183b688c490..00000000000000 --- a/v2/hacking/get_library.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python - -# (c) 2014, Will Thames -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# - -import ansible.constants as C -import sys - -def main(): - print C.DEFAULT_MODULE_PATH - return 0 - -if __name__ == '__main__': - sys.exit(main()) diff --git a/v2/hacking/module_formatter.py b/v2/hacking/module_formatter.py deleted file mode 100755 index e70eb982de041f..00000000000000 --- a/v2/hacking/module_formatter.py +++ /dev/null @@ -1,442 +0,0 @@ -#!/usr/bin/env python -# (c) 2012, Jan-Piet Mens -# (c) 2012-2014, Michael DeHaan and others -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -import os -import glob -import sys -import yaml -import codecs -import json -import ast -import re -import optparse -import time -import datetime -import subprocess -import cgi -from jinja2 import Environment, FileSystemLoader - -import ansible.utils -import ansible.utils.module_docs as module_docs - -##################################################################################### -# constants and paths - -# if a module is added in a version of Ansible older than this, don't print the version added information -# in the module documentation because everyone is assumed to be running something newer than this already. -TO_OLD_TO_BE_NOTABLE = 1.0 - -# Get parent directory of the directory this script lives in -MODULEDIR=os.path.abspath(os.path.join( - os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules' -)) - -# The name of the DOCUMENTATION template -EXAMPLE_YAML=os.path.abspath(os.path.join( - os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml' -)) - -_ITALIC = re.compile(r"I\(([^)]+)\)") -_BOLD = re.compile(r"B\(([^)]+)\)") -_MODULE = re.compile(r"M\(([^)]+)\)") -_URL = re.compile(r"U\(([^)]+)\)") -_CONST = re.compile(r"C\(([^)]+)\)") - -DEPRECATED = " (D)" -NOTCORE = " (E)" -##################################################################################### - -def rst_ify(text): - ''' convert symbols like I(this is in italics) to valid restructured text ''' - - t = _ITALIC.sub(r'*' + r"\1" + r"*", text) - t = _BOLD.sub(r'**' + r"\1" + r"**", t) - t = _MODULE.sub(r'``' + r"\1" + r"``", t) - t = _URL.sub(r"\1", t) - t = _CONST.sub(r'``' + r"\1" + r"``", t) - - return t - -##################################################################################### - -def html_ify(text): - ''' convert symbols like I(this is in italics) to valid HTML ''' - - t = cgi.escape(text) - t = _ITALIC.sub("" + r"\1" + "", t) - t = _BOLD.sub("" + r"\1" + "", t) - t = _MODULE.sub("" + r"\1" + "", t) - t = 
_URL.sub("" + r"\1" + "", t) - t = _CONST.sub("" + r"\1" + "", t) - - return t - - -##################################################################################### - -def rst_fmt(text, fmt): - ''' helper for Jinja2 to do format strings ''' - - return fmt % (text) - -##################################################################################### - -def rst_xline(width, char="="): - ''' return a restructured text line of a given length ''' - - return char * width - -##################################################################################### - -def write_data(text, options, outputname, module): - ''' dumps module output to a file or the screen, as requested ''' - - if options.output_dir is not None: - fname = os.path.join(options.output_dir, outputname % module) - fname = fname.replace(".py","") - f = open(fname, 'w') - f.write(text.encode('utf-8')) - f.close() - else: - print text - -##################################################################################### - - -def list_modules(module_dir, depth=0): - ''' returns a hash of categories, each category being a hash of module names to file paths ''' - - categories = dict(all=dict(),_aliases=dict()) - if depth <= 3: # limit # of subdirs - - files = glob.glob("%s/*" % module_dir) - for d in files: - - category = os.path.splitext(os.path.basename(d))[0] - if os.path.isdir(d): - - res = list_modules(d, depth + 1) - for key in res.keys(): - if key in categories: - categories[key] = ansible.utils.merge_hash(categories[key], res[key]) - res.pop(key, None) - - if depth < 2: - categories.update(res) - else: - category = module_dir.split("/")[-1] - if not category in categories: - categories[category] = res - else: - categories[category].update(res) - else: - module = category - category = os.path.basename(module_dir) - if not d.endswith(".py") or d.endswith('__init__.py'): - # windows powershell modules have documentation stubs in python docstring - # format (they are not executed) so skip the ps1 
format files - continue - elif module.startswith("_") and os.path.islink(d): - source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0] - module = module.replace("_","",1) - if not d in categories['_aliases']: - categories['_aliases'][source] = [module] - else: - categories['_aliases'][source].update(module) - continue - - if not category in categories: - categories[category] = {} - categories[category][module] = d - categories['all'][module] = d - - return categories - -##################################################################################### - -def generate_parser(): - ''' generate an optparse parser ''' - - p = optparse.OptionParser( - version='%prog 1.0', - usage='usage: %prog [options] arg1 arg2', - description='Generate module documentation from metadata', - ) - - p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number") - p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path") - p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates") - p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type") - p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose") - p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files") - p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules") - p.add_option('-V', action='version', help='Show version number and exit') - return p - -##################################################################################### - -def jinja2_environment(template_dir, typ): - - env = Environment(loader=FileSystemLoader(template_dir), - variable_start_string="@{", - 
variable_end_string="}@", - trim_blocks=True, - ) - env.globals['xline'] = rst_xline - - if typ == 'rst': - env.filters['convert_symbols_to_format'] = rst_ify - env.filters['html_ify'] = html_ify - env.filters['fmt'] = rst_fmt - env.filters['xline'] = rst_xline - template = env.get_template('rst.j2') - outputname = "%s_module.rst" - else: - raise Exception("unknown module format type: %s" % typ) - - return env, template, outputname - -##################################################################################### - -def process_module(module, options, env, template, outputname, module_map, aliases): - - fname = module_map[module] - if isinstance(fname, dict): - return "SKIPPED" - - basename = os.path.basename(fname) - deprecated = False - - # ignore files with extensions - if not basename.endswith(".py"): - return - elif module.startswith("_"): - if os.path.islink(fname): - return # ignore, its an alias - deprecated = True - module = module.replace("_","",1) - - print "rendering: %s" % module - - # use ansible core library to parse out doc metadata YAML and plaintext examples - doc, examples, returndocs = ansible.utils.module_docs.get_docstring(fname, verbose=options.verbose) - - # crash if module is missing documentation and not explicitly hidden from docs index - if doc is None: - if module in ansible.utils.module_docs.BLACKLIST_MODULES: - return "SKIPPED" - else: - sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) - sys.exit(1) - - if deprecated and 'deprecated' not in doc: - sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module)) - sys.exit(1) - - if "/core/" in fname: - doc['core'] = True - else: - doc['core'] = False - - if module in aliases: - doc['aliases'] = aliases[module] - - all_keys = [] - - if not 'version_added' in doc: - sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module) - sys.exit(1) - - added = 0 - if 
doc['version_added'] == 'historical': - del doc['version_added'] - else: - added = doc['version_added'] - - # don't show version added information if it's too old to be called out - if added: - added_tokens = str(added).split(".") - added = added_tokens[0] + "." + added_tokens[1] - added_float = float(added) - if added and added_float < TO_OLD_TO_BE_NOTABLE: - del doc['version_added'] - - for (k,v) in doc['options'].iteritems(): - all_keys.append(k) - - all_keys = sorted(all_keys) - - doc['option_keys'] = all_keys - doc['filename'] = fname - doc['docuri'] = doc['module'].replace('_', '-') - doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') - doc['ansible_version'] = options.ansible_version - doc['plainexamples'] = examples #plain text - - # here is where we build the table of contents... - - text = template.render(doc) - write_data(text, options, outputname, module) - return doc['short_description'] - -##################################################################################### - -def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases): - modstring = module - modname = module - if module in deprecated: - modstring = modstring + DEPRECATED - modname = "_" + module - elif module not in core: - modstring = modstring + NOTCORE - - result = process_module(modname, options, env, template, outputname, module_map, aliases) - - if result != "SKIPPED": - category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module)) - -def process_category(category, categories, options, env, template, outputname): - - module_map = categories[category] - - aliases = {} - if '_aliases' in categories: - aliases = categories['_aliases'] - - category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category) - category_file = open(category_file_path, "w") - print "*** recording category %s in %s ***" % (category, category_file_path) - - # TODO: start a new category file - - category 
= category.replace("_"," ") - category = category.title() - - modules = [] - deprecated = [] - core = [] - for module in module_map.keys(): - - if isinstance(module_map[module], dict): - for mod in module_map[module].keys(): - if mod.startswith("_"): - mod = mod.replace("_","",1) - deprecated.append(mod) - elif '/core/' in module_map[module][mod]: - core.append(mod) - else: - if module.startswith("_"): - module = module.replace("_","",1) - deprecated.append(module) - elif '/core/' in module_map[module]: - core.append(module) - - modules.append(module) - - modules.sort() - - category_header = "%s Modules" % (category.title()) - underscores = "`" * len(category_header) - - category_file.write("""\ -%s -%s - -.. toctree:: :maxdepth: 1 - -""" % (category_header, underscores)) - sections = [] - for module in modules: - if module in module_map and isinstance(module_map[module], dict): - sections.append(module) - continue - else: - print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases) - - sections.sort() - for section in sections: - category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section))) - category_file.write(".. toctree:: :maxdepth: 1\n\n") - - section_modules = module_map[section].keys() - section_modules.sort() - #for module in module_map[section]: - for module in section_modules: - print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases) - - category_file.write("""\n\n -.. note:: - - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. - - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less activity maintained than 'core' modules. 
- - Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub `_, extras tickets to `ansible/ansible-modules-extras on GitHub `_ -""" % (DEPRECATED, NOTCORE)) - category_file.close() - - # TODO: end a new category file - -##################################################################################### - -def validate_options(options): - ''' validate option parser options ''' - - if not options.module_dir: - print >>sys.stderr, "--module-dir is required" - sys.exit(1) - if not os.path.exists(options.module_dir): - print >>sys.stderr, "--module-dir does not exist: %s" % options.module_dir - sys.exit(1) - if not options.template_dir: - print "--template-dir must be specified" - sys.exit(1) - -##################################################################################### - -def main(): - - p = generate_parser() - - (options, args) = p.parse_args() - validate_options(options) - - env, template, outputname = jinja2_environment(options.template_dir, options.type) - - categories = list_modules(options.module_dir) - last_category = None - category_names = categories.keys() - category_names.sort() - - category_list_path = os.path.join(options.output_dir, "modules_by_category.rst") - category_list_file = open(category_list_path, "w") - category_list_file.write("Module Index\n") - category_list_file.write("============\n") - category_list_file.write("\n\n") - category_list_file.write(".. 
toctree::\n") - category_list_file.write(" :maxdepth: 1\n\n") - - for category in category_names: - if category.startswith("_"): - continue - category_list_file.write(" list_of_%s_modules\n" % category) - process_category(category, categories, options, env, template, outputname) - - category_list_file.close() - -if __name__ == '__main__': - main() diff --git a/v2/hacking/templates/rst.j2 b/v2/hacking/templates/rst.j2 deleted file mode 100644 index 59b8f35474c2ba..00000000000000 --- a/v2/hacking/templates/rst.j2 +++ /dev/null @@ -1,153 +0,0 @@ -.. _@{ module }@: - -{% if short_description %} -{% set title = module + ' - ' + short_description|convert_symbols_to_format %} -{% else %} -{% set title = module %} -{% endif %} -{% set title_len = title|length %} - -@{ title }@ -@{ '+' * title_len }@ - -.. contents:: - :local: - :depth: 1 - -{# ------------------------------------------ - # - # Please note: this looks like a core dump - # but it isn't one. - # - --------------------------------------------#} - -{% if aliases is defined -%} -Aliases: @{ ','.join(aliases) }@ -{% endif %} - -{% if deprecated is defined -%} -DEPRECATED ----------- - -@{ deprecated }@ -{% endif %} - -Synopsis --------- - -{% if version_added is defined -%} -.. versionadded:: @{ version_added }@ -{% endif %} - -{% for desc in description -%} -@{ desc | convert_symbols_to_format }@ -{% endfor %} - -{% if options -%} -Options -------- - -.. raw:: html - - - - - - - - - - {% for k in option_keys %} - {% set v = options[k] %} - - - - - {% if v.get('type', 'not_bool') == 'bool' %} - - {% else %} - - {% endif %} - - - {% endfor %} -
parameterrequireddefaultchoicescomments
@{ k }@{% if v.get('required', False) %}yes{% else %}no{% endif %}{% if v['default'] %}@{ v['default'] }@{% endif %}
  • yes
  • no
    {% for choice in v.get('choices',[]) -%}
  • @{ choice }@
  • {% endfor -%}
{% for desc in v.description -%}@{ desc | html_ify }@{% endfor -%}{% if v['version_added'] %} (added in Ansible @{v['version_added']}@){% endif %}
-{% endif %} - -{% if requirements %} -{% for req in requirements %} - -.. note:: Requires @{ req | convert_symbols_to_format }@ - -{% endfor %} -{% endif %} - -{% if examples or plainexamples %} -Examples --------- - -.. raw:: html - -{% for example in examples %} - {% if example['description'] %}

@{ example['description'] | html_ify }@

{% endif %} -

-

-@{ example['code'] | escape | indent(4, True) }@
-    
-

-{% endfor %} -
- -{% if plainexamples %} - -:: - -@{ plainexamples | indent(4, True) }@ -{% endif %} -{% endif %} - -{% if notes %} -{% for note in notes %} -.. note:: @{ note | convert_symbols_to_format }@ -{% endfor %} -{% endif %} - - -{% if not deprecated %} - {% if core %} - -This is a Core Module ---------------------- - -This source of this module is hosted on GitHub in the `ansible-modules-core `_ repo. - -If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. - -Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. - -Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. - -This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those in the "extras" repos. - - {% else %} - -This is an Extras Module ------------------------- - -This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo. - -If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. - -Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group ` or on Ansible's "#ansible" channel, located on irc.freenode.net. 
Development oriented topics should instead use the similar `ansible-devel google group `_. - -Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. - -Note that this module is designated a "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests. -Popular "extras" modules may be promoted to core modules over time. - - {% endif %} -{% endif %} - -For help in developing on modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`. - - diff --git a/v2/hacking/test-module b/v2/hacking/test-module deleted file mode 100755 index b672e23e260e38..00000000000000 --- a/v2/hacking/test-module +++ /dev/null @@ -1,192 +0,0 @@ -#!/usr/bin/env python - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -# this script is for testing modules without running through the -# entire guts of ansible, and is very helpful for when developing -# modules -# -# example: -# test-module -m ../library/commands/command -a "/bin/sleep 3" -# test-module -m ../library/system/service -a "name=httpd ensure=restarted" -# test-module -m ../library/system/service -a "name=httpd ensure=restarted" --debugger /usr/bin/pdb -# test-modulr -m ../library/file/lineinfile -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check - -import sys -import base64 -import os -import subprocess -import traceback -import optparse - -from ansible import utils -from ansible import module_common -import ansible.constants as C - -try: - import json -except ImportError: - import simplejson as json - -def parse(): - """parse command line - - :return : (options, args)""" - parser = optparse.OptionParser() - - parser.usage = "%prog -[options] (-h for help)" - - parser.add_option('-m', '--module-path', dest='module_path', - help="REQUIRED: full path of module source to execute") - parser.add_option('-a', '--args', dest='module_args', default="", - help="module argument string") - parser.add_option('-D', '--debugger', dest='debugger', - help="path to python debugger (e.g. /usr/bin/pdb)") - parser.add_option('-I', '--interpreter', dest='interpreter', - help="path to interpeter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)", - metavar='INTERPRETER_TYPE=INTERPRETER_PATH') - parser.add_option('-c', '--check', dest='check', action='store_true', - help="run the module in check mode") - options, args = parser.parse_args() - if not options.module_path: - parser.print_help() - sys.exit(1) - else: - return options, args - -def write_argsfile(argstring, json=False): - """ Write args to a file for old-style module's use. 
""" - argspath = os.path.expanduser("~/.ansible_test_module_arguments") - argsfile = open(argspath, 'w') - if json: - args = utils.parse_kv(argstring) - argstring = utils.jsonify(args) - argsfile.write(argstring) - argsfile.close() - return argspath - -def boilerplate_module(modfile, args, interpreter, check): - """ simulate what ansible does with new style modules """ - - #module_fh = open(modfile) - #module_data = module_fh.read() - #module_fh.close() - - #included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1 - - complex_args = {} - if args.startswith("@"): - # Argument is a YAML file (JSON is a subset of YAML) - complex_args = utils.combine_vars(complex_args, utils.parse_yaml_from_file(args[1:])) - args='' - elif args.startswith("{"): - # Argument is a YAML document (not a file) - complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args)) - args='' - - inject = {} - if interpreter: - if '=' not in interpreter: - print 'interpeter must by in the form of ansible_python_interpreter=/usr/bin/python' - sys.exit(1) - interpreter_type, interpreter_path = interpreter.split('=') - if not interpreter_type.startswith('ansible_'): - interpreter_type = 'ansible_%s' % interpreter_type - if not interpreter_type.endswith('_interpreter'): - interpreter_type = '%s_interpreter' % interpreter_type - inject[interpreter_type] = interpreter_path - - if check: - complex_args['CHECKMODE'] = True - - (module_data, module_style, shebang) = module_common.modify_module( - modfile, - complex_args, - args, - inject - ) - - modfile2_path = os.path.expanduser("~/.ansible_module_generated") - print "* including generated source, if any, saving to: %s" % modfile2_path - print "* this may offset any line numbers in tracebacks/debuggers!" 
- modfile2 = open(modfile2_path, 'w') - modfile2.write(module_data) - modfile2.close() - modfile = modfile2_path - - return (modfile2_path, module_style) - -def runtest( modfile, argspath): - """Test run a module, piping it's output for reporting.""" - - os.system("chmod +x %s" % modfile) - - invoke = "%s" % (modfile) - if argspath is not None: - invoke = "%s %s" % (modfile, argspath) - - cmd = subprocess.Popen(invoke, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - - try: - print "***********************************" - print "RAW OUTPUT" - print out - print err - results = utils.parse_json(out) - except: - print "***********************************" - print "INVALID OUTPUT FORMAT" - print out - traceback.print_exc() - sys.exit(1) - - print "***********************************" - print "PARSED OUTPUT" - print utils.jsonify(results,format=True) - -def rundebug(debugger, modfile, argspath): - """Run interactively with console debugger.""" - - if argspath is not None: - subprocess.call("%s %s %s" % (debugger, modfile, argspath), shell=True) - else: - subprocess.call("%s %s" % (debugger, modfile), shell=True) - -def main(): - - options, args = parse() - (modfile, module_style) = boilerplate_module(options.module_path, options.module_args, options.interpreter, options.check) - - argspath=None - if module_style != 'new': - if module_style == 'non_native_want_json': - argspath = write_argsfile(options.module_args, json=True) - elif module_style == 'old': - argspath = write_argsfile(options.module_args, json=False) - else: - raise Exception("internal error, unexpected module style: %s" % module_style) - if options.debugger: - rundebug(options.debugger, modfile, argspath) - else: - runtest(modfile, argspath) - -if __name__ == "__main__": - main() - diff --git a/v2/scripts/ansible b/v2/scripts/ansible deleted file mode 100644 index ae8ccff5952585..00000000000000 --- a/v2/scripts/ansible +++ /dev/null @@ -1,20 +0,0 @@ -# (c) 
2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type diff --git a/v2/setup.py b/v2/setup.py deleted file mode 100644 index e982c382f29823..00000000000000 --- a/v2/setup.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python - -import sys - -from ansible import __version__ -try: - from setuptools import setup, find_packages -except ImportError: - print("Ansible now needs setuptools in order to build. 
Install it using" - " your package manager (usually python-setuptools) or via pip (pip" - " install setuptools).") - sys.exit(1) - -setup(name='ansible', - version=__version__, - description='Radically simple IT automation', - author='Michael DeHaan', - author_email='michael@ansible.com', - url='http://ansible.com/', - license='GPLv3', - install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6', 'six >= 1.4.0'], - # package_dir={ '': 'lib' }, - # packages=find_packages('lib'), - package_data={ - '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'], - }, - scripts=[ - 'bin/ansible', - 'bin/ansible-playbook', - # 'bin/ansible-pull', - # 'bin/ansible-doc', - # 'bin/ansible-galaxy', - # 'bin/ansible-vault', - ], - data_files=[], -) diff --git a/v2/test/mock/__init__.py b/v2/test/mock/__init__.py deleted file mode 100644 index ae8ccff5952585..00000000000000 --- a/v2/test/mock/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type From 249fd2a7e1b79139e814e66a0a47e3e497e3f243 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 3 May 2015 21:58:48 -0500 Subject: [PATCH 0503/3617] Re-adding submodules after moving things around --- .gitmodules | 12 ++++++++++++ lib/ansible/__init__.py | 8 ++------ lib/ansible/modules/core | 1 + lib/ansible/modules/extras | 1 + v1/ansible/modules/core | 1 + v1/ansible/modules/extras | 1 + 6 files changed, 18 insertions(+), 6 deletions(-) create mode 160000 lib/ansible/modules/core create mode 160000 lib/ansible/modules/extras create mode 160000 v1/ansible/modules/core create mode 160000 v1/ansible/modules/extras diff --git a/.gitmodules b/.gitmodules index e69de29bb2d1d6..793522a29c6bce 100644 --- a/.gitmodules +++ b/.gitmodules @@ -0,0 +1,12 @@ +[submodule "lib/ansible/modules/core"] + path = lib/ansible/modules/core + url = https://github.com/ansible/ansible-modules-core +[submodule "lib/ansible/modules/extras"] + path = lib/ansible/modules/extras + url = https://github.com/ansible/ansible-modules-extras +[submodule "v1/ansible/modules/core"] + path = v1/ansible/modules/core + url = https://github.com/ansible/ansible-modules-core +[submodule "v1/ansible/modules/extras"] + path = v1/ansible/modules/extras + url = https://github.com/ansible/ansible-modules-extras diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py index 8637adb54d6c16..704b6456f74202 100644 --- a/lib/ansible/__init__.py +++ b/lib/ansible/__init__.py @@ -14,9 +14,5 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -__version__ = '2.0' +__version__ = '2.0.0' +__author__ = 'Ansible, Inc.' 
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core new file mode 160000 index 00000000000000..0341ddd35ed5ff --- /dev/null +++ b/lib/ansible/modules/core @@ -0,0 +1 @@ +Subproject commit 0341ddd35ed5ff477ad5de2488d947255ce86259 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras new file mode 160000 index 00000000000000..495ad450e53feb --- /dev/null +++ b/lib/ansible/modules/extras @@ -0,0 +1 @@ +Subproject commit 495ad450e53feb1cd26218dc68056cc34d1ea9ff diff --git a/v1/ansible/modules/core b/v1/ansible/modules/core new file mode 160000 index 00000000000000..9028e9d4be8a3d --- /dev/null +++ b/v1/ansible/modules/core @@ -0,0 +1 @@ +Subproject commit 9028e9d4be8a3dbb96c81a799e18f3adf63d9fd0 diff --git a/v1/ansible/modules/extras b/v1/ansible/modules/extras new file mode 160000 index 00000000000000..495ad450e53feb --- /dev/null +++ b/v1/ansible/modules/extras @@ -0,0 +1 @@ +Subproject commit 495ad450e53feb1cd26218dc68056cc34d1ea9ff From 803fb397f35fe190a9c10a4e25386a6450ff52ff Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 4 May 2015 01:33:10 -0500 Subject: [PATCH 0504/3617] Fixing filter plugins directory from switch --- lib/ansible/executor/task_executor.py | 21 +- lib/ansible/playbook/block.py | 12 +- lib/ansible/playbook/conditional.py | 4 +- lib/ansible/playbook/task.py | 8 +- lib/ansible/plugins/action/__init__.py | 3 +- lib/ansible/plugins/action/assert.py | 2 +- lib/ansible/plugins/action/debug.py | 4 +- lib/ansible/plugins/action/set_fact.py | 4 +- lib/ansible/plugins/action/template.py | 4 +- lib/ansible/plugins/filter | 1 - lib/ansible/plugins/filter/__init__.py | 0 lib/ansible/plugins/filter/core.py | 351 +++++++++++++ lib/ansible/plugins/filter/ipaddr.py | 659 ++++++++++++++++++++++++ lib/ansible/plugins/filter/mathstuff.py | 126 +++++ 14 files changed, 1166 insertions(+), 33 deletions(-) delete mode 120000 lib/ansible/plugins/filter create mode 100644 lib/ansible/plugins/filter/__init__.py create mode 100644 
lib/ansible/plugins/filter/core.py create mode 100644 lib/ansible/plugins/filter/ipaddr.py create mode 100644 lib/ansible/plugins/filter/mathstuff.py diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 2f90b3d87eb534..7fa21349483f1b 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -180,7 +180,8 @@ def _squash_items(self, items, variables): final_items = [] for item in items: variables['item'] = item - if self._task.evaluate_conditional(variables): + templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) + if self._task.evaluate_conditional(templar, variables): final_items.append(item) return [",".join(final_items)] else: @@ -208,13 +209,13 @@ def _execute(self, variables=None): # get the connection and the handler for this execution self._connection = self._get_connection(variables) - self._handler = self._get_action_handler(connection=self._connection) + self._handler = self._get_action_handler(connection=self._connection, templar=templar) # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. 
We do this before the post validation due to # the fact that the conditional may specify that the task be skipped due to a # variable not being present which would otherwise cause validation to fail - if not self._task.evaluate_conditional(variables): + if not self._task.evaluate_conditional(templar, variables): debug("when evaulation failed, skipping this task") return dict(changed=False, skipped=True, skip_reason='Conditional check failed') @@ -268,7 +269,7 @@ def _execute(self, variables=None): return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e)) if self._task.poll > 0: - result = self._poll_async_result(result=result) + result = self._poll_async_result(result=result, templar=templar) # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution @@ -284,15 +285,15 @@ def _execute(self, variables=None): # FIXME: make sure until is mutually exclusive with changed_when/failed_when if self._task.until: cond.when = self._task.until - if cond.evaluate_conditional(vars_copy): + if cond.evaluate_conditional(templar, vars_copy): break elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result: if self._task.changed_when: cond.when = [ self._task.changed_when ] - result['changed'] = cond.evaluate_conditional(vars_copy) + result['changed'] = cond.evaluate_conditional(templar, vars_copy) if self._task.failed_when: cond.when = [ self._task.failed_when ] - failed_when_result = cond.evaluate_conditional(vars_copy) + failed_when_result = cond.evaluate_conditional(templar, vars_copy) result['failed_when_result'] = result['failed'] = failed_when_result if failed_when_result: break @@ -315,7 +316,7 @@ def _execute(self, variables=None): debug("attempt loop complete, returning result") return result - def _poll_async_result(self, result): + def _poll_async_result(self, result, templar): ''' Polls for the specified JID to be complete ''' @@ 
-339,6 +340,7 @@ def _poll_async_result(self, result): connection=self._connection, connection_info=self._connection_info, loader=self._loader, + templar=templar, shared_loader_obj=self._shared_loader_obj, ) @@ -391,7 +393,7 @@ def _get_connection(self, variables): return connection - def _get_action_handler(self, connection): + def _get_action_handler(self, connection, templar): ''' Returns the correct action plugin to handle the requestion task action ''' @@ -411,6 +413,7 @@ def _get_action_handler(self, connection): connection=connection, connection_info=self._connection_info, loader=self._loader, + templar=templar, shared_loader_obj=self._shared_loader_obj, ) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index e6ad8e5745fb3d..d65f78712798ef 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -225,21 +225,21 @@ def deserialize(self, data): ti.deserialize(ti_data) self._task_include = ti - def evaluate_conditional(self, all_vars): + def evaluate_conditional(self, templar, all_vars): if len(self._dep_chain): for dep in self._dep_chain: - if not dep.evaluate_conditional(all_vars): + if not dep.evaluate_conditional(templar, all_vars): return False if self._task_include is not None: - if not self._task_include.evaluate_conditional(all_vars): + if not self._task_include.evaluate_conditional(templar, all_vars): return False if self._parent_block is not None: - if not self._parent_block.evaluate_conditional(all_vars): + if not self._parent_block.evaluate_conditional(templar, all_vars): return False elif self._role is not None: - if not self._role.evaluate_conditional(all_vars): + if not self._role.evaluate_conditional(templar, all_vars): return False - return super(Block, self).evaluate_conditional(all_vars) + return super(Block, self).evaluate_conditional(templar, all_vars) def set_loader(self, loader): self._loader = loader diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py 
index 2233f3fa9ea54d..707233aaa0e991 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -47,16 +47,16 @@ def _validate_when(self, attr, name, value): if not isinstance(value, list): setattr(self, name, [ value ]) - def evaluate_conditional(self, all_vars): + def evaluate_conditional(self, templar, all_vars): ''' Loops through the conditionals set on this object, returning False if any of them evaluate as such. ''' - templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False) for conditional in self.when: if not self._check_conditional(conditional, templar, all_vars): return False + return True def _check_conditional(self, conditional, templar, all_vars): diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 060602579851d3..58788df65b4cda 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -266,14 +266,14 @@ def deserialize(self, data): super(Task, self).deserialize(data) - def evaluate_conditional(self, all_vars): + def evaluate_conditional(self, templar, all_vars): if self._block is not None: - if not self._block.evaluate_conditional(all_vars): + if not self._block.evaluate_conditional(templar, all_vars): return False if self._task_include is not None: - if not self._task_include.evaluate_conditional(all_vars): + if not self._task_include.evaluate_conditional(templar, all_vars): return False - return super(Task, self).evaluate_conditional(all_vars) + return super(Task, self).evaluate_conditional(templar, all_vars) def set_loader(self, loader): ''' diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 62036cc7068211..83c129687ec88d 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -44,11 +44,12 @@ class ActionBase: action in use. 
''' - def __init__(self, task, connection, connection_info, loader, shared_loader_obj): + def __init__(self, task, connection, connection_info, loader, templar, shared_loader_obj): self._task = task self._connection = connection self._connection_info = connection_info self._loader = loader + self._templar = templar self._shared_loader_obj = shared_loader_obj self._shell = self.get_shell() diff --git a/lib/ansible/plugins/action/assert.py b/lib/ansible/plugins/action/assert.py index 5c4fdd7b89c222..d39484f3663431 100644 --- a/lib/ansible/plugins/action/assert.py +++ b/lib/ansible/plugins/action/assert.py @@ -48,7 +48,7 @@ def run(self, tmp=None, task_vars=dict()): cond = Conditional(loader=self._loader) for that in thats: cond.when = [ that ] - test_result = cond.evaluate_conditional(all_vars=task_vars) + test_result = cond.evaluate_conditional(templar=self._templar, all_vars=task_vars) if not test_result: result = dict( failed = True, diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index 04db3c9cc1ba9d..94056e496ce977 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -19,7 +19,6 @@ from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean -from ansible.template import Templar class ActionModule(ActionBase): ''' Print statements during execution ''' @@ -35,8 +34,7 @@ def run(self, tmp=None, task_vars=dict()): result = dict(msg=self._task.args['msg']) # FIXME: move the LOOKUP_REGEX somewhere else elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): - templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=task_vars) - results = templar.template(self._task.args['var'], convert_bare=True) + results = self._templar.template(self._task.args['var'], convert_bare=True) result = dict() result[self._task.args['var']] = results else: diff --git a/lib/ansible/plugins/action/set_fact.py 
b/lib/ansible/plugins/action/set_fact.py index 6086ee6e8b2b50..10ff6f23225d76 100644 --- a/lib/ansible/plugins/action/set_fact.py +++ b/lib/ansible/plugins/action/set_fact.py @@ -19,7 +19,6 @@ from ansible.errors import AnsibleError from ansible.plugins.action import ActionBase -from ansible.template import Templar from ansible.utils.boolean import boolean class ActionModule(ActionBase): @@ -27,11 +26,10 @@ class ActionModule(ActionBase): TRANSFERS_FILES = False def run(self, tmp=None, task_vars=dict()): - templar = Templar(loader=self._loader, variables=task_vars) facts = dict() if self._task.args: for (k, v) in self._task.args.iteritems(): - k = templar.template(k) + k = self._templar.template(k) if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'): v = boolean(v) facts[k] = v diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index a234ef2eee920a..7300848e6b4d01 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -21,7 +21,6 @@ import os from ansible.plugins.action import ActionBase -from ansible.template import Templar from ansible.utils.hashing import checksum_s class ActionModule(ActionBase): @@ -99,11 +98,10 @@ def run(self, tmp=None, task_vars=dict()): dest = os.path.join(dest, base) # template the source data locally & get ready to transfer - templar = Templar(loader=self._loader, variables=task_vars) try: with open(source, 'r') as f: template_data = f.read() - resultant = templar.template(template_data, preserve_trailing_newlines=True) + resultant = self._templar.template(template_data, preserve_trailing_newlines=True) except Exception as e: return dict(failed=True, msg=type(e).__name__ + ": " + str(e)) diff --git a/lib/ansible/plugins/filter b/lib/ansible/plugins/filter deleted file mode 120000 index fa1d5885700cce..00000000000000 --- a/lib/ansible/plugins/filter +++ /dev/null @@ -1 +0,0 @@ -../../../lib/ansible/runner/filter_plugins \ No 
newline at end of file diff --git a/lib/ansible/plugins/filter/__init__.py b/lib/ansible/plugins/filter/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py new file mode 100644 index 00000000000000..bdf45509c3a610 --- /dev/null +++ b/lib/ansible/plugins/filter/core.py @@ -0,0 +1,351 @@ +# (c) 2012, Jeroen Hoekx +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from __future__ import absolute_import + +import sys +import base64 +import json +import os.path +import types +import pipes +import glob +import re +import crypt +import hashlib +import string +from functools import partial +import operator as py_operator +from random import SystemRandom, shuffle +import uuid + +import yaml +from jinja2.filters import environmentfilter +from distutils.version import LooseVersion, StrictVersion + +from ansible import errors +from ansible.utils.hashing import md5s, checksum_s +from ansible.utils.unicode import unicode_wrap, to_unicode + + +UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E') + + +def to_nice_yaml(*a, **kw): + '''Make verbose, human readable yaml''' + transformed = yaml.safe_dump(*a, indent=4, allow_unicode=True, default_flow_style=False, **kw) + return to_unicode(transformed) + +def to_json(a, *args, **kw): + ''' Convert the value to JSON ''' + return json.dumps(a, *args, **kw) + +def to_nice_json(a, *args, **kw): + '''Make verbose, human readable JSON''' + # python-2.6's json encoder is buggy (can't encode hostvars) + if sys.version_info < (2, 7): + try: + import simplejson + except ImportError: + pass + else: + try: + major = int(simplejson.__version__.split('.')[0]) + except: + pass + else: + if major >= 2: + return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw) + # Fallback to the to_json filter + return to_json(a, *args, **kw) + return json.dumps(a, indent=4, sort_keys=True, *args, **kw) + +def failed(*a, **kw): + ''' Test if task result yields failed ''' + item = a[0] + if type(item) != dict: + raise errors.AnsibleFilterError("|failed expects a dictionary") + rc = item.get('rc',0) + failed = item.get('failed',False) + if rc != 0 or failed: + return True + else: + return False + +def success(*a, **kw): + ''' Test if task result yields success ''' + return not failed(*a, **kw) + +def changed(*a, **kw): + ''' Test if task result yields changed ''' + item = a[0] + if type(item) 
!= dict: + raise errors.AnsibleFilterError("|changed expects a dictionary") + if not 'changed' in item: + changed = False + if ('results' in item # some modules return a 'results' key + and type(item['results']) == list + and type(item['results'][0]) == dict): + for result in item['results']: + changed = changed or result.get('changed', False) + else: + changed = item.get('changed', False) + return changed + +def skipped(*a, **kw): + ''' Test if task result yields skipped ''' + item = a[0] + if type(item) != dict: + raise errors.AnsibleFilterError("|skipped expects a dictionary") + skipped = item.get('skipped', False) + return skipped + +def mandatory(a): + ''' Make a variable mandatory ''' + try: + a + except NameError: + raise errors.AnsibleFilterError('Mandatory variable not defined.') + else: + return a + +def bool(a): + ''' return a bool for the arg ''' + if a is None or type(a) == bool: + return a + if type(a) in types.StringTypes: + a = a.lower() + if a in ['yes', 'on', '1', 'true', 1]: + return True + else: + return False + +def quote(a): + ''' return its argument quoted for shell usage ''' + return pipes.quote(a) + +def fileglob(pathname): + ''' return list of matched files for glob ''' + return glob.glob(pathname) + +def regex(value='', pattern='', ignorecase=False, match_type='search'): + ''' Expose `re` as a boolean filter using the `search` method by default. + This is likely only useful for `search` and `match` which already + have their own filters. 
def ternary(value, true_val, false_val):
    ''' value ? true_val : false_val '''
    # Plain conditional expression: pick by the truthiness of value.
    return true_val if value else false_val
def get_encrypted_password(password, hashtype='sha512', salt=None):
    ''' Hash a password with crypt(3) for use in e.g. /etc/shadow.

    :arg password: plaintext password to hash
    :arg hashtype: one of 'md5', 'blowfish', 'sha256', 'sha512'
        (case-insensitive)
    :arg salt: salt string to use; a random 16-character alphanumeric
        salt is generated when omitted
    :returns: the crypted string, or None for an unsupported hashtype
    '''

    # TODO: find a way to construct dynamically from system
    cryptmethod = {
        'md5': '1',
        'blowfish': '2a',
        'sha256': '5',
        'sha512': '6',
    }

    # Bug fix: the lowered value used to be assigned to a misspelled
    # 'hastype' variable and never used, so mixed-case hashtypes (e.g.
    # 'SHA512') missed the lookup below and silently returned None.
    hashtype = hashtype.lower()
    if hashtype in cryptmethod:
        if salt is None:
            r = SystemRandom()
            salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)])

        saltstring = "$%s$%s" % (cryptmethod[hashtype], salt)
        return crypt.crypt(password, saltstring)

    return None
'mandatory': mandatory, + + # value as boolean + 'bool': bool, + + # quote string for shell usage + 'quote': quote, + + # hash filters + # md5 hex digest of string + 'md5': md5s, + # sha1 hex digeset of string + 'sha1': checksum_s, + # checksum of string as used by ansible for checksuming files + 'checksum': checksum_s, + # generic hashing + 'password_hash': get_encrypted_password, + 'hash': get_hash, + + # file glob + 'fileglob': fileglob, + + # regex + 'match': match, + 'search': search, + 'regex': regex, + 'regex_replace': regex_replace, + + # ? : ; + 'ternary': ternary, + + # list + # version comparison + 'version_compare': version_compare, + + # random stuff + 'random': rand, + 'shuffle': randomize_list, + } diff --git a/lib/ansible/plugins/filter/ipaddr.py b/lib/ansible/plugins/filter/ipaddr.py new file mode 100644 index 00000000000000..5d9d6e3136728d --- /dev/null +++ b/lib/ansible/plugins/filter/ipaddr.py @@ -0,0 +1,659 @@ +# (c) 2014, Maciej Delmanowski +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from functools import partial + +try: + import netaddr +except ImportError: + # in this case, we'll make the filters return error messages (see bottom) + netaddr = None +else: + class mac_linux(netaddr.mac_unix): + pass + mac_linux.word_fmt = '%.2x' + +from ansible import errors + + +# ---- IP address and network query helpers ---- + +def _empty_ipaddr_query(v, vtype): + # We don't have any query to process, so just check what type the user + # expects, and return the IP address in a correct format + if v: + if vtype == 'address': + return str(v.ip) + elif vtype == 'network': + return str(v) + +def _6to4_query(v, vtype, value): + if v.version == 4: + + if v.size == 1: + ipconv = str(v.ip) + elif v.size > 1: + if v.ip != v.network: + ipconv = str(v.ip) + else: + ipconv = False + + if ipaddr(ipconv, 'public'): + numbers = list(map(int, ipconv.split('.'))) + + try: + return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers) + except: + return False + + elif v.version == 6: + if vtype == 'address': + if ipaddr(str(v), '2002::/16'): + return value + elif vtype == 'network': + if v.ip != v.network: + if ipaddr(str(v.ip), '2002::/16'): + return value + else: + return False + +def _ip_query(v): + if v.size == 1: + return str(v.ip) + if v.size > 1: + if v.ip != v.network: + return str(v.ip) + +def _gateway_query(v): + if v.size > 1: + if v.ip != v.network: + return str(v.ip) + '/' + str(v.prefixlen) + +def _bool_ipaddr_query(v): + if v: + return True + +def _broadcast_query(v): + if v.size > 1: + return str(v.broadcast) + +def _cidr_query(v): + return str(v) + +def _cidr_lookup_query(v, iplist, value): + try: + if v in iplist: + return value + except: + return False + +def _host_query(v): + if v.size == 1: + return str(v) + elif v.size > 1: + if v.ip != v.network: + return str(v.ip) + '/' + str(v.prefixlen) + +def _hostmask_query(v): + return str(v.hostmask) + +def _int_query(v, vtype): + if vtype == 'address': + return int(v.ip) + elif vtype == 'network': + return 
str(int(v.ip)) + '/' + str(int(v.prefixlen)) + +def _ipv4_query(v, value): + if v.version == 6: + try: + return str(v.ipv4()) + except: + return False + else: + return value + +def _ipv6_query(v, value): + if v.version == 4: + return str(v.ipv6()) + else: + return value + +def _link_local_query(v, value): + v_ip = netaddr.IPAddress(str(v.ip)) + if v.version == 4: + if ipaddr(str(v_ip), '169.254.0.0/24'): + return value + + elif v.version == 6: + if ipaddr(str(v_ip), 'fe80::/10'): + return value + +def _loopback_query(v, value): + v_ip = netaddr.IPAddress(str(v.ip)) + if v_ip.is_loopback(): + return value + +def _multicast_query(v, value): + if v.is_multicast(): + return value + +def _net_query(v): + if v.size > 1: + if v.ip == v.network: + return str(v.network) + '/' + str(v.prefixlen) + +def _netmask_query(v): + if v.size > 1: + return str(v.netmask) + +def _network_query(v): + if v.size > 1: + return str(v.network) + +def _prefix_query(v): + return int(v.prefixlen) + +def _private_query(v, value): + if v.is_private(): + return value + +def _public_query(v, value): + v_ip = netaddr.IPAddress(str(v.ip)) + if v_ip.is_unicast() and not v_ip.is_private() and \ + not v_ip.is_loopback() and not v_ip.is_netmask() and \ + not v_ip.is_hostmask(): + return value + +def _revdns_query(v): + v_ip = netaddr.IPAddress(str(v.ip)) + return v_ip.reverse_dns + +def _size_query(v): + return v.size + +def _subnet_query(v): + return str(v.cidr) + +def _type_query(v): + if v.size == 1: + return 'address' + if v.size > 1: + if v.ip != v.network: + return 'address' + else: + return 'network' + +def _unicast_query(v, value): + if v.is_unicast(): + return value + +def _version_query(v): + return v.version + +def _wrap_query(v, vtype, value): + if v.version == 6: + if vtype == 'address': + return '[' + str(v.ip) + ']' + elif vtype == 'network': + return '[' + str(v.ip) + ']/' + str(v.prefixlen) + else: + return value + + +# ---- HWaddr query helpers ---- +def _bare_query(v): + v.dialect = 
netaddr.mac_bare + return str(v) + +def _bool_hwaddr_query(v): + if v: + return True + +def _cisco_query(v): + v.dialect = netaddr.mac_cisco + return str(v) + +def _empty_hwaddr_query(v, value): + if v: + return value + +def _linux_query(v): + v.dialect = mac_linux + return str(v) + +def _postgresql_query(v): + v.dialect = netaddr.mac_pgsql + return str(v) + +def _unix_query(v): + v.dialect = netaddr.mac_unix + return str(v) + +def _win_query(v): + v.dialect = netaddr.mac_eui48 + return str(v) + + +# ---- IP address and network filters ---- + +def ipaddr(value, query = '', version = False, alias = 'ipaddr'): + ''' Check if string is an IP address or network and filter it ''' + + query_func_extra_args = { + '': ('vtype',), + '6to4': ('vtype', 'value'), + 'cidr_lookup': ('iplist', 'value'), + 'int': ('vtype',), + 'ipv4': ('value',), + 'ipv6': ('value',), + 'link-local': ('value',), + 'loopback': ('value',), + 'lo': ('value',), + 'multicast': ('value',), + 'private': ('value',), + 'public': ('value',), + 'unicast': ('value',), + 'wrap': ('vtype', 'value'), + } + query_func_map = { + '': _empty_ipaddr_query, + '6to4': _6to4_query, + 'address': _ip_query, + 'address/prefix': _gateway_query, + 'bool': _bool_ipaddr_query, + 'broadcast': _broadcast_query, + 'cidr': _cidr_query, + 'cidr_lookup': _cidr_lookup_query, + 'gateway': _gateway_query, + 'gw': _gateway_query, + 'host': _host_query, + 'host/prefix': _gateway_query, + 'hostmask': _hostmask_query, + 'hostnet': _gateway_query, + 'int': _int_query, + 'ip': _ip_query, + 'ipv4': _ipv4_query, + 'ipv6': _ipv6_query, + 'link-local': _link_local_query, + 'lo': _loopback_query, + 'loopback': _loopback_query, + 'multicast': _multicast_query, + 'net': _net_query, + 'netmask': _netmask_query, + 'network': _network_query, + 'prefix': _prefix_query, + 'private': _private_query, + 'public': _public_query, + 'revdns': _revdns_query, + 'router': _gateway_query, + 'size': _size_query, + 'subnet': _subnet_query, + 'type': _type_query, + 
'unicast': _unicast_query, + 'v4': _ipv4_query, + 'v6': _ipv6_query, + 'version': _version_query, + 'wrap': _wrap_query, + } + + vtype = None + + if not value: + return False + + elif value == True: + return False + + # Check if value is a list and parse each element + elif isinstance(value, (list, tuple)): + + _ret = [] + for element in value: + if ipaddr(element, str(query), version): + _ret.append(ipaddr(element, str(query), version)) + + if _ret: + return _ret + else: + return list() + + # Check if value is a number and convert it to an IP address + elif str(value).isdigit(): + + # We don't know what IP version to assume, so let's check IPv4 first, + # then IPv6 + try: + if ((not version) or (version and version == 4)): + v = netaddr.IPNetwork('0.0.0.0/0') + v.value = int(value) + v.prefixlen = 32 + elif version and version == 6: + v = netaddr.IPNetwork('::/0') + v.value = int(value) + v.prefixlen = 128 + + # IPv4 didn't work the first time, so it definitely has to be IPv6 + except: + try: + v = netaddr.IPNetwork('::/0') + v.value = int(value) + v.prefixlen = 128 + + # The value is too big for IPv6. Are you a nanobot? + except: + return False + + # We got an IP address, let's mark it as such + value = str(v) + vtype = 'address' + + # value has not been recognized, check if it's a valid IP string + else: + try: + v = netaddr.IPNetwork(value) + + # value is a valid IP string, check if user specified + # CIDR prefix or just an IP address, this will indicate default + # output format + try: + address, prefix = value.split('/') + vtype = 'network' + except: + vtype = 'address' + + # value hasn't been recognized, maybe it's a numerical CIDR? 
+ except: + try: + address, prefix = value.split('/') + address.isdigit() + address = int(address) + prefix.isdigit() + prefix = int(prefix) + + # It's not numerical CIDR, give up + except: + return False + + # It is something, so let's try and build a CIDR from the parts + try: + v = netaddr.IPNetwork('0.0.0.0/0') + v.value = address + v.prefixlen = prefix + + # It's not a valid IPv4 CIDR + except: + try: + v = netaddr.IPNetwork('::/0') + v.value = address + v.prefixlen = prefix + + # It's not a valid IPv6 CIDR. Give up. + except: + return False + + # We have a valid CIDR, so let's write it in correct format + value = str(v) + vtype = 'network' + + # We have a query string but it's not in the known query types. Check if + # that string is a valid subnet, if so, we can check later if given IP + # address/network is inside that specific subnet + try: + ### ?? 6to4 and link-local were True here before. Should they still? + if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'): + iplist = netaddr.IPSet([netaddr.IPNetwork(query)]) + query = 'cidr_lookup' + except: + pass + + # This code checks if value maches the IP version the user wants, ie. 
def ipwrap(value, query = ''):
    ''' Wrap valid IP addresses per the 'wrap' query (IPv6 in brackets);
    non-address elements and unparseable input pass through untouched. '''
    try:
        if isinstance(value, (list, tuple)):
            # Wrap each element that passes the ipaddr check, keep the rest.
            return [ipaddr(item, 'wrap') if ipaddr(item, query, version = False, alias = 'ipwrap')
                    else item
                    for item in value]
        checked = ipaddr(value, query, version = False, alias = 'ipwrap')
        return ipaddr(checked, 'wrap') if checked else value
    except:
        # Mirror the original contract: never raise, hand back the input.
        return value
def nthhost(value, query=''):
    ''' Get the nth host within a given network.

    :arg value: IP address or address/prefix string
    :arg query: index of the host to return (as int or int-like string)
    :returns: the nth host of the network, or False on bad input,
        missing query, or out-of-range index
    '''
    try:
        vtype = ipaddr(value, 'type')
        if vtype == 'address':
            v = ipaddr(value, 'cidr')
        elif vtype == 'network':
            v = ipaddr(value, 'subnet')

        value = netaddr.IPNetwork(v)
    except:
        return False

    if not query:
        return False

    # Removed the unused 'vsize' local; value.size is checked directly.
    try:
        nth = int(query)
        if value.size > nth:
            return value[nth]

    except ValueError:
        # query was not an integer
        return False

    return False
def macaddr(value, query = ''):
    ''' Check if string is a MAC address and filter it (hwaddr alias). '''
    # Delegate to hwaddr, overriding only the alias used in error messages.
    return hwaddr(value, query=query, alias='macaddr')
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import absolute_import + +import math +import collections +from ansible import errors + +def unique(a): + if isinstance(a,collections.Hashable): + c = set(a) + else: + c = [] + for x in a: + if x not in c: + c.append(x) + return c + +def intersect(a, b): + if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable): + c = set(a) & set(b) + else: + c = unique(filter(lambda x: x in b, a)) + return c + +def difference(a, b): + if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable): + c = set(a) - set(b) + else: + c = unique(filter(lambda x: x not in b, a)) + return c + +def symmetric_difference(a, b): + if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable): + c = set(a) ^ set(b) + else: + c = unique(filter(lambda x: x not in intersect(a,b), union(a,b))) + return c + +def union(a, b): + if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable): + c = set(a) | set(b) + else: + c = unique(a + b) + return c + +def min(a): + _min = __builtins__.get('min') + return _min(a); + +def max(a): + _max = __builtins__.get('max') + return _max(a); + +def isnotanumber(x): + try: + return math.isnan(x) + except TypeError: + return False + + +def logarithm(x, base=math.e): + try: + if base == 10: + return math.log10(x) + else: + return math.log(x, base) + except TypeError, e: + raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e)) + + +def power(x, y): + try: + return math.pow(x, y) + except TypeError, e: + raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e)) + + +def inversepower(x, base=2): + try: + if base == 2: + return math.sqrt(x) + else: + return math.pow(x, 1.0/float(base)) + except TypeError, e: + raise errors.AnsibleFilterError('root() can only be used on 
class FilterModule(object):
    ''' Ansible math jinja2 filters '''

    def filters(self):
        ''' Return the mapping of filter names to their callables. '''
        filter_map = {
            # general math
            'isnan': isnotanumber,
            'min': min,
            'max': max,

            # exponents and logarithms
            'log': logarithm,
            'pow': power,
            'root': inversepower,

            # set theory
            'unique': unique,
            'intersect': intersect,
            'difference': difference,
            'symmetric_difference': symmetric_difference,
            'union': union,
        }
        return filter_map
file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 1c168a8e26436b..9e91cd09eafb66 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -226,7 +226,7 @@ def make_become_cmd(self, cmd, executable, become_settings=None): elif self.become_method == 'pbrun': exe = become_settings.get('pbrun_exe', 'pbrun') flags = become_settings.get('pbrun_flags', '') - becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, success_cmd) + becomecmd = '%s -b -l %s -u %s %s' % (exe, flags, self.become_user, success_cmd) elif self.become_method == 'pfexec': exe = become_settings.get('pfexec_exe', 'pbrun') From 2543403c21071b4a1b2647062c2720beb2f406ef Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 May 2015 12:04:18 -0400 Subject: [PATCH 0508/3617] deprecated nova_compute and added new os_server for openstack to changelog --- CHANGELOG.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9485fd2198267d..92354cd8520170 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,8 +8,9 @@ Major Changes: * template code now retains types for bools and Numbers instead of turning them into strings If you need the old behaviour, quote the value and it will get passed around as a string -Deprecated Modules: - * ec2_ami_search, in favor of the new ec2_ami_find +Deprecated Modules (new ones in parens): + * ec2_ami_search (ec2_ami_find) + * nova_compute (os_server) New Modules: * find @@ -28,6 +29,7 @@ New Modules: * cloudstack: cs_securitygroup_rule * cloudstack: cs_vmsnapshot * maven_artifact + * openstack: os_server * openstack: os_server_facts * openstack: os_server_volume * openstack: os_subnet From 61ec84ef717bade247590bda44ad5aa4372be2f9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 4 May 2015 11:09:54 -0700 Subject: [PATCH 0509/3617] Update module refs --- 
lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- v2/ansible/modules/core | 2 +- v2/ansible/modules/extras | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9028e9d4be8a3d..f444e49dfa652e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9028e9d4be8a3dbb96c81a799e18f3adf63d9fd0 +Subproject commit f444e49dfa652e0bec0a140efe69ac5372cac321 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index dd80fa221ce0ad..70ea05856356ad 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6dc +Subproject commit 70ea05856356ad36f48b4bb7267d637efc56d292 diff --git a/v2/ansible/modules/core b/v2/ansible/modules/core index 0341ddd35ed5ff..85c8a892c80b92 160000 --- a/v2/ansible/modules/core +++ b/v2/ansible/modules/core @@ -1 +1 @@ -Subproject commit 0341ddd35ed5ff477ad5de2488d947255ce86259 +Subproject commit 85c8a892c80b92730831d95fa654ef6d35b0eca0 diff --git a/v2/ansible/modules/extras b/v2/ansible/modules/extras index dd80fa221ce0ad..70ea05856356ad 160000 --- a/v2/ansible/modules/extras +++ b/v2/ansible/modules/extras @@ -1 +1 @@ -Subproject commit dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6dc +Subproject commit 70ea05856356ad36f48b4bb7267d637efc56d292 From fdb059187721779590d38646a215d4668cbc3f3a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 4 May 2015 12:06:02 -0700 Subject: [PATCH 0510/3617] Update module pointers --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 0341ddd35ed5ff..85c8a892c80b92 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 0341ddd35ed5ff477ad5de2488d947255ce86259 +Subproject commit 
85c8a892c80b92730831d95fa654ef6d35b0eca0 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 495ad450e53feb..70ea05856356ad 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 495ad450e53feb1cd26218dc68056cc34d1ea9ff +Subproject commit 70ea05856356ad36f48b4bb7267d637efc56d292 From 99909b08bad5e7e2d859cf8a17467df6df4efcda Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 4 May 2015 15:06:00 -0500 Subject: [PATCH 0511/3617] Submodule update --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index f444e49dfa652e..c4f6e63117cd37 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit f444e49dfa652e0bec0a140efe69ac5372cac321 +Subproject commit c4f6e63117cd378ed5b144bf6c8391420a2381ab From b19d426f0b3d1983989979f564ef783b6e975e28 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 4 May 2015 13:38:12 -0700 Subject: [PATCH 0512/3617] Normalize the way requirements is specified --- lib/ansible/utils/module_docs_fragments/openstack.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index f989b3dcb80f8e..7e42841d6da9e9 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -91,7 +91,8 @@ class ModuleDocFragment(object): choices: [public, internal, admin] required: false default: public -requirements: [shade] +requirements: + - shade notes: - The standard OpenStack environment variables, such as C(OS_USERNAME) may be user instead of providing explicit values. 
From cbde1c5ec06a710616e734b5ae83dc3bb436ff4d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 4 May 2015 13:38:24 -0700 Subject: [PATCH 0513/3617] Fix extending non-dict types from doc fragments --- lib/ansible/utils/module_docs.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index ee99af2cb54dba..c6920571726931 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -23,6 +23,8 @@ import yaml import traceback +from collections import MutableMapping, MutableSet, MutableSequence + from ansible import utils # modules that are ok that they do not have documentation strings @@ -86,7 +88,14 @@ def get_docstring(filename, verbose=False): if not doc.has_key(key): doc[key] = value else: - doc[key].update(value) + if isinstance(doc[key], MutableMapping): + doc[key].update(value) + elif isinstance(doc[key], MutableSet): + doc[key].add(value) + elif isinstance(doc[key], MutableSequence): + doc[key] = sorted(frozenset(doc[key] + value)) + else: + raise Exception("Attempt to extend a documentation fragement of unknown type") if 'EXAMPLES' in (t.id for t in child.targets): plainexamples = child.value.s[1:] # Skip first empty line From b23a879273eab0c6e3aefa080f3b6aaefdadc110 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 May 2015 16:42:25 -0400 Subject: [PATCH 0514/3617] now properly inherit data from ansible.cfg for sudo/su ask pass fixes #10891 --- lib/ansible/utils/__init__.py | 4 ++-- v2/ansible/cli/__init__.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 7ed07a54c840d3..476a1e28e81939 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1024,9 +1024,9 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, if runas_opts: # priv user defaults to root later on to enable detecting 
when this option was given here - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + parser.add_option('-K', '--ask-sudo-pass', default=constants.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', + parser.add_option('--ask-su-pass', default=constants.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") diff --git a/v2/ansible/cli/__init__.py b/v2/ansible/cli/__init__.py index 0b0494e03282b6..4a7f5bbacc1d47 100644 --- a/v2/ansible/cli/__init__.py +++ b/v2/ansible/cli/__init__.py @@ -245,9 +245,9 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, if runas_opts: # priv user defaults to root later on to enable detecting when this option was given here - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', + parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") From 124a0d3519dac7d774c2cc5710a69b10a4ec4c92 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 May 2015 16:44:54 -0400 Subject: [PATCH 0515/3617] now properly 
inherits from ansible.cfg sudo/su ask pass fixes #10891 --- lib/ansible/cli/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 0b0494e03282b6..4a7f5bbacc1d47 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -245,9 +245,9 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, if runas_opts: # priv user defaults to root later on to enable detecting when this option was given here - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', + parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") From c488ea019f894a319e7bb27538a47722cdaf9fe5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 4 May 2015 13:52:16 -0700 Subject: [PATCH 0516/3617] Fix cs_instance docs --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 70ea05856356ad..28b0f3ce132dd7 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 70ea05856356ad36f48b4bb7267d637efc56d292 +Subproject commit 28b0f3ce132dd78e0407d5f95838d97fd69824b6 From 013c4631e3a65035471d85aabd9227c0fa701e10 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 May 2015 18:37:38 -0400 Subject: [PATCH 0517/3617] hack to prevent tempalte/copy errors on 
vagrant synced folders that report incorrectly errno 26 fixes #9526 --- lib/ansible/module_utils/basic.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 54a1a9cfff7f88..fd0108c98b7486 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1356,8 +1356,9 @@ def atomic_move(self, src, dest): # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic. os.rename(src, dest) except (IOError,OSError), e: - # only try workarounds for errno 18 (cross device), 1 (not permitted) and 13 (permission denied) - if e.errno != errno.EPERM and e.errno != errno.EXDEV and e.errno != errno.EACCES: + # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) + # and 26 (text file busy) which happens on vagrant synced folders + if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY] self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) dest_dir = os.path.dirname(dest) From 483c61414e67a1b6c9f7ace406298cb2db08bf1d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 May 2015 18:42:44 -0400 Subject: [PATCH 0518/3617] added missing : --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index fd0108c98b7486..0c42a2315af870 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1358,7 +1358,7 @@ def atomic_move(self, src, dest): except (IOError,OSError), e: # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied) # and 26 (text file busy) which happens on vagrant synced folders - if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY] + if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY]: 
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e)) dest_dir = os.path.dirname(dest) From efb190d5a5584a7500c5ceaea06a8ce76600668e Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Mon, 4 May 2015 23:42:46 -0400 Subject: [PATCH 0519/3617] add azure integration tests --- test/integration/azure.yml | 7 +++ test/integration/cleanup_azure.py | 1 + test/integration/credentials.template | 4 ++ .../roles/test_azure/defaults/main.yml | 10 +++ .../roles/test_azure/tasks/main.yml | 63 +++++++++++++++++++ 5 files changed, 85 insertions(+) create mode 100644 test/integration/azure.yml create mode 100644 test/integration/cleanup_azure.py create mode 100644 test/integration/roles/test_azure/defaults/main.yml create mode 100644 test/integration/roles/test_azure/tasks/main.yml diff --git a/test/integration/azure.yml b/test/integration/azure.yml new file mode 100644 index 00000000000000..4fceb2a13e7538 --- /dev/null +++ b/test/integration/azure.yml @@ -0,0 +1,7 @@ +- hosts: localhost + connection: local + gather_facts: no + tags: + - test_azure + roles: + - { role: test_azure } diff --git a/test/integration/cleanup_azure.py b/test/integration/cleanup_azure.py new file mode 100644 index 00000000000000..8b137891791fe9 --- /dev/null +++ b/test/integration/cleanup_azure.py @@ -0,0 +1 @@ + diff --git a/test/integration/credentials.template b/test/integration/credentials.template index 4894f5827b3ff5..78594aca97cc8b 100644 --- a/test/integration/credentials.template +++ b/test/integration/credentials.template @@ -13,5 +13,9 @@ service_account_email: pem_file: project_id: +# Azure Credentials +azure_subscription_id: +azure_cert_path: + # GITHUB SSH private key - a path to a SSH private key for use with github.com github_ssh_private_key: "{{ lookup('env','HOME') }}/.ssh/id_rsa" diff --git a/test/integration/roles/test_azure/defaults/main.yml b/test/integration/roles/test_azure/defaults/main.yml new file mode 100644 index 00000000000000..01018a9f7fd9cf --- /dev/null 
+++ b/test/integration/roles/test_azure/defaults/main.yml @@ -0,0 +1,10 @@ +--- +# defaults file for test_azure +instance_name: "{{ resource_prefix|lower }}" +cert_path: "{{ azure_cert_path }}" +subscription_id: "{{ azure_subscription_id }}" +storage_account: "{{ azure_storage_account|default('ansibleeast') }}" +role_size: "{{ azure_role_size|default('Basic_A0') }}" +user: "{{ azure_user|default('ansible_user') }}" +location: "{{ azure_location|default('East US') }}" +password: "{{ azure_password|default('abc123Q%') }}" diff --git a/test/integration/roles/test_azure/tasks/main.yml b/test/integration/roles/test_azure/tasks/main.yml new file mode 100644 index 00000000000000..cba93e3d65c1b0 --- /dev/null +++ b/test/integration/roles/test_azure/tasks/main.yml @@ -0,0 +1,63 @@ +# TODO: Implement create storage account feature. Currently, storage_account must be manually created on azure account. +# TODO: When more granular azure operations are implemented (i.e. list disk, list cloud services, etc). Use the +# fine-grain listings to ensure higher level operations are performed. +# ============================================================ +- name: test with no credentials + azure: + register: result + ignore_errors: true + +- name: assert failure when called with no credentials + assert: + that: + - 'result.failed' + - 'result.msg == "No subscription_id provided. 
Please set ''AZURE_SUBSCRIPTION_ID'' or use the ''subscription_id'' parameter"' + +# ============================================================ +- name: test credentials + azure: + subscription_id: "{{ subscription_id }}" + management_cert_path: "{{ cert_path }}" + register: result + ignore_errors: true + +- name: assert failure when called with credentials and no parameters + assert: + that: + - 'result.failed' + - 'result.msg == "name parameter is required for new instance"' + +# ============================================================ +- name: test status=Running (expected changed=true) + azure: + subscription_id: "{{ subscription_id }}" + management_cert_path: "{{ cert_path }}" + name: "{{ instance_name }}" + image: "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_4-LTS-amd64-server-20140514-en-us-30GB" + storage_account: "{{ storage_account }}" + user: "{{ user }}" + role_size: "{{ role_size }}" + password: "{{ password }}" + location: "{{ location }}" + wait: yes + state: present + register: result + +- name: assert state=Running (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.deployment.name == "{{ instance_name }}"' + - 'result.deployment.status == "Running"' + +# ============================================================ +- name: test state=absent (expected changed=true) + azure: + subscription_id: "{{ subscription_id }}" + management_cert_path: "{{ cert_path }}" + name: "{{ instance_name }}" + #storage_account: "{{ storage_account }}" + #location: "{{ location }}" + wait: yes + state: absent + register: result From cf300da02cb58f88086da4b76e175e2296a1f11c Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 5 May 2015 08:42:07 -0400 Subject: [PATCH 0520/3617] azure changes to Makefile --- test/integration/Makefile | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 28de76c7cdf759..923a29bc9fec98 100644 --- a/test/integration/Makefile 
+++ b/test/integration/Makefile @@ -105,13 +105,16 @@ test_tags: [ "$$(ansible-playbook --list-tasks --skip-tags tag test_tags.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) | fgrep Task_with | xargs)" = "Task_with_always_tag TAGS: [always] Task_without_tag TAGS: []" ] -cloud: amazon rackspace +cloud: amazon rackspace azure cloud_cleanup: amazon_cleanup rackspace_cleanup amazon_cleanup: python cleanup_ec2.py -y --match="^$(CLOUD_RESOURCE_PREFIX)" +azure_cleanup: + python cleanup_azure.py -y --match="^$(CLOUD_RESOURCE_PREFIX)" + gce_setup: python setup_gce.py "$(CLOUD_RESOURCE_PREFIX)" @@ -131,6 +134,12 @@ amazon: $(CREDENTIALS_FILE) CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make amazon_cleanup ; \ exit $$RC; +azure: $(CREDENTIALS_FILE) + ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook azure.yml -i $(INVENTORY) $(CREDENTIALS_ARG) -e "resource_prefix=$(CLOUD_RESOURCE_PREFIX)" -v $(TEST_FLAGS) ; \ + RC=$$? ; \ + CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make azure_cleanup ; \ + exit $$RC; + gce: $(CREDENTIALS_FILE) CLOUD_RESOURCE_PREFIX="$(CLOUD_RESOURCE_PREFIX)" make gce_setup ; \ ansible-playbook gce.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -e "resource_prefix=$(CLOUD_RESOURCE_PREFIX)" -v $(TEST_FLAGS) ; \ From e971b60f26c8fbe303da9ec2e558e61a199b5262 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 5 May 2015 09:48:26 -0400 Subject: [PATCH 0521/3617] updated docs about ansible_hosts/inventory env vars --- docsite/rst/intro_installation.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 604be2abc9ea52..6dc91c32bbcb73 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -137,13 +137,17 @@ which point at Ansible's own modules (not the same kind of modules, alas). 
$ git submodule update --init --recursive Once running the env-setup script you'll be running from checkout and the default inventory file -will be /etc/ansible/hosts. You can optionally specify an inventory file (see :doc:`intro_inventory`) +will be /etc/ansible/hosts. You can optionally specify an inventory file (see :doc:`intro_inventory`) other than /etc/ansible/hosts: .. code-block:: bash $ echo "127.0.0.1" > ~/ansible_hosts - $ export ANSIBLE_HOSTS=~/ansible_hosts + $ export ANSIBLE_INVENTORY=~/ansible_hosts + +.. note:: + + ANSIBLE_INVENTORY is available starting at 1.9 and subtitutes the deprecated ANSIBLE_HOSTS You can read more about the inventory file in later parts of the manual. From 8ad2eac7e1f97b20b20a1a6d37d5c8a080a7c9da Mon Sep 17 00:00:00 2001 From: Till Maas Date: Tue, 5 May 2015 15:22:44 +0200 Subject: [PATCH 0522/3617] Mention ANSIBLE_INVENTORY Support for ANSIBLE_HOSTS is faded out, see commit c73254543a9fc66bf2a22f978c6e979ae361221c, therefore do not mention it in the man pages. --- docs/man/man1/ansible-playbook.1.asciidoc.in | 2 +- docs/man/man1/ansible.1.asciidoc.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index e6b6c680a76527..44513d111124fe 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -133,7 +133,7 @@ ENVIRONMENT The following environment variables may be specified. -ANSIBLE_HOSTS -- Override the default ansible hosts file +ANSIBLE_INVENTORY -- Override the default ansible inventory file ANSIBLE_LIBRARY -- Override the default ansible module library path diff --git a/docs/man/man1/ansible.1.asciidoc.in b/docs/man/man1/ansible.1.asciidoc.in index 5ac1e49404335c..f0f81b7d9bd330 100644 --- a/docs/man/man1/ansible.1.asciidoc.in +++ b/docs/man/man1/ansible.1.asciidoc.in @@ -153,7 +153,7 @@ ENVIRONMENT The following environment variables may be specified. 
-ANSIBLE_HOSTS -- Override the default ansible hosts file +ANSIBLE_INVENTORY -- Override the default ansible inventory file ANSIBLE_LIBRARY -- Override the default ansible module library path From f36a92f72920538d52a69b2b8b4dae02f5c1724c Mon Sep 17 00:00:00 2001 From: Till Maas Date: Tue, 5 May 2015 15:24:36 +0200 Subject: [PATCH 0523/3617] Re-Generate man pages --- docs/man/man1/ansible-galaxy.1 | 6 +++--- docs/man/man1/ansible-playbook.1 | 16 +++++++++++----- docs/man/man1/ansible-pull.1 | 12 +++++++++--- docs/man/man1/ansible.1 | 30 +++++++++++++----------------- 4 files changed, 36 insertions(+), 28 deletions(-) diff --git a/docs/man/man1/ansible-galaxy.1 b/docs/man/man1/ansible-galaxy.1 index eac74b6a85dd46..f8486c75f422bc 100644 --- a/docs/man/man1/ansible-galaxy.1 +++ b/docs/man/man1/ansible-galaxy.1 @@ -2,12 +2,12 @@ .\" Title: ansible-galaxy .\" Author: [see the "AUTHOR" section] .\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 +.\" Date: 05/05/2015 .\" Manual: System administration commands -.\" Source: Ansible 1.9 +.\" Source: Ansible 2.0.0 .\" Language: English .\" -.TH "ANSIBLE\-GALAXY" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" +.TH "ANSIBLE\-GALAXY" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- diff --git a/docs/man/man1/ansible-playbook.1 b/docs/man/man1/ansible-playbook.1 index 085c5f79f1e11a..f1a1babc763053 100644 --- a/docs/man/man1/ansible-playbook.1 +++ b/docs/man/man1/ansible-playbook.1 @@ -2,12 +2,12 @@ .\" Title: ansible-playbook .\" Author: :doctype:manpage .\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 +.\" Date: 05/05/2015 .\" Manual: System administration commands -.\" Source: Ansible 1.9 +.\" Source: Ansible 2.0.0 .\" Language: English .\" -.TH "ANSIBLE\-PLAYBOOK" "1" 
"12/09/2014" "Ansible 1\&.9" "System administration commands" +.TH "ANSIBLE\-PLAYBOOK" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -66,7 +66,7 @@ search path to load modules from\&. The default is .PP \fB\-e\fR \fIVARS\fR, \fB\-\-extra\-vars=\fR\fIVARS\fR .RS 4 -Extra variables to inject into a playbook, in key=value key=value format or as quoted JSON (hashes and arrays)\&. +Extra variables to inject into a playbook, in key=value key=value format or as quoted JSON (hashes and arrays)\&. To load variables from a file, specify the file preceded by @ (e\&.g\&. @vars\&.yml)\&. .RE .PP \fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR @@ -156,7 +156,7 @@ Outputs a list of matching hosts; does not execute anything else\&. .sp The following environment variables may be specified\&. .sp -ANSIBLE_HOSTS \(em Override the default ansible hosts file +ANSIBLE_INVENTORY \(em Override the default ansible inventory file .sp ANSIBLE_LIBRARY \(em Override the default ansible module library path .SH "FILES" @@ -181,3 +181,9 @@ Ansible is released under the terms of the GPLv3 License\&. \fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible +.SH "AUTHOR" +.PP +\fB:doctype:manpage\fR +.RS 4 +Author. 
+.RE diff --git a/docs/man/man1/ansible-pull.1 b/docs/man/man1/ansible-pull.1 index a9b69788b47533..029d1e45bbc6bd 100644 --- a/docs/man/man1/ansible-pull.1 +++ b/docs/man/man1/ansible-pull.1 @@ -2,12 +2,12 @@ .\" Title: ansible .\" Author: :doctype:manpage .\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 +.\" Date: 05/05/2015 .\" Manual: System administration commands -.\" Source: Ansible 1.9 +.\" Source: Ansible 2.0.0 .\" Language: English .\" -.TH "ANSIBLE" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" +.TH "ANSIBLE" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -104,3 +104,9 @@ Ansible is released under the terms of the GPLv3 License\&. \fBansible\fR(1), \fBansible\-playbook\fR(1), \fBansible\-doc\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible +.SH "AUTHOR" +.PP +\fB:doctype:manpage\fR +.RS 4 +Author. 
+.RE diff --git a/docs/man/man1/ansible.1 b/docs/man/man1/ansible.1 index eb2e8aaeeb2915..102ba7e5b0ecee 100644 --- a/docs/man/man1/ansible.1 +++ b/docs/man/man1/ansible.1 @@ -2,12 +2,12 @@ .\" Title: ansible .\" Author: :doctype:manpage .\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 +.\" Date: 05/05/2015 .\" Manual: System administration commands -.\" Source: Ansible 1.9 +.\" Source: Ansible 2.0.0 .\" Language: English .\" -.TH "ANSIBLE" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" +.TH "ANSIBLE" "1" "05/05/2015" "Ansible 2\&.0\&.0" "System administration commands" .\" ----------------------------------------------------------------- .\" * Define some portability stuff .\" ----------------------------------------------------------------- @@ -89,19 +89,14 @@ The to pass to the module\&. .RE .PP -\fB\-k\fR, \fB\-\-ask\-pass\fR +\fB\-k\fR, \fB\-\-ask\-pass\fR .RS 4 Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&. .RE .PP -\fB--ask-su-pass\fR -.RS 4 -Prompt for the su password instead of assuming key\-based authentication with ssh\-agent\&. -.RE -.PP \fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR .RS 4 -Prompt for the password to use with \-\-sudo, if any\&. +Prompt for the password to use with \-\-sudo, if any .RE .PP \fB\-o\fR, \fB\-\-one\-line\fR @@ -111,12 +106,7 @@ Try to output everything on one line\&. .PP \fB\-s\fR, \fB\-\-sudo\fR .RS 4 -Run the command as the user given by \-u and sudo to root. -.RE -.PP -\fB\-S\fR, \fB\-\-su\fR -.RS 4 -Run operations with su\&. +Run the command as the user given by \-u and sudo to root\&. .RE .PP \fB\-t\fR \fIDIRECTORY\fR, \fB\-\-tree=\fR\fIDIRECTORY\fR @@ -203,7 +193,7 @@ Ranges of hosts are also supported\&. For more information and additional option .sp The following environment variables may be specified\&. 
.sp -ANSIBLE_HOSTS \(em Override the default ansible hosts file +ANSIBLE_INVENTORY \(em Override the default ansible inventory file .sp ANSIBLE_LIBRARY \(em Override the default ansible module library path .sp @@ -221,3 +211,9 @@ Ansible is released under the terms of the GPLv3 License\&. \fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1) .sp Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible +.SH "AUTHOR" +.PP +\fB:doctype:manpage\fR +.RS 4 +Author. +.RE From ba822ce0f9383c979fff8f93d945227f905f0952 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 5 May 2015 10:23:10 -0500 Subject: [PATCH 0524/3617] Backporting release info/changelog stuff to devel --- CHANGELOG.md | 12 ++++++++++++ RELEASES.txt | 9 +++++++-- packaging/debian/changelog | 11 ++++++++--- packaging/rpm/ansible.spec | 6 ++++++ 4 files changed, 33 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 92354cd8520170..c85464edd689b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,18 @@ New Inventory scripts: Other Notable Changes: +## 1.9.1 "Dancing In the Street" - Apr 27, 2015 + +* Fixed a bug related to Kerberos auth when using winrm with a domain account. +* Fixing several bugs in the s3 module. +* Fixed a bug with upstart service detection in the service module. +* Fixed several bugs with the user module when used on OSX. +* Fixed unicode handling in some module situations (assert and shell/command execution). +* Fixed a bug in redhat_subscription when using the activationkey parameter. +* Fixed a traceback in the gce module on EL6 distros when multiple pycrypto installations are available. +* Added support for PostgreSQL 9.4 in rds_param_group +* Several other minor fixes. 
+ ## 1.9 "Dancing In the Street" - Mar 25, 2015 Major changes: diff --git a/RELEASES.txt b/RELEASES.txt index ddcce78efab557..c147deddf384ae 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -4,12 +4,17 @@ Ansible Releases at a Glance Active Development ++++++++++++++++++ -1.9 "Dancing In the Street" - in progress +2.0 "TBD" - in progress Released ++++++++ -1.8.1 "You Really Got Me" -- 11-26-2014 +1.9.1 "Dancing In the Streets" 04-27-2015 +1.9.0 "Dancing In the Streets" 03-25-2015 +1.8.4 "You Really Got Me" ---- 02-19-2015 +1.8.3 "You Really Got Me" ---- 02-17-2015 +1.8.2 "You Really Got Me" ---- 12-04-2014 +1.8.1 "You Really Got Me" ---- 11-26-2014 1.7.2 "Summer Nights" -------- 09-24-2014 1.7.1 "Summer Nights" -------- 08-14-2014 1.7 "Summer Nights" -------- 08-06-2014 diff --git a/packaging/debian/changelog b/packaging/debian/changelog index 84bf7e770336c4..311da7fda7d237 100644 --- a/packaging/debian/changelog +++ b/packaging/debian/changelog @@ -3,13 +3,18 @@ ansible (%VERSION%-%RELEASE%~%DIST%) %DIST%; urgency=low * %VERSION% release -- Ansible, Inc. %DATE% ->>>>>>> Stashed changes + +ansible (1.9.1) unstable; urgency=low + + * 1.9.1 + + -- Ansible, Inc. Mon, 27 Apr 2015 17:00:00 -0500 ansible (1.9.0.1) unstable; urgency=low - * 1.9 release + * 1.9.0.1 - -- Ansible, Inc. Wed, 25 Mar 2015 17:00:00 -0500 + -- Ansible, Inc. Wed, 25 Mar 2015 15:00:00 -0500 ansible (1.8.4) unstable; urgency=low diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index 8ae7286b63d61d..394017dc0fbdae 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -110,6 +110,12 @@ rm -rf %{buildroot} %changelog +* Mon Apr 27 2015 Ansible, Inc. - 1.9.1 +- Release 1.9.1 + +* Wed Mar 25 2015 Ansible, Inc. - 1.9.0 +- Release 1.9.0 + * Thu Feb 19 2015 Ansible, Inc. 
- 1.8.4 - Release 1.8.4 From fba5588028def5463f9b281fe69f117b76c3845b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 5 May 2015 13:17:04 -0500 Subject: [PATCH 0525/3617] Handle empty role definitions in YAML (v2) --- lib/ansible/playbook/play.py | 3 +++ lib/ansible/playbook/role/metadata.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index b99c01fdf74e63..b247503d9cb14c 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -174,6 +174,9 @@ def _load_roles(self, attr, ds): list of role definitions and creates the Role from those objects ''' + if ds is None: + ds = [] + role_includes = load_list_of_roles(ds, variable_manager=self._variable_manager, loader=self._loader) roles = [] diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py index 461a9a4a6271f9..61e92ce9b50d1c 100644 --- a/lib/ansible/playbook/role/metadata.py +++ b/lib/ansible/playbook/role/metadata.py @@ -65,6 +65,9 @@ def _load_dependencies(self, attr, ds): which returns a list of RoleInclude objects ''' + if ds is None: + ds = [] + current_role_path = None if self._owner: current_role_path = os.path.dirname(self._owner._role_path) From 8fae2abed4c12a55ae0c98b374b9bfd2fb4d287e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 5 May 2015 13:41:32 -0500 Subject: [PATCH 0526/3617] Properly fail out of the task loop in the linear strategy on failures (v2) --- lib/ansible/executor/playbook_executor.py | 8 +++++++- lib/ansible/plugins/strategies/linear.py | 3 +++ samples/test_play_failure.yml | 9 +++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 samples/test_play_failure.yml diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 2d5958697b395e..5d72ef15bd0bf0 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -117,15 +117,17 @@ def 
run(self): if len(batch) == 0: self._tqm.send_callback('v2_playbook_on_play_start', new_play) self._tqm.send_callback('v2_playbook_on_no_hosts_matched') - result = 0 + result = 1 break # restrict the inventory to the hosts in the serialized batch self._inventory.restrict_to_hosts(batch) # and run it... result = self._tqm.run(play=play) + # if the last result wasn't zero, break out of the serial batch loop if result != 0: break + # if the last result wasn't zero, break out of the play loop if result != 0: break @@ -134,6 +136,10 @@ def run(self): if entry: entrylist.append(entry) # per playbook + # if the last result wasn't zero, break out of the playbook file name loop + if result != 0: + break + if entrylist: return entrylist diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index 95ecac1451f51a..bd510dc55742ff 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -226,6 +226,9 @@ def __repr__(self): # FIXME: this should also be moved to the base class in a method included_files = [] for res in host_results: + if res.is_failed(): + return 1 + if res._task.action == 'include': if res._task.loop: include_results = res._result['results'] diff --git a/samples/test_play_failure.yml b/samples/test_play_failure.yml new file mode 100644 index 00000000000000..b33fc2e757cd05 --- /dev/null +++ b/samples/test_play_failure.yml @@ -0,0 +1,9 @@ +- hosts: localhost + gather_facts: no + tasks: + - fail: + +- hosts: localhost + gather_facts: no + tasks: + - debug: msg="you should not see me..." 
From 4c8d27f7bbb196486b27b46b78053bac0ada2def Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 5 May 2015 13:48:04 -0700 Subject: [PATCH 0527/3617] Make module formatting into links to the other module docs --- hacking/module_formatter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index c3aca94949c2bf..32df84deb9b7a1 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -68,7 +68,7 @@ def rst_ify(text): t = _ITALIC.sub(r'*' + r"\1" + r"*", text) t = _BOLD.sub(r'**' + r"\1" + r"**", t) - t = _MODULE.sub(r'``' + r"\1" + r"``", t) + t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t) t = _URL.sub(r"\1", t) t = _CONST.sub(r'``' + r"\1" + r"``", t) From 483924336691f75029f3a34a24578f3bc3de57eb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 5 May 2015 13:50:46 -0700 Subject: [PATCH 0528/3617] Documentation fixes --- docsite/rst/become.rst | 4 ++-- docsite/rst/guide_aws.rst | 2 +- docsite/rst/intro_configuration.rst | 2 +- lib/ansible/utils/module_docs_fragments/cloudstack.py | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 42484d9816afd8..4507b191009909 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -26,8 +26,8 @@ become_method at play or task level overrides the default method set in ansible.cfg -New ansible_ variables ----------------------- +New ansible\_ variables +----------------------- Each allows you to set an option per group and/or host ansible_become diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index 97eb0904fe2f98..c4e12eab4970a8 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -157,7 +157,7 @@ it will be automatically discoverable via a dynamic group like so:: Using this philosophy can be a great way to keep systems separated by the function they perform. 
In this example, if we wanted to define variables that are automatically applied to each machine tagged with the 'class' of 'webserver', 'group_vars' -in ansible can be used. See :doc:`splitting_out_vars`. +in ansible can be used. See :ref:`splitting_out_vars`. Similar groups are available for regions and other classifications, and can be similarly assigned variables using the same mechanism. diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 73d8fd0f0d6920..368013d7f1a415 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -264,7 +264,7 @@ This option causes notified handlers to run on a host even if a failure occurs o force_handlers = True The default is False, meaning that handlers will not run if a failure has occurred on a host. -This can also be set per play or on the command line. See :doc:`_handlers_and_failure` for more details. +This can also be set per play or on the command line. See :ref:`handlers_and_failure` for more details. .. _forks: diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py index 8d173ea756f3c4..2e89178d0021c4 100644 --- a/lib/ansible/utils/module_docs_fragments/cloudstack.py +++ b/lib/ansible/utils/module_docs_fragments/cloudstack.py @@ -51,12 +51,12 @@ class ModuleDocFragment(object): notes: - Ansible uses the C(cs) library's configuration method if credentials are not provided by the options C(api_url), C(api_key), C(api_secret). - Configuration is read from several locations, in the following order: + Configuration is read from several locations, in the following order":" - The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and - C(CLOUDSTACK_METHOD) environment variables. + C(CLOUDSTACK_METHOD) environment variables. - A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file, - A C(cloudstack.ini) file in the current working directory. 
- A C(.cloudstack.ini) file in the users home directory. - See https://github.com/exoscale/cs for more information. + See https://github.com/exoscale/cs for more information. - This module supports check mode. ''' From 6cbff51408234364d6b9259054b49167c249a164 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 5 May 2015 13:50:59 -0700 Subject: [PATCH 0529/3617] Pick up documentation fixes --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c4f6e63117cd37..73737b294efe29 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c4f6e63117cd378ed5b144bf6c8391420a2381ab +Subproject commit 73737b294efe299097eee959d3ba42cfcfd88438 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 28b0f3ce132dd7..3d00e1c5d10dde 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 28b0f3ce132dd78e0407d5f95838d97fd69824b6 +Subproject commit 3d00e1c5d10dde12146d52dbc493cad6454756b5 From 9b95c22dc0b82bf7888e8fe42702b156f6f01674 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 5 May 2015 13:59:00 -0700 Subject: [PATCH 0530/3617] Update for doc fixes --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 73737b294efe29..1fa3efd7b4ac55 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 73737b294efe299097eee959d3ba42cfcfd88438 +Subproject commit 1fa3efd7b4ac55d429bd470a1f6e8f82a3e94182 From d34b586eb6bf162c6c168a3065b3471f0522abf8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 5 May 2015 16:40:11 -0500 Subject: [PATCH 0531/3617] Add ability for connection plugins to set attributes based on host variables (v2) --- lib/ansible/executor/task_executor.py | 4 +++- 
lib/ansible/plugins/connections/__init__.py | 11 +++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 7fa21349483f1b..6d62eea68bab66 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -209,7 +209,9 @@ def _execute(self, variables=None): # get the connection and the handler for this execution self._connection = self._get_connection(variables) - self._handler = self._get_action_handler(connection=self._connection, templar=templar) + self._connection.set_host_overrides(host=self._host) + + self._handler = self._get_action_handler(connection=self._connection, templar=templar) # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. We do this before the post validation due to diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index d11f3651827304..5558f5ba86a2b3 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -64,6 +64,17 @@ def _become_method_supported(self, become_method): raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method) + def set_host_overrides(self, host): + ''' + An optional method, which can be used to set connection plugin parameters + from variables set on the host (or groups to which the host belongs) + + Any connection plugin using this should first initialize its attributes in + an overridden `def __init__(self):`, and then use `host.get_vars()` to find + variables which may be used to set those attributes in this method. 
+ ''' + pass + @abstractproperty def transport(self): """String used to identify this Connection class from other classes""" From 9d572afb613ec83b6040af0c8de4f534fc0133fe Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Wed, 6 May 2015 06:33:34 +0100 Subject: [PATCH 0532/3617] Add advice about using unpatched Windows Management Framework 3.0. Hopefully this should reduce bug reports like #10878 and #10825 --- docsite/rst/intro_windows.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index 00cd8af404f038..b675cd77d9d683 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -84,6 +84,17 @@ To automate setup of WinRM, you can run `this PowerShell script Date: Wed, 6 May 2015 01:31:02 -0500 Subject: [PATCH 0533/3617] Add serializer/deserializer to plugin base object (v2) Fixes #10923 --- lib/ansible/plugins/__init__.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index 5791677bd26f9e..ad18bfe09bc9d5 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -77,6 +77,36 @@ def __init__(self, class_name, package, config, subdir, aliases={}): self._extra_dirs = [] self._searched_paths = set() + def __setstate__(self, data): + ''' + Deserializer. + ''' + + class_name = data.get('class_name') + package = data.get('package') + config = data.get('config') + subdir = data.get('subdir') + aliases = data.get('aliases') + + self.__init__(class_name, package, config, subdir, aliases) + self._extra_dirs = data.get('_extra_dirs', []) + self._searched_paths = data.get('_searched_paths', set()) + + def __getstate__(self): + ''' + Serializer. 
+ ''' + + return dict( + class_name = self.class_name, + package = self.package, + config = self.config, + subdir = self.subdir, + aliases = self.aliases, + _extra_dirs = self._extra_dirs, + _searched_paths = self._searched_paths, + ) + def print_paths(self): ''' Returns a string suitable for printing of the search path ''' From 50542db0bed0f5be4fd06d11fea489ccbc2b8902 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 6 May 2015 02:56:52 -0500 Subject: [PATCH 0534/3617] Make the default playbook name an empty string (v2) --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index b247503d9cb14c..c891571a985859 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -58,7 +58,7 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='string', default='smart') _hosts = FieldAttribute(isa='list', default=[], required=True) - _name = FieldAttribute(isa='string', default='') + _name = FieldAttribute(isa='string', default='') # Variable Attributes _vars_files = FieldAttribute(isa='list', default=[]) From 5489d172de95a94bb92e63090202e519b2204c39 Mon Sep 17 00:00:00 2001 From: gimoh Date: Wed, 6 May 2015 11:57:25 +0100 Subject: [PATCH 0535/3617] Use same interpreter for test-module and module it runs Default python interpreter to the same interpreter the test-module script is executed with. This is so that the interpreter doesn't have to be specified twice in the command when using non-default python (e.g. ``/path/to/python ./hacking/test-module -I python=/path/to/python ...``) --- hacking/test-module | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hacking/test-module b/hacking/test-module index c226f32e889906..44b49b06b9e261 100755 --- a/hacking/test-module +++ b/hacking/test-module @@ -59,7 +59,8 @@ def parse(): help="path to python debugger (e.g. 
/usr/bin/pdb)") parser.add_option('-I', '--interpreter', dest='interpreter', help="path to interpreter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)", - metavar='INTERPRETER_TYPE=INTERPRETER_PATH') + metavar='INTERPRETER_TYPE=INTERPRETER_PATH', + default='python={}'.format(sys.executable)) parser.add_option('-c', '--check', dest='check', action='store_true', help="run the module in check mode") options, args = parser.parse_args() From 374ea10e6f33055bc9114ee3e5b38aee5e59fe41 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 6 May 2015 06:02:33 -0700 Subject: [PATCH 0536/3617] Update the core module pointer --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 1fa3efd7b4ac55..a6c0cf036918e3 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 1fa3efd7b4ac55d429bd470a1f6e8f82a3e94182 +Subproject commit a6c0cf036918e3bb637602fdd9435857c45f7405 From 79fe1901f6642e9178d2ae778613f7be888d246d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 6 May 2015 06:05:44 -0700 Subject: [PATCH 0537/3617] Update module pointers for v2 --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 85c8a892c80b92..aedcd37ff69e07 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 85c8a892c80b92730831d95fa654ef6d35b0eca0 +Subproject commit aedcd37ff69e074f702ef592096f2a02448c4936 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 2690f096a47646..3d00e1c5d10dde 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 2690f096a47646cd17db135648def88afc40d92c +Subproject commit 3d00e1c5d10dde12146d52dbc493cad6454756b5 From 7733dc7bb51dd1632babfbdf90e6c305cc5764a7 
Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 6 May 2015 06:41:16 -0700 Subject: [PATCH 0538/3617] Fix for new octal syntax --- lib/ansible/plugins/connections/paramiko_ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py index 01e95451b80a55..797eeea9e021ef 100644 --- a/lib/ansible/plugins/connections/paramiko_ssh.py +++ b/lib/ansible/plugins/connections/paramiko_ssh.py @@ -370,7 +370,7 @@ def close(self): # the file will be moved into place rather than cleaned up. tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False) - os.chmod(tmp_keyfile.name, key_stat.st_mode & 07777) + os.chmod(tmp_keyfile.name, key_stat.st_mode & 0o7777) os.chown(tmp_keyfile.name, key_stat.st_uid, key_stat.st_gid) self._save_ssh_host_keys(tmp_keyfile.name) From 4f28a814ae97eb81c16a90a7d217b5a301041627 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 6 May 2015 08:46:33 -0500 Subject: [PATCH 0539/3617] Return a list instead of tuple when password is specified to ssh connection plugin (v2) --- lib/ansible/plugins/connections/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 49e1e3b966098b..7c95cc3c0f532c 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -144,7 +144,7 @@ def _password_cmd(self): except OSError: raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program") (self.rfd, self.wfd) = os.pipe() - return ("sshpass", "-d{0}".format(self.rfd)) + return ["sshpass", "-d{0}".format(self.rfd)] return [] def _send_password(self): From 1152c7327af74b4fbd57b47a83833e8647295b50 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 6 May 2015 15:18:37 -0500 Subject: [PATCH 0540/3617] Fix serialization bug for plugins (v2) --- 
lib/ansible/plugins/__init__.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index ad18bfe09bc9d5..36b5c3d0334e68 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -88,6 +88,9 @@ def __setstate__(self, data): subdir = data.get('subdir') aliases = data.get('aliases') + PATH_CACHE[class_name] = data.get('PATH_CACHE') + PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE') + self.__init__(class_name, package, config, subdir, aliases) self._extra_dirs = data.get('_extra_dirs', []) self._searched_paths = data.get('_searched_paths', set()) @@ -98,13 +101,15 @@ def __getstate__(self): ''' return dict( - class_name = self.class_name, - package = self.package, - config = self.config, - subdir = self.subdir, - aliases = self.aliases, - _extra_dirs = self._extra_dirs, - _searched_paths = self._searched_paths, + class_name = self.class_name, + package = self.package, + config = self.config, + subdir = self.subdir, + aliases = self.aliases, + _extra_dirs = self._extra_dirs, + _searched_paths = self._searched_paths, + PATH_CACHE = PATH_CACHE[self.class_name], + PLUGIN_PATH_CACHE = PLUGIN_PATH_CACHE[self.class_name], ) def print_paths(self): @@ -258,12 +263,14 @@ def get(self, name, *args, **kwargs): path = self.find_plugin(name) if path is None: return None - elif kwargs.get('class_only', False): - return getattr(self._module_cache[path], self.class_name) if path not in self._module_cache: self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) - return getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + + if kwargs.get('class_only', False): + return getattr(self._module_cache[path], self.class_name) + else: + return getattr(self._module_cache[path], self.class_name)(*args, **kwargs) def all(self, *args, **kwargs): ''' instantiates all plugins with the same arguments ''' @@ 
-275,12 +282,15 @@ def all(self, *args, **kwargs): name, ext = os.path.splitext(os.path.basename(path)) if name.startswith("_"): continue + if path not in self._module_cache: self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) + if kwargs.get('class_only', False): obj = getattr(self._module_cache[path], self.class_name) else: obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + # set extra info on the module, in case we want it later setattr(obj, '_original_path', path) yield obj From 1108fd3dd2d438da0d352571f272223b0911b6d4 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Thu, 7 May 2015 12:22:08 +0100 Subject: [PATCH 0541/3617] Update playbooks_tags.rst highlight the command part of the sentence to clarify use of 'all' tag. --- docsite/rst/playbooks_tags.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_tags.rst b/docsite/rst/playbooks_tags.rst index 01c4f6fa2b0449..a03b975a4eb04a 100644 --- a/docsite/rst/playbooks_tags.rst +++ b/docsite/rst/playbooks_tags.rst @@ -59,7 +59,7 @@ Example:: - tag1 There are another 3 special keywords for tags, 'tagged', 'untagged' and 'all', which run only tagged, only untagged -and all tasks respectively. By default ansible runs as if --tags all had been specified. +and all tasks respectively. By default ansible runs as if '--tags all' had been specified. .. 
seealso:: From bc4d51a4fdc35ceb85c60fc0bbd4adeeb015f662 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 7 May 2015 08:17:25 -0700 Subject: [PATCH 0542/3617] Update module refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index a6c0cf036918e3..7540cbb845d69b 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit a6c0cf036918e3bb637602fdd9435857c45f7405 +Subproject commit 7540cbb845d69b7278c2543b3c469a2db971e379 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 3d00e1c5d10dde..66a96ad6e2a93f 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 3d00e1c5d10dde12146d52dbc493cad6454756b5 +Subproject commit 66a96ad6e2a93f7ed786c630cf81e996b9a50403 From cee7cd5d3b979f7481e0c7c3e42aa040193d14a7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 7 May 2015 08:29:04 -0700 Subject: [PATCH 0543/3617] Update v2 module refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index aedcd37ff69e07..31b6f75570de2d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit aedcd37ff69e074f702ef592096f2a02448c4936 +Subproject commit 31b6f75570de2d9c321c596e659fd5daf42e786d diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 3d00e1c5d10dde..66a96ad6e2a93f 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 3d00e1c5d10dde12146d52dbc493cad6454756b5 +Subproject commit 66a96ad6e2a93f7ed786c630cf81e996b9a50403 From 4f4df29cb0bddde5c88c9357f78c24c1ef0a0ac7 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 6 May 2015 17:06:43 -0500 Subject: [PATCH 0544/3617] Add ability to 
specify using ssh_args in synchronize for v2 --- lib/ansible/plugins/action/synchronize.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index 1bc64ff4d5bcfb..171bcef6e0280e 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -22,6 +22,8 @@ from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean +from ansible import constants + class ActionModule(ActionBase): @@ -81,6 +83,7 @@ def run(self, tmp=None, task_vars=dict()): src = self._task.args.get('src', None) dest = self._task.args.get('dest', None) + use_ssh_args = self._task.args.pop('use_ssh_args', None) # FIXME: this doesn't appear to be used anywhere? local_rsync_path = task_vars.get('ansible_rsync_path') @@ -162,6 +165,9 @@ def run(self, tmp=None, task_vars=dict()): if rsync_path: self._task.args['rsync_path'] = '"%s"' % rsync_path + if use_ssh_args: + self._task.args['ssh_args'] = constants.ANSIBLE_SSH_ARGS + # run the module and store the result result = self._execute_module('synchronize') From 88e8ecb620e99948f162b920354366851d79f94f Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 7 May 2015 12:20:11 -0500 Subject: [PATCH 0545/3617] Actually get the synchronize action plugin to work --- lib/ansible/plugins/action/synchronize.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index 171bcef6e0280e..c1b2f60e7f0700 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -51,7 +51,7 @@ def _process_origin(self, host, path, user): path = self._get_absolute_path(path=path) return path - def _process_remote(self, host, task, path, user): + def _process_remote(self, host, path, user): transport = self._connection_info.connection return_data = None if not host 
in ['127.0.0.1', 'localhost'] or transport != "local": @@ -71,7 +71,7 @@ def _process_remote(self, host, task, path, user): def run(self, tmp=None, task_vars=dict()): ''' generates params and passes them on to the rsync module ''' - original_transport = task_vars.get('ansible_connection', self._connection_info.connection) + original_transport = task_vars.get('ansible_connection') or self._connection_info.connection transport_overridden = False if task_vars.get('delegate_to') is None: task_vars['delegate_to'] = '127.0.0.1' @@ -79,7 +79,7 @@ def run(self, tmp=None, task_vars=dict()): if original_transport != 'local': task_vars['ansible_connection'] = 'local' transport_overridden = True - self.runner.sudo = False + self._connection_info.become = False src = self._task.args.get('src', None) dest = self._task.args.get('dest', None) @@ -90,14 +90,14 @@ def run(self, tmp=None, task_vars=dict()): # from the perspective of the rsync call the delegate is the localhost src_host = '127.0.0.1' - dest_host = task_vars.get('ansible_ssh_host', task_vars.get('inventory_hostname')) + dest_host = task_vars.get('ansible_ssh_host') or task_vars.get('inventory_hostname') # allow ansible_ssh_host to be templated dest_is_local = dest_host in ['127.0.0.1', 'localhost'] # CHECK FOR NON-DEFAULT SSH PORT dest_port = self._task.args.get('dest_port') - inv_port = task_vars.get('ansible_ssh_port', task_vars.get('inventory_hostname')) + inv_port = task_vars.get('ansible_ssh_port') or task_vars.get('inventory_hostname') if inv_port != dest_port and inv_port != task_vars.get('inventory_hostname'): dest_port = inv_port @@ -133,17 +133,18 @@ def run(self, tmp=None, task_vars=dict()): user = task_vars['hostvars'][conn.delegate].get('ansible_ssh_user') if not use_delegate or not user: - user = task_vars.get('ansible_ssh_user', self.runner.remote_user) + user = task_vars.get('ansible_ssh_user') or self._connection_info.remote_user if use_delegate: # FIXME - private_key = 
task_vars.get('ansible_ssh_private_key_file', self.runner.private_key_file) + private_key = task_vars.get('ansible_ssh_private_key_file') or self._connection_info.private_key_file else: - private_key = task_vars.get('ansible_ssh_private_key_file', self.runner.private_key_file) + private_key = task_vars.get('ansible_ssh_private_key_file') or self._connection_info.private_key_file if private_key is not None: private_key = os.path.expanduser(private_key) - + self._task.args['private_key'] = private_key + # use the mode to define src and dest's url if self._task.args.get('mode', 'push') == 'pull': # src is a remote path: @, dest is a local path @@ -154,6 +155,9 @@ def run(self, tmp=None, task_vars=dict()): src = self._process_origin(src_host, src, user) dest = self._process_remote(dest_host, dest, user) + self._task.args['src'] = src + self._task.args['dest'] = dest + # Allow custom rsync path argument. rsync_path = self._task.args.get('rsync_path', None) From 8db21f99b74c4c483bf53df599db20d9257ff55f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 7 May 2015 12:53:22 -0500 Subject: [PATCH 0546/3617] Set the inventory on the variable manager for the adhoc cli usage (v2) --- lib/ansible/cli/adhoc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 16c2dc9e4215fe..f7692a13351d04 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -93,6 +93,7 @@ def run(self): variable_manager = VariableManager() inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory) + variable_manager.set_inventory(inventory) hosts = inventory.list_hosts(pattern) if len(hosts) == 0: From 198476e34545a356aeddb405ddd73ae309b9e109 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 7 May 2015 13:06:51 -0500 Subject: [PATCH 0547/3617] Cleaning up some portions of synchronize action plugin (v2) --- lib/ansible/plugins/action/synchronize.py | 5 +---- 1 file changed, 1 
insertion(+), 4 deletions(-) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index c1b2f60e7f0700..219a982cb142b0 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -96,10 +96,7 @@ def run(self, tmp=None, task_vars=dict()): dest_is_local = dest_host in ['127.0.0.1', 'localhost'] # CHECK FOR NON-DEFAULT SSH PORT - dest_port = self._task.args.get('dest_port') - inv_port = task_vars.get('ansible_ssh_port') or task_vars.get('inventory_hostname') - if inv_port != dest_port and inv_port != task_vars.get('inventory_hostname'): - dest_port = inv_port + dest_port = task_vars.get('ansible_ssh_port') or self._task.args.get('dest_port') or 22 # edge case: explicit delegate and dest_host are the same if dest_host == task_vars.get('delegate_to'): From 0d3e015dd105d32395995c3e583ee8e9f8fb18f1 Mon Sep 17 00:00:00 2001 From: Aleksey Zhukov Date: Thu, 7 May 2015 22:53:10 +0300 Subject: [PATCH 0548/3617] Update DigitalOcean dynamic inventory to API v2 --- plugins/inventory/digital_ocean.py | 299 +++++++---------------------- 1 file changed, 74 insertions(+), 225 deletions(-) diff --git a/plugins/inventory/digital_ocean.py b/plugins/inventory/digital_ocean.py index 1c3eccd21ed618..29c4856efb5515 100755 --- a/plugins/inventory/digital_ocean.py +++ b/plugins/inventory/digital_ocean.py @@ -68,10 +68,7 @@ usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets] [--regions] [--images] [--sizes] [--ssh-keys] [--domains] [--pretty] - [--cache-path CACHE_PATH] - [--cache-max_age CACHE_MAX_AGE] - [--refresh-cache] [--client-id CLIENT_ID] - [--api-key API_KEY] + [--api-token API_TOKEN] Produce an Ansible Inventory file based on DigitalOcean credentials @@ -89,16 +86,8 @@ --ssh-keys List SSH keys as JSON --domains List Domains as JSON --pretty, -p Pretty-print results - --cache-path CACHE_PATH - Path to the cache files (default: .) 
- --cache-max_age CACHE_MAX_AGE - Maximum age of the cached items (default: 0) - --refresh-cache Force refresh of cache by making API requests to - DigitalOcean (default: False - use cache files) - --client-id CLIENT_ID, -c CLIENT_ID - DigitalOcean Client ID - --api-key API_KEY, -a API_KEY - DigitalOcean API Key + --api-token API_TOKEN, -a API_TOKEN + DigitalOcean API Token ``` ''' @@ -157,11 +146,6 @@ def __init__(self): # DigitalOceanInventory data self.data = {} # All DigitalOcean data self.inventory = {} # Ansible Inventory - self.index = {} # Various indices of Droplet metadata - - # Define defaults - self.cache_path = '.' - self.cache_max_age = 0 # Read settings, environment variables, and CLI arguments self.read_settings() @@ -169,49 +153,40 @@ def __init__(self): self.read_cli_args() # Verify credentials were set - if not hasattr(self, 'client_id') or not hasattr(self, 'api_key'): - print '''Could not find values for DigitalOcean client_id and api_key. -They must be specified via either ini file, command line argument (--client-id and --api-key), -or environment variables (DO_CLIENT_ID and DO_API_KEY)''' + if not hasattr(self, 'api_token'): + print '''Could not find values for DigitalOcean api_token. 
+They must be specified via either ini file, command line argument (--api-token), +or environment variables (DO_API_TOKEN)''' sys.exit(-1) # env command, show DigitalOcean credentials if self.args.env: - print "DO_CLIENT_ID=%s DO_API_KEY=%s" % (self.client_id, self.api_key) + print "DO_API_TOKEN=%s" % self.api_token sys.exit(0) - # Manage cache - self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" - self.cache_refreshed = False - - if not self.args.force_cache and self.args.refresh_cache or not self.is_cache_valid(): - self.load_all_data_from_digital_ocean() - else: - self.load_from_cache() - if len(self.data) == 0: - if self.args.force_cache: - print '''Cache is empty and --force-cache was specified''' - sys.exit(-1) - self.load_all_data_from_digital_ocean() - else: - # We always get fresh droplets for --list, --host, --all, and --droplets - # unless --force-cache is specified - if not self.args.force_cache and ( - self.args.list or self.args.host or self.args.all or self.args.droplets): - self.load_droplets_from_digital_ocean() + self.manager = DoManager(None, self.api_token, api_version=2) # Pick the json_data to print based on the CLI command - if self.args.droplets: json_data = { 'droplets': self.data['droplets'] } - elif self.args.regions: json_data = { 'regions': self.data['regions'] } - elif self.args.images: json_data = { 'images': self.data['images'] } - elif self.args.sizes: json_data = { 'sizes': self.data['sizes'] } - elif self.args.ssh_keys: json_data = { 'ssh_keys': self.data['ssh_keys'] } - elif self.args.domains: json_data = { 'domains': self.data['domains'] } - elif self.args.all: json_data = self.data - - elif self.args.host: json_data = self.load_droplet_variables_for_host() + if self.args.droplets: + json_data = self.load_from_digital_ocean('droplets') + elif self.args.regions: + json_data = self.load_from_digital_ocean('regions') + elif self.args.images: + json_data = self.load_from_digital_ocean('images') + elif 
self.args.sizes: + json_data = self.load_from_digital_ocean('sizes') + elif self.args.ssh_keys: + json_data = self.load_from_digital_ocean('ssh_keys') + elif self.args.domains: + json_data = self.load_from_digital_ocean('domains') + elif self.args.all: + json_data = self.load_from_digital_ocean() + elif self.args.host: + json_data = self.load_droplet_variables_for_host() else: # '--list' this is last to make it default - json_data = self.inventory + self.data = self.load_from_digital_ocean('droplets') + self.build_inventory() + json_data = self.inventory if self.args.pretty: print json.dumps(json_data, sort_keys=True, indent=2) @@ -230,10 +205,8 @@ def read_settings(self): config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') # Credentials - if config.has_option('digital_ocean', 'client_id'): - self.client_id = config.get('digital_ocean', 'client_id') - if config.has_option('digital_ocean', 'api_key'): - self.api_key = config.get('digital_ocean', 'api_key') + if config.has_option('digital_ocean', 'api_token'): + self.api_token = config.get('digital_ocean', 'api_token') # Cache related if config.has_option('digital_ocean', 'cache_path'): @@ -245,8 +218,10 @@ def read_settings(self): def read_environment(self): ''' Reads the settings from environment variables ''' # Setup credentials - if os.getenv("DO_CLIENT_ID"): self.client_id = os.getenv("DO_CLIENT_ID") - if os.getenv("DO_API_KEY"): self.api_key = os.getenv("DO_API_KEY") + if os.getenv("DO_API_TOKEN"): + self.api_token = os.getenv("DO_API_TOKEN") + if os.getenv("DO_API_KEY"): + self.api_token = os.getenv("DO_API_KEY") def read_cli_args(self): @@ -266,73 +241,42 @@ def read_cli_args(self): parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results') - parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') - parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') - 
parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') - parser.add_argument('--refresh-cache','-r', action='store_true', default=False, help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') - - parser.add_argument('--env','-e', action='store_true', help='Display DO_CLIENT_ID and DO_API_KEY') - parser.add_argument('--client-id','-c', action='store', help='DigitalOcean Client ID') - parser.add_argument('--api-key','-a', action='store', help='DigitalOcean API Key') + parser.add_argument('--env','-e', action='store_true', help='Display DO_API_TOKEN') + parser.add_argument('--api-token','-a', action='store', help='DigitalOcean API Token') self.args = parser.parse_args() - if self.args.client_id: self.client_id = self.args.client_id - if self.args.api_key: self.api_key = self.args.api_key - if self.args.cache_path: self.cache_path = self.args.cache_path - if self.args.cache_max_age: self.cache_max_age = self.args.cache_max_age + if self.args.api_token: + self.api_token = self.args.api_token # Make --list default if none of the other commands are specified - if (not self.args.droplets and not self.args.regions and not self.args.images and - not self.args.sizes and not self.args.ssh_keys and not self.args.domains and - not self.args.all and not self.args.host): - self.args.list = True + if (not self.args.droplets and not self.args.regions and + not self.args.images and not self.args.sizes and + not self.args.ssh_keys and not self.args.domains and + not self.args.all and not self.args.host): + self.args.list = True ########################################################################### # Data Management ########################################################################### - def load_all_data_from_digital_ocean(self): - ''' Use dopy to get all the information from DigitalOcean and save data in cache files ''' - manager = DoManager(self.client_id, self.api_key) 
- - self.data = {} - self.data['droplets'] = self.sanitize_list(manager.all_active_droplets()) - self.data['regions'] = self.sanitize_list(manager.all_regions()) - self.data['images'] = self.sanitize_list(manager.all_images(filter=None)) - self.data['sizes'] = self.sanitize_list(manager.sizes()) - self.data['ssh_keys'] = self.sanitize_list(manager.all_ssh_keys()) - self.data['domains'] = self.sanitize_list(manager.all_domains()) - - self.index = {} - self.index['region_to_name'] = self.build_index(self.data['regions'], 'id', 'name') - self.index['size_to_name'] = self.build_index(self.data['sizes'], 'id', 'name') - self.index['image_to_name'] = self.build_index(self.data['images'], 'id', 'name') - self.index['image_to_distro'] = self.build_index(self.data['images'], 'id', 'distribution') - self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False) - - self.build_inventory() - - self.write_to_cache() - - - def load_droplets_from_digital_ocean(self): - ''' Use dopy to get droplet information from DigitalOcean and save data in cache files ''' - manager = DoManager(self.client_id, self.api_key) - self.data['droplets'] = self.sanitize_list(manager.all_active_droplets()) - self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False) - self.build_inventory() - self.write_to_cache() - - - def build_index(self, source_seq, key_from, key_to, use_slug=True): - dest_dict = {} - for item in source_seq: - name = (use_slug and item.has_key('slug')) and item['slug'] or item[key_to] - key = item[key_from] - dest_dict[key] = name - return dest_dict + def load_from_digital_ocean(self, resource=None): + '''Get JSON from DigitalOcean API''' + json_data = {} + if resource == 'droplets' or resource is None: + json_data['droplets'] = self.manager.all_active_droplets() + if resource == 'regions' or resource is None: + json_data['regions'] = self.manager.all_regions() + if resource == 'images' or resource is None: 
+ json_data['images'] = self.manager.all_images(filter=None) + if resource == 'sizes' or resource is None: + json_data['sizes'] = self.manager.sizes() + if resource == 'ssh_keys' or resource is None: + json_data['ssh_keys'] = self.manager.all_ssh_keys() + if resource == 'domains' or resource is None: + json_data['domains'] = self.manager.all_domains() + return json_data def build_inventory(self): @@ -345,107 +289,27 @@ def build_inventory(self): self.inventory[droplet['id']] = [dest] self.push(self.inventory, droplet['name'], dest) - self.push(self.inventory, 'region_'+droplet['region_id'], dest) - self.push(self.inventory, 'image_' +droplet['image_id'], dest) - self.push(self.inventory, 'size_' +droplet['size_id'], dest) - self.push(self.inventory, 'status_'+droplet['status'], dest) - - region_name = self.index['region_to_name'].get(droplet['region_id']) - if region_name: - self.push(self.inventory, 'region_'+region_name, dest) + self.push(self.inventory, 'region_' + droplet['region']['slug'], dest) + self.push(self.inventory, 'image_' + str(droplet['image']['id']), dest) + self.push(self.inventory, 'size_' + droplet['size']['slug'], dest) - size_name = self.index['size_to_name'].get(droplet['size_id']) - if size_name: - self.push(self.inventory, 'size_'+size_name, dest) - - image_name = self.index['image_to_name'].get(droplet['image_id']) - if image_name: - self.push(self.inventory, 'image_'+image_name, dest) + image_slug = droplet['image']['slug'] + if image_slug: + self.push(self.inventory, 'image_' + self.to_safe(image_slug), dest) + else: + image_name = droplet['image']['name'] + if image_name: + self.push(self.inventory, 'image_' + self.to_safe(image_name), dest) - distro_name = self.index['image_to_distro'].get(droplet['image_id']) - if distro_name: - self.push(self.inventory, 'distro_'+distro_name, dest) + self.push(self.inventory, 'distro_' + self.to_safe(droplet['image']['distribution']), dest) + self.push(self.inventory, 'status_' + droplet['status'], 
dest) def load_droplet_variables_for_host(self): '''Generate a JSON response to a --host call''' - host = self.to_safe(str(self.args.host)) - - if not host in self.index['host_to_droplet']: - # try updating cache - if not self.args.force_cache: - self.load_all_data_from_digital_ocean() - if not host in self.index['host_to_droplet']: - # host might not exist anymore - return {} - - droplet = None - if self.cache_refreshed: - for drop in self.data['droplets']: - if drop['ip_address'] == host: - droplet = self.sanitize_dict(drop) - break - else: - # Cache wasn't refreshed this run, so hit DigitalOcean API - manager = DoManager(self.client_id, self.api_key) - droplet_id = self.index['host_to_droplet'][host] - droplet = self.sanitize_dict(manager.show_droplet(droplet_id)) - - if not droplet: - return {} - - # Put all the information in a 'do_' namespace - info = {} - for k, v in droplet.items(): - info['do_'+k] = v + host = int(self.args.host) - # Generate user-friendly variables (i.e. not the ID's) - if droplet.has_key('region_id'): - info['do_region'] = self.index['region_to_name'].get(droplet['region_id']) - if droplet.has_key('size_id'): - info['do_size'] = self.index['size_to_name'].get(droplet['size_id']) - if droplet.has_key('image_id'): - info['do_image'] = self.index['image_to_name'].get(droplet['image_id']) - info['do_distro'] = self.index['image_to_distro'].get(droplet['image_id']) - - return info - - - - ########################################################################### - # Cache Management - ########################################################################### - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - if os.path.isfile(self.cache_filename): - mod_time = os.path.getmtime(self.cache_filename) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - return True - return False - - - def load_from_cache(self): - ''' Reads the data from the cache file and 
assigns it to member variables as Python Objects''' - cache = open(self.cache_filename, 'r') - json_data = cache.read() - cache.close() - data = json.loads(json_data) - - self.data = data['data'] - self.inventory = data['inventory'] - self.index = data['index'] - - - def write_to_cache(self): - ''' Writes data in JSON format to a file ''' - data = { 'data': self.data, 'index': self.index, 'inventory': self.inventory } - json_data = json.dumps(data, sort_keys=True, indent=2) - - cache = open(self.cache_filename, 'w') - cache.write(json_data) - cache.close() + return self.manager.show_droplet(host) @@ -456,7 +320,7 @@ def write_to_cache(self): def push(self, my_dict, key, element): ''' Pushed an element onto an array that may not have been defined in the dict ''' if key in my_dict: - my_dict[key].append(element); + my_dict[key].append(element) else: my_dict[key] = [element] @@ -466,21 +330,6 @@ def to_safe(self, word): return re.sub("[^A-Za-z0-9\-\.]", "_", word) - def sanitize_dict(self, d): - new_dict = {} - for k, v in d.items(): - if v != None: - new_dict[self.to_safe(str(k))] = self.to_safe(str(v)) - return new_dict - - - def sanitize_list(self, seq): - new_seq = [] - for d in seq: - new_seq.append(self.sanitize_dict(d)) - return new_seq - - ########################################################################### # Run the script From 238e2dee5dba5921b8075ebe5dbe335b23fc2b95 Mon Sep 17 00:00:00 2001 From: Allen Luce Date: Thu, 7 May 2015 22:24:33 +0000 Subject: [PATCH 0549/3617] Config might be important for issues. --- ISSUE_TEMPLATE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 8ce40348ca1e8a..ac252d5414608e 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -6,6 +6,10 @@ Can you help us out in labelling this by telling us what kind of ticket this thi Let us know what version of Ansible you are using. Please supply the verbatim output from running “ansible --version”. 
If you're filing a ticket on a version of Ansible which is not the latest, we'd greatly appreciate it if you could retest on the latest version first. We don't expect you to test against the development branch most of the time, but we may ask for that if you have cycles. Thanks! +##### Ansible Configuration: + +What have you changed about your Ansible installation? What configuration settings have you changed/added/removed? Compare your /etc/ansible/ansible.cfg against a clean version from Github and let us know what's different. + ##### Environment: What OS are you running Ansible from and what OS are you managing? Examples include RHEL 5/6, Centos 5/6, Ubuntu 12.04/13.10, *BSD, Solaris. If this is a generic feature request or it doesn’t apply, just say “N/A”. Not all tickets may be about operating system related things and we understand that. From cd6d1f9221ce1b437cbe92b20b4f8fa3f5926562 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 7 May 2015 21:14:16 -0500 Subject: [PATCH 0550/3617] Fix pickling errors with cache plugins (v2) Fixes #10945 --- lib/ansible/plugins/cache/base.py | 4 ++++ lib/ansible/plugins/cache/memcached.py | 2 ++ lib/ansible/plugins/cache/memory.py | 2 ++ lib/ansible/plugins/cache/redis.py | 3 +++ 4 files changed, 11 insertions(+) diff --git a/lib/ansible/plugins/cache/base.py b/lib/ansible/plugins/cache/base.py index 051f02d0b00335..1f85aa6174d210 100644 --- a/lib/ansible/plugins/cache/base.py +++ b/lib/ansible/plugins/cache/base.py @@ -26,6 +26,9 @@ @add_metaclass(ABCMeta) class BaseCacheModule: + def __init__(self): + self.__getstate__ = self.copy + @abstractmethod def get(self, key): pass @@ -53,3 +56,4 @@ def flush(self): @abstractmethod def copy(self): pass + diff --git a/lib/ansible/plugins/cache/memcached.py b/lib/ansible/plugins/cache/memcached.py index e7321a5a6b5c87..519ca776e097c2 100644 --- a/lib/ansible/plugins/cache/memcached.py +++ b/lib/ansible/plugins/cache/memcached.py @@ -113,6 +113,8 @@ def __init__(self, cache, 
*args, **kwargs): self._cache = cache self._keyset = dict(*args, **kwargs) + super(CacheModule, self).__init__() + def __contains__(self, key): return key in self._keyset diff --git a/lib/ansible/plugins/cache/memory.py b/lib/ansible/plugins/cache/memory.py index 15628361513121..19591a40cf245f 100644 --- a/lib/ansible/plugins/cache/memory.py +++ b/lib/ansible/plugins/cache/memory.py @@ -24,6 +24,8 @@ class CacheModule(BaseCacheModule): def __init__(self, *args, **kwargs): self._cache = {} + super(CacheModule, self).__init__() + def get(self, key): return self._cache.get(key) diff --git a/lib/ansible/plugins/cache/redis.py b/lib/ansible/plugins/cache/redis.py index 287c14bd2a2bf4..b7a624520aff77 100644 --- a/lib/ansible/plugins/cache/redis.py +++ b/lib/ansible/plugins/cache/redis.py @@ -51,6 +51,8 @@ def __init__(self, *args, **kwargs): self._cache = StrictRedis(*connection) self._keys_set = 'ansible_cache_keys' + super(CacheModule, self).__init__() + def _make_key(self, key): return self._prefix + key @@ -100,3 +102,4 @@ def copy(self): for key in self.keys(): ret[key] = self.get(key) return ret + From 0f1eb3cfc2b6eb6652d13aa4cc1055b7d726f4fb Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 7 May 2015 23:56:33 -0500 Subject: [PATCH 0551/3617] Better fix for serializing/deserializing cache plugins (v2) --- lib/ansible/plugins/cache/base.py | 3 --- lib/ansible/plugins/cache/memcached.py | 8 ++++++-- lib/ansible/plugins/cache/memory.py | 8 ++++++-- lib/ansible/plugins/cache/redis.py | 7 +++++-- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/lib/ansible/plugins/cache/base.py b/lib/ansible/plugins/cache/base.py index 1f85aa6174d210..767964b281cbd4 100644 --- a/lib/ansible/plugins/cache/base.py +++ b/lib/ansible/plugins/cache/base.py @@ -26,9 +26,6 @@ @add_metaclass(ABCMeta) class BaseCacheModule: - def __init__(self): - self.__getstate__ = self.copy - @abstractmethod def get(self, key): pass diff --git a/lib/ansible/plugins/cache/memcached.py 
b/lib/ansible/plugins/cache/memcached.py index 519ca776e097c2..a34855bafc45e7 100644 --- a/lib/ansible/plugins/cache/memcached.py +++ b/lib/ansible/plugins/cache/memcached.py @@ -113,8 +113,6 @@ def __init__(self, cache, *args, **kwargs): self._cache = cache self._keyset = dict(*args, **kwargs) - super(CacheModule, self).__init__() - def __contains__(self, key): return key in self._keyset @@ -193,3 +191,9 @@ def flush(self): def copy(self): return self._keys.copy() + + def __getstate__(self): + return dict() + + def __setstate__(self, data): + self.__init__() diff --git a/lib/ansible/plugins/cache/memory.py b/lib/ansible/plugins/cache/memory.py index 19591a40cf245f..417ef20e0edea6 100644 --- a/lib/ansible/plugins/cache/memory.py +++ b/lib/ansible/plugins/cache/memory.py @@ -24,8 +24,6 @@ class CacheModule(BaseCacheModule): def __init__(self, *args, **kwargs): self._cache = {} - super(CacheModule, self).__init__() - def get(self, key): return self._cache.get(key) @@ -46,3 +44,9 @@ def flush(self): def copy(self): return self._cache.copy() + + def __getstate__(self): + return self.copy() + + def __setstate__(self, data): + self._cache = data diff --git a/lib/ansible/plugins/cache/redis.py b/lib/ansible/plugins/cache/redis.py index b7a624520aff77..6c97f3eab8be2e 100644 --- a/lib/ansible/plugins/cache/redis.py +++ b/lib/ansible/plugins/cache/redis.py @@ -51,8 +51,6 @@ def __init__(self, *args, **kwargs): self._cache = StrictRedis(*connection) self._keys_set = 'ansible_cache_keys' - super(CacheModule, self).__init__() - def _make_key(self, key): return self._prefix + key @@ -103,3 +101,8 @@ def copy(self): ret[key] = self.get(key) return ret + def __getstate__(self): + return dict() + + def __setstate__(self, data): + self.__init__() From 62ccc1b9b643196b8de36980a597c2d5d644b957 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 8 May 2015 16:41:15 +0200 Subject: [PATCH 0552/3617] cloudstack: fix typo in variable, fixes get_domain() --- 
lib/ansible/module_utils/cloudstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index f791b403263f91..e887367c2fd69b 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -288,7 +288,7 @@ def get_domain(self, key=None): args = {} args['name'] = domain args['listall'] = True - domain = self.cs.listDomains(**args) + domains = self.cs.listDomains(**args) if domains: self.domain = domains['domain'][0] return self._get_by_key(key, self.domain) From 2e8758d8c4aef20ae1f3fd3bb7172363a93e8136 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 8 May 2015 11:08:10 -0400 Subject: [PATCH 0553/3617] fixed docs on the meaning of -H sudo arg fixes #7418 --- docsite/rst/intro_configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 368013d7f1a415..2ff53c22485d11 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -524,7 +524,7 @@ the sudo implementation is matching CLI flags with the standard sudo:: sudo_flags ========== -Additional flags to pass to sudo when engaging sudo support. The default is '-H' which preserves the environment +Additional flags to pass to sudo when engaging sudo support. The default is '-H' which preserves the $HOME environment variable of the original user. 
In some situations you may wish to add or remove flags, but in general most users will not need to change this setting:: From e6844f7e6cfe66a6fa30154faf6b8df06a7d739b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 8 May 2015 10:41:31 -0700 Subject: [PATCH 0554/3617] Update core module ref for mysql fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 7540cbb845d69b..8b4e201772cf94 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 7540cbb845d69b7278c2543b3c469a2db971e379 +Subproject commit 8b4e201772cf94e738bdabae0b4e6b68759cdd85 From 56c9614e74668dc4cfc2b1de3372d6bd24a96769 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 8 May 2015 14:25:31 -0400 Subject: [PATCH 0555/3617] made playbook include taggable, removed unused conditional import --- lib/ansible/playbook/playbook_include.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index 5c91dd14adb701..075e6dcbdf2f3e 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -25,11 +25,10 @@ from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping from ansible.playbook.attribute import FieldAttribute from ansible.playbook.base import Base -from ansible.playbook.conditional import Conditional from ansible.playbook.taggable import Taggable from ansible.errors import AnsibleParserError -class PlaybookInclude(Base): +class PlaybookInclude(Base, Taggable): _name = FieldAttribute(isa='string') _include = FieldAttribute(isa='string') From a0fc8bb0bd834e29a652ed7face4ca360dc6cc56 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 8 May 2015 11:34:19 -0500 Subject: [PATCH 0556/3617] Testing additions and fixes * Fix import pathing for units.mock * Add some additional requirements * Use 
compileall to test compatibility with different python versions --- .travis.yml | 6 ++++ setup.py | 2 +- test-requirements.txt | 1 + test/units/executor/test_play_iterator.py | 2 +- test/units/playbook/test_play.py | 2 +- test/units/playbook/test_playbook.py | 2 +- test/units/playbook/test_role.py | 2 +- test/units/vars/test_variable_manager.py | 2 +- tox.ini | 36 ++++++++++++++--------- 9 files changed, 35 insertions(+), 20 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6e18e06050cd88..e53b870597ce8c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,12 @@ language: python env: - TOXENV=py26 - TOXENV=py27 +addons: + apt: + sources: + - deadsnakes + packages: + - python2.4 install: - pip install tox script: diff --git a/setup.py b/setup.py index 37527414067c4f..1f73836cbd3c54 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ author_email='michael@ansible.com', url='http://ansible.com/', license='GPLv3', - install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6'], + install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6', 'six'], package_dir={ '': 'lib' }, packages=find_packages('lib'), package_data={ diff --git a/test-requirements.txt b/test-requirements.txt index abb61ed1e97c1d..fe65457f372f2d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,3 +7,4 @@ mock passlib coverage coveralls +unittest2 diff --git a/test/units/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py index 47c0352b25a4ee..2fa32c7119e4e6 100644 --- a/test/units/executor/test_play_iterator.py +++ b/test/units/executor/test_play_iterator.py @@ -26,7 +26,7 @@ from ansible.executor.play_iterator import PlayIterator from ansible.playbook import Playbook -from test.mock.loader import DictDataLoader +from units.mock.loader import DictDataLoader class TestPlayIterator(unittest.TestCase): diff --git a/test/units/playbook/test_play.py b/test/units/playbook/test_play.py index
22486f41290729..637b6dbbe13a13 100644 --- a/test/units/playbook/test_play.py +++ b/test/units/playbook/test_play.py @@ -27,7 +27,7 @@ from ansible.playbook.role import Role from ansible.playbook.task import Task -from test.mock.loader import DictDataLoader +from units.mock.loader import DictDataLoader class TestPlay(unittest.TestCase): diff --git a/test/units/playbook/test_playbook.py b/test/units/playbook/test_playbook.py index dfb52dc7b12726..97307c4b272319 100644 --- a/test/units/playbook/test_playbook.py +++ b/test/units/playbook/test_playbook.py @@ -26,7 +26,7 @@ from ansible.playbook import Playbook from ansible.vars import VariableManager -from test.mock.loader import DictDataLoader +from units.mock.loader import DictDataLoader class TestPlaybook(unittest.TestCase): diff --git a/test/units/playbook/test_role.py b/test/units/playbook/test_role.py index d0f3708898d157..7aab5133da2eba 100644 --- a/test/units/playbook/test_role.py +++ b/test/units/playbook/test_role.py @@ -28,7 +28,7 @@ from ansible.playbook.role.include import RoleInclude from ansible.playbook.task import Task -from test.mock.loader import DictDataLoader +from units.mock.loader import DictDataLoader class TestRole(unittest.TestCase): diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py index f8d815eb6f78a7..173ba1370ddea9 100644 --- a/test/units/vars/test_variable_manager.py +++ b/test/units/vars/test_variable_manager.py @@ -24,7 +24,7 @@ from ansible.vars import VariableManager -from test.mock.loader import DictDataLoader +from units.mock.loader import DictDataLoader class TestVariableManager(unittest.TestCase): diff --git a/tox.ini b/tox.ini index 5440a5825c9a6e..26d80ff7d330f6 100644 --- a/tox.ini +++ b/tox.ini @@ -1,23 +1,31 @@ [tox] -envlist = {py26,py27}-v{1} +envlist = {py26,py27} [testenv] commands = make tests deps = -r{toxinidir}/test-requirements.txt whitelist_externals = make -[testenv:py26-v1] - -[testenv:py27-v1] - -[testenv:py26-v2] 
-deps = -r{toxinidir}/v2/test-requirements.txt -commands = make newtests +[testenv:py26] +commands = + python -m compileall -fq -x 'test|samples' . + python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils + make tests +deps = -r{toxinidir}/test-requirements.txt +whitelist_externals = + make + python2.4 -[testenv:py27-v2] -deps = -r{toxinidir}/v2/test-requirements.txt -commands = make newtests +[testenv:py27] +commands = + python -m compileall -fq -x 'test|samples' . + make tests +deps = -r{toxinidir}/test-requirements.txt +whitelist_externals = make -[testenv:py34-v2] -deps = -r{toxinidir}/v2/test-requirements.txt -commands = make newtests +[testenv:py34] +commands = + python -m compileall -fq -x 'lib/ansible/module_utils' lib + make tests +deps = -r{toxinidir}/test-requirements.txt +whitelist_externals = make From 3a87b2727d5cf5cbedef0d68eb95a81d4f54a69d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 8 May 2015 13:10:40 -0700 Subject: [PATCH 0557/3617] Fix format strings for python2.6 --- lib/ansible/parsing/vault/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index e45fddc197056c..40d02d3d59c7c4 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -102,7 +102,7 @@ def encrypt(self, data): cipher = globals()['Vault' + self.cipher_name] this_cipher = cipher() else: - raise errors.AnsibleError("{} cipher could not be found".format(self.cipher_name)) + raise errors.AnsibleError("{0} cipher could not be found".format(self.cipher_name)) """ # combine sha + data @@ -135,7 +135,7 @@ def decrypt(self, data): cipher = globals()['Vault' + ciphername] this_cipher = cipher() else: - raise errors.AnsibleError("{} cipher could not be found".format(ciphername)) + raise errors.AnsibleError("{0} cipher could not be found".format(ciphername)) # try to unencrypt data
data = this_cipher.decrypt(data, self.password) @@ -379,7 +379,7 @@ def aes_derive_key_and_iv(self, password, salt, key_length, iv_length): d = d_i = b'' while len(d) < key_length + iv_length: - text = "{}{}{}".format(d_i, password, salt) + text = "{0}{1}{2}".format(d_i, password, salt) d_i = md5(to_bytes(text)).digest() d += d_i From 7f21f270d9ea51b352c6918a3d70a522367b7cd1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 8 May 2015 13:18:19 -0700 Subject: [PATCH 0558/3617] Be more lenient in instance check: MutableMapping is more general than dict --- lib/ansible/vars/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index f30d52b7a3a1c0..040c2244483984 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -22,6 +22,7 @@ import os from collections import defaultdict +from collections import MutableMapping try: from hashlib import sha1 @@ -73,7 +74,7 @@ def extra_vars(self): def set_extra_vars(self, value): ''' ensures a clean copy of the extra_vars are used to set the value ''' - assert isinstance(value, dict) + assert isinstance(value, MutableMapping) self._extra_vars = value.copy() def set_inventory(self, inventory): @@ -83,7 +84,7 @@ def _validate_both_dicts(self, a, b): ''' Validates that both arguments are dictionaries, or an error is raised. 
''' - if not (isinstance(a, dict) and isinstance(b, dict)): + if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)): raise AnsibleError("failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)) def _combine_vars(self, a, b): From f9f8af06fc241659468c8c1663dfa4aaff7f1eb8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 8 May 2015 13:49:10 -0700 Subject: [PATCH 0559/3617] Change asserts to assertIsInstance for better error messages --- test/units/playbook/test_block.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/units/playbook/test_block.py b/test/units/playbook/test_block.py index 348681527bb7b4..2c202002267372 100644 --- a/test/units/playbook/test_block.py +++ b/test/units/playbook/test_block.py @@ -60,18 +60,18 @@ def test_load_block_with_tasks(self): ) b = Block.load(ds) self.assertEqual(len(b.block), 1) - assert isinstance(b.block[0], Task) + self.assertIsInstance(b.block[0], Task) self.assertEqual(len(b.rescue), 1) - assert isinstance(b.rescue[0], Task) + self.assertIsInstance(b.rescue[0], Task) self.assertEqual(len(b.always), 1) - assert isinstance(b.always[0], Task) + self.assertIsInstance(b.always[0], Task) # not currently used #self.assertEqual(len(b.otherwise), 1) - #assert isinstance(b.otherwise[0], Task) + #self.assertIsInstance(b.otherwise[0], Task) def test_load_implicit_block(self): ds = [dict(action='foo')] b = Block.load(ds) self.assertEqual(len(b.block), 1) - assert isinstance(b.block[0], Task) + self.assertIsInstance(b.block[0], Task) From 4a445a1247d940482c8c95326e53a20230681877 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 9 May 2015 00:30:41 +0200 Subject: [PATCH 0560/3617] changelog: add cs_instancegroup --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c85464edd689b3..fbf7f8e9e78bd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ New Modules: * 
cloudstack: cs_firewall * cloudstack: cs_iso * cloudstack: cs_instance + * cloudstack: cs_instancegroup * cloudstack: cs_sshkeypair * cloudstack: cs_securitygroup * cloudstack: cs_securitygroup_rule From d1977dad23fb3d9ae4095066c03ede44ed11d656 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 8 May 2015 19:19:03 -0400 Subject: [PATCH 0561/3617] started implementing syntax check --- lib/ansible/cli/playbook.py | 2 +- lib/ansible/executor/playbook_executor.py | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index eb60bacbd22afa..69e411dc87a0f6 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -87,7 +87,7 @@ def run(self): passwords = {} # don't deal with privilege escalation or passwords when we don't need to - if not self.options.listhosts and not self.options.listtasks and not self.options.listtags: + if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax: self.normalize_become_options() (sshpass, becomepass) = self.ask_passwords() passwords = { 'conn_pass': sshpass, 'become_pass': becomepass } diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 5d72ef15bd0bf0..5e339e40313417 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -46,7 +46,7 @@ def __init__(self, playbooks, inventory, variable_manager, loader, display, opti self._options = options self.passwords = passwords - if options.listhosts or options.listtasks or options.listtags: + if options.listhosts or options.listtasks or options.listtags or options.syntax: self._tqm = None else: self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords) @@ -85,6 +85,9 @@ def run(self): new_play = play.copy() new_play.post_validate(templar) 
+ if self._options.syntax: + continue + if self._tqm is None: # we are just doing a listing @@ -147,6 +150,10 @@ def run(self): if self._tqm is not None: self._cleanup() + if self._options.syntax: + self.display.display("No issues encountered") + return result + # FIXME: this stat summary stuff should be cleaned up and moved # to a new method, if it even belongs here... self._display.banner("PLAY RECAP") From ad9981f565ea00a857e895fd6a111705da676a05 Mon Sep 17 00:00:00 2001 From: Vitaliy Okulov Date: Sun, 10 May 2015 14:37:46 +0000 Subject: [PATCH 0562/3617] Fix for ansible/ansible#10917 Add missing broadcast addr for network facts for Linux --- lib/ansible/module_utils/facts.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index b223c5f5f7d3eb..c4ad5cab738f06 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1836,6 +1836,7 @@ def parse_ip_output(output, secondary=False): if words[0] == 'inet': if '/' in words[1]: address, netmask_length = words[1].split('/') + broadcast = words[3] else: # pointopoint interfaces do not have a prefix address = words[1] @@ -1849,6 +1850,7 @@ def parse_ip_output(output, secondary=False): interfaces[iface] = {} if not secondary and "ipv4" not in interfaces[iface]: interfaces[iface]['ipv4'] = {'address': address, + 'broadcast': broadcast, 'netmask': netmask, 'network': network} else: @@ -1856,6 +1858,7 @@ def parse_ip_output(output, secondary=False): interfaces[iface]["ipv4_secondaries"] = [] interfaces[iface]["ipv4_secondaries"].append({ 'address': address, + 'broadcast': broadcast, 'netmask': netmask, 'network': network, }) @@ -1866,12 +1869,14 @@ def parse_ip_output(output, secondary=False): interfaces[device]["ipv4_secondaries"] = [] interfaces[device]["ipv4_secondaries"].append({ 'address': address, + 'broadcast': broadcast, 'netmask': netmask, 'network': network, }) # If this is the default address, update 
default_ipv4 if 'address' in default_ipv4 and default_ipv4['address'] == address: + default_ipv4['broadcast'] = broadcast default_ipv4['netmask'] = netmask default_ipv4['network'] = network default_ipv4['macaddress'] = macaddress From 31b0ffafa85094a300c0bb7ec0eb7badfd2fb3a8 Mon Sep 17 00:00:00 2001 From: Vitaliy Okulov Date: Sun, 10 May 2015 14:43:36 +0000 Subject: [PATCH 0563/3617] Align fix for ansible/ansible#10917 --- lib/ansible/module_utils/facts.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index c4ad5cab738f06..83aef5bf30b598 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1836,7 +1836,7 @@ def parse_ip_output(output, secondary=False): if words[0] == 'inet': if '/' in words[1]: address, netmask_length = words[1].split('/') - broadcast = words[3] + broadcast = words[3] else: # pointopoint interfaces do not have a prefix address = words[1] @@ -1850,7 +1850,7 @@ def parse_ip_output(output, secondary=False): interfaces[iface] = {} if not secondary and "ipv4" not in interfaces[iface]: interfaces[iface]['ipv4'] = {'address': address, - 'broadcast': broadcast, + 'broadcast': broadcast, 'netmask': netmask, 'network': network} else: @@ -1858,7 +1858,7 @@ def parse_ip_output(output, secondary=False): interfaces[iface]["ipv4_secondaries"] = [] interfaces[iface]["ipv4_secondaries"].append({ 'address': address, - 'broadcast': broadcast, + 'broadcast': broadcast, 'netmask': netmask, 'network': network, }) @@ -1869,7 +1869,7 @@ def parse_ip_output(output, secondary=False): interfaces[device]["ipv4_secondaries"] = [] interfaces[device]["ipv4_secondaries"].append({ 'address': address, - 'broadcast': broadcast, + 'broadcast': broadcast, 'netmask': netmask, 'network': network, }) From d2782f0d84c4e344c18f647b1ac3bfd903d75366 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 11 May 2015 08:06:21 -0400 Subject: [PATCH 0564/3617] 
Remove unneeded required_one_of for openstack We're being too strict - there is a third possibility, which is that a user will have defined the OS_* environment variables and expect them to pass through. --- lib/ansible/module_utils/openstack.py | 6 +----- lib/ansible/utils/module_docs_fragments/openstack.py | 7 +++++-- v2/ansible/module_utils/openstack.py | 6 +----- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index b58cc534287050..4069449144346d 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -93,11 +93,7 @@ def openstack_full_argument_spec(**kwargs): def openstack_module_kwargs(**kwargs): - ret = dict( - required_one_of=[ - ['cloud', 'auth'], - ], - ) + ret = {} for key in ('mutually_exclusive', 'required_together', 'required_one_of'): if key in kwargs: if key in ret: diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index 7e42841d6da9e9..3dff423772d29d 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -23,7 +23,9 @@ class ModuleDocFragment(object): options: cloud: description: - - Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin) + - Named cloud to operate against. Provides default values for I(auth) and + I(auth_type). This parameter is not needed if I(auth) is provided or if + OpenStack OS_* environment variables are present. required: false auth: description: @@ -32,7 +34,8 @@ class ModuleDocFragment(object): I(auth_url), I(username), I(password), I(project_name) and any information about domains if the cloud supports them. For other plugins, this param will need to contain whatever parameters that auth plugin - requires. This parameter is not needed if a named cloud is provided. + requires. 
This parameter is not needed if a named cloud is provided or + OpenStack OS_* environment variables are present. required: false auth_type: description: diff --git a/v2/ansible/module_utils/openstack.py b/v2/ansible/module_utils/openstack.py index b58cc534287050..4069449144346d 100644 --- a/v2/ansible/module_utils/openstack.py +++ b/v2/ansible/module_utils/openstack.py @@ -93,11 +93,7 @@ def openstack_full_argument_spec(**kwargs): def openstack_module_kwargs(**kwargs): - ret = dict( - required_one_of=[ - ['cloud', 'auth'], - ], - ) + ret = {} for key in ('mutually_exclusive', 'required_together', 'required_one_of'): if key in kwargs: if key in ret: From cd14d73be8ae29ade22a9e7bad9bef1fccd1c67b Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 11 May 2015 08:10:37 -0400 Subject: [PATCH 0565/3617] Add defaults and a link to os-client-config docs --- lib/ansible/utils/module_docs_fragments/openstack.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index 3dff423772d29d..99897eee6d86bd 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -80,14 +80,17 @@ class ModuleDocFragment(object): - A path to a CA Cert bundle that can be used as part of verifying SSL API requests. required: false + default: None cert: description: - A path to a client certificate to use as part of the SSL transaction required: false + default: None key: description: - A path to a client key to use as part of the SSL transaction required: false + default: None endpoint_type: description: - Endpoint URL type to fetch from the service catalog. 
@@ -103,5 +106,6 @@ class ModuleDocFragment(object): can come from a yaml config file in /etc/ansible/openstack.yaml, /etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from standard environment variables, then finally by explicit parameters in - plays. + plays. More information can be found at + U(http://docs.openstack.org/developer/os-client-config) ''' From 0c305b72cd61058666091895095d87d1ee787a19 Mon Sep 17 00:00:00 2001 From: Vitaliy Okulov Date: Mon, 11 May 2015 14:33:50 +0000 Subject: [PATCH 0566/3617] Fix align again, strange vim error. ansible/ansible#10917 --- lib/ansible/module_utils/facts.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 83aef5bf30b598..b025a80d956783 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1836,7 +1836,7 @@ def parse_ip_output(output, secondary=False): if words[0] == 'inet': if '/' in words[1]: address, netmask_length = words[1].split('/') - broadcast = words[3] + broadcast = words[3] else: # pointopoint interfaces do not have a prefix address = words[1] @@ -1850,7 +1850,7 @@ def parse_ip_output(output, secondary=False): interfaces[iface] = {} if not secondary and "ipv4" not in interfaces[iface]: interfaces[iface]['ipv4'] = {'address': address, - 'broadcast': broadcast, + 'broadcast': broadcast, 'netmask': netmask, 'network': network} else: @@ -1858,7 +1858,7 @@ def parse_ip_output(output, secondary=False): interfaces[iface]["ipv4_secondaries"] = [] interfaces[iface]["ipv4_secondaries"].append({ 'address': address, - 'broadcast': broadcast, + 'broadcast': broadcast, 'netmask': netmask, 'network': network, }) @@ -1869,7 +1869,7 @@ def parse_ip_output(output, secondary=False): interfaces[device]["ipv4_secondaries"] = [] interfaces[device]["ipv4_secondaries"].append({ 'address': address, - 'broadcast': broadcast, + 'broadcast': broadcast, 'netmask': netmask, 'network': 
network, }) From 9180ede6e09083b9248680bd7f1fdf412fa98934 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 11 May 2015 17:37:35 +0200 Subject: [PATCH 0567/3617] cloudstack: add test for cs_account --- test/integration/cloudstack.yml | 1 + .../roles/test_cs_account/meta/main.yml | 3 + .../roles/test_cs_account/tasks/main.yml | 226 ++++++++++++++++++ 3 files changed, 230 insertions(+) create mode 100644 test/integration/roles/test_cs_account/meta/main.yml create mode 100644 test/integration/roles/test_cs_account/tasks/main.yml diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml index 5f5e65c6cd1107..7cdf593a8c7c4d 100644 --- a/test/integration/cloudstack.yml +++ b/test/integration/cloudstack.yml @@ -11,3 +11,4 @@ - { role: test_cs_securitygroup_rule, tags: test_cs_securitygroup_rule } - { role: test_cs_instance, tags: test_cs_instance } - { role: test_cs_instancegroup, tags: test_cs_instancegroup } + - { role: test_cs_account, tags: test_cs_account } diff --git a/test/integration/roles/test_cs_account/meta/main.yml b/test/integration/roles/test_cs_account/meta/main.yml new file mode 100644 index 00000000000000..03e38bd4f7afd2 --- /dev/null +++ b/test/integration/roles/test_cs_account/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_account/tasks/main.yml b/test/integration/roles/test_cs_account/tasks/main.yml new file mode 100644 index 00000000000000..5bcea07ef2a3aa --- /dev/null +++ b/test/integration/roles/test_cs_account/tasks/main.yml @@ -0,0 +1,226 @@ +--- +- name: setup + cs_account: name={{ cs_resource_prefix }}_user state=absent + register: acc +- name: verify setup + assert: + that: + - acc|success + +- name: test fail if missing name + action: cs_account + register: acc + ignore_errors: true +- name: verify results of fail if missing params + assert: + that: + - acc|failed + - 'acc.msg == "missing required arguments: name"' + +- name: test fail if missing params 
if state=present + cs_account: + name: "{{ cs_resource_prefix }}_user" + register: acc + ignore_errors: true +- name: verify results of fail if missing params if state=present + assert: + that: + - acc|failed + - 'acc.msg == "missing required arguments: email,username,password,first_name,last_name"' + +- name: test create user account + cs_account: + name: "{{ cs_resource_prefix }}_user" + username: "{{ cs_resource_prefix }}_username" + password: "{{ cs_resource_prefix }}_password" + last_name: "{{ cs_resource_prefix }}_last_name" + first_name: "{{ cs_resource_prefix }}_first_name" + email: "{{ cs_resource_prefix }}@example.com" + network_domain: "{{ cs_resource_prefix }}.local" + register: acc +- name: verify results of create account + assert: + that: + - acc|success + - acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "enabled" + - acc.domain == "ROOT" + +- name: test create user account idempotence + cs_account: + name: "{{ cs_resource_prefix }}_user" + username: "{{ cs_resource_prefix }}_username" + password: "{{ cs_resource_prefix }}_password" + last_name: "{{ cs_resource_prefix }}_last_name" + first_name: "{{ cs_resource_prefix }}_first_name" + email: "{{ cs_resource_prefix }}@example.com" + network_domain: "{{ cs_resource_prefix }}.local" + register: acc +- name: verify results of create account idempotence + assert: + that: + - acc|success + - not acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "enabled" + - acc.domain == "ROOT" + +- name: test lock user account + cs_account: + name: "{{ cs_resource_prefix }}_user" + state: locked + register: acc +- name: verify results of lock user account + assert: + that: + - acc|success + - acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - 
acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "locked" + - acc.domain == "ROOT" + +- name: test lock user account idempotence + cs_account: + name: "{{ cs_resource_prefix }}_user" + state: locked + register: acc +- name: verify results of lock user account idempotence + assert: + that: + - acc|success + - not acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "locked" + - acc.domain == "ROOT" + +- name: test disable user account + cs_account: + name: "{{ cs_resource_prefix }}_user" + state: disabled + register: acc +- name: verify results of disable user account + assert: + that: + - acc|success + - acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "disabled" + - acc.domain == "ROOT" + +- name: test disable user account idempotence + cs_account: + name: "{{ cs_resource_prefix }}_user" + state: disabled + register: acc +- name: verify results of disable user account idempotence + assert: + that: + - acc|success + - not acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "disabled" + - acc.domain == "ROOT" + +- name: test lock disabled user account + cs_account: + name: "{{ cs_resource_prefix }}_user" + state: locked + register: acc +- name: verify results of lock disabled user account + assert: + that: + - acc|success + - acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "locked" + - acc.domain == "ROOT" + +- name: test lock disabled user account idempotence + cs_account: + name: "{{ cs_resource_prefix }}_user" + 
state: locked + register: acc +- name: verify results of lock disabled user account idempotence + assert: + that: + - acc|success + - not acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "locked" + - acc.domain == "ROOT" + +- name: test enable user account + cs_account: + name: "{{ cs_resource_prefix }}_user" + state: enabled + register: acc +- name: verify results of enable user account + assert: + that: + - acc|success + - acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "enabled" + - acc.domain == "ROOT" + +- name: test enable user account idempotence + cs_account: + name: "{{ cs_resource_prefix }}_user" + state: enabled + register: acc +- name: verify results of enable user account idempotence + assert: + that: + - acc|success + - not acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "enabled" + - acc.domain == "ROOT" + +- name: test remove user account + cs_account: + name: "{{ cs_resource_prefix }}_user" + state: absent + register: acc +- name: verify results of remove user account + assert: + that: + - acc|success + - acc|changed + - acc.name == "{{ cs_resource_prefix }}_user" + - acc.network_domain == "{{ cs_resource_prefix }}.local" + - acc.account_type == "user" + - acc.account_state == "enabled" + - acc.domain == "ROOT" + +- name: test remove user account idempotence + cs_account: + name: "{{ cs_resource_prefix }}_user" + state: absent + register: acc +- name: verify results of remove user account idempotence + assert: + that: + - acc|success + - not acc|changed From ebb4695da8e1844593ccbd4c4b677ab5c3e9edab Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 11 May 2015 09:09:00 
-0700 Subject: [PATCH 0568/3617] Update the submodule pointers --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 8b4e201772cf94..36891d82735ed9 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8b4e201772cf94e738bdabae0b4e6b68759cdd85 +Subproject commit 36891d82735ed90bbf8a45ad9ce3f044e5e39ec5 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 66a96ad6e2a93f..6bf4558df8c61a 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 66a96ad6e2a93f7ed786c630cf81e996b9a50403 +Subproject commit 6bf4558df8c61ae457dc7e5be58855d2931b607f From f141ec967141972e43849458419a39177daecc40 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 11 May 2015 09:28:19 -0700 Subject: [PATCH 0569/3617] Update v2 module refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 31b6f75570de2d..42abf85be7acbd 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 31b6f75570de2d9c321c596e659fd5daf42e786d +Subproject commit 42abf85be7acbd95f6904a313c34a9495e99ca14 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 66a96ad6e2a93f..6bf4558df8c61a 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 66a96ad6e2a93f7ed786c630cf81e996b9a50403 +Subproject commit 6bf4558df8c61ae457dc7e5be58855d2931b607f From daf533c80e934b219a40373042b513cd00aac695 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 11 May 2015 11:22:41 -0500 Subject: [PATCH 0570/3617] V2 fixes * PluginLoader class will now be more selective about loading some plugin classes, if a required base class is specified 
(used to avoid loading v1 plugins that have changed significantly in their apis) * Added ability for the connection info class to read values from a given hosts variables, to support "magic" variables * Added some more magic variables to the VariableManager output * Fixed a bug in the ActionBase class, where the module configuration code was not correctly handling unicode --- lib/ansible/executor/connection_info.py | 27 +++++++++++++++++- lib/ansible/executor/process/worker.py | 2 +- lib/ansible/plugins/__init__.py | 38 +++++++++++++++++-------- lib/ansible/plugins/action/__init__.py | 32 +++++++++------------ lib/ansible/vars/__init__.py | 10 +++++-- 5 files changed, 74 insertions(+), 35 deletions(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 9e91cd09eafb66..bf78cf63a5b122 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -29,6 +29,20 @@ __all__ = ['ConnectionInformation'] +# the magic variable mapping dictionary below is used to translate +# host/inventory variables to fields in the ConnectionInformation +# object. The dictionary values are tuples, to account for aliases +# in variable names. 
+ +MAGIC_VARIABLE_MAPPING = dict( + connection = ('ansible_connection',), + remote_addr = ('ansible_ssh_host', 'ansible_host'), + remote_user = ('ansible_ssh_user', 'ansible_user'), + port = ('ansible_ssh_port', 'ansible_port'), + password = ('ansible_ssh_pass', 'ansible_password'), + private_key_file = ('ansible_ssh_private_key_file', 'ansible_private_key_file'), + shell = ('ansible_shell_type',), +) class ConnectionInformation: @@ -51,6 +65,7 @@ def __init__(self, play=None, options=None, passwords=None): self.port = None self.private_key_file = C.DEFAULT_PRIVATE_KEY_FILE self.timeout = C.DEFAULT_TIMEOUT + self.shell = None # privilege escalation self.become = None @@ -170,7 +185,7 @@ def copy(self, ci): else: setattr(self, field, value) - def set_task_override(self, task): + def set_task_and_host_override(self, task, host): ''' Sets attributes from the task if they are set, which will override those from the play. @@ -179,12 +194,22 @@ def set_task_override(self, task): new_info = ConnectionInformation() new_info.copy(self) + # loop through a subset of attributes on the task object and set + # connection fields based on their values for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'): if hasattr(task, attr): attr_val = getattr(task, attr) if attr_val: setattr(new_info, attr, attr_val) + # finally, use the MAGIC_VARIABLE_MAPPING dictionary to update this + # connection info object with 'magic' variables from inventory + variables = host.get_vars() + for (attr, variable_names) in MAGIC_VARIABLE_MAPPING.iteritems(): + for variable_name in variable_names: + if variable_name in variables: + setattr(new_info, attr, variables[variable_name]) + return new_info def make_become_cmd(self, cmd, executable, become_settings=None): diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py index d8e8960fe4077b..e1488ebcb1da2e 100644 --- 
a/lib/ansible/executor/process/worker.py +++ b/lib/ansible/executor/process/worker.py @@ -111,7 +111,7 @@ def run(self): # apply the given task's information to the connection info, # which may override some fields already set by the play or # the options specified on the command line - new_connection_info = connection_info.set_task_override(task) + new_connection_info = connection_info.set_task_and_host_override(task=task, host=host) # execute the task and build a TaskResult from the result debug("running TaskExecutor() for %s/%s" % (host, task)) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index 36b5c3d0334e68..8d23ae796cb028 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -55,9 +55,10 @@ class PluginLoader: The first match is used. ''' - def __init__(self, class_name, package, config, subdir, aliases={}): + def __init__(self, class_name, package, config, subdir, aliases={}, required_base_class=None): self.class_name = class_name + self.base_class = required_base_class self.package = package self.config = config self.subdir = subdir @@ -87,11 +88,12 @@ def __setstate__(self, data): config = data.get('config') subdir = data.get('subdir') aliases = data.get('aliases') + base_class = data.get('base_class') PATH_CACHE[class_name] = data.get('PATH_CACHE') PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE') - self.__init__(class_name, package, config, subdir, aliases) + self.__init__(class_name, package, config, subdir, aliases, base_class) self._extra_dirs = data.get('_extra_dirs', []) self._searched_paths = data.get('_searched_paths', set()) @@ -102,6 +104,7 @@ def __getstate__(self): return dict( class_name = self.class_name, + base_class = self.base_class, package = self.package, config = self.config, subdir = self.subdir, @@ -268,9 +271,13 @@ def get(self, name, *args, **kwargs): self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path) if 
kwargs.get('class_only', False): - return getattr(self._module_cache[path], self.class_name) + obj = getattr(self._module_cache[path], self.class_name) else: - return getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]: + return None + + return obj def all(self, *args, **kwargs): ''' instantiates all plugins with the same arguments ''' @@ -291,6 +298,9 @@ def all(self, *args, **kwargs): else: obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs) + if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]: + continue + # set extra info on the module, in case we want it later setattr(obj, '_original_path', path) yield obj @@ -299,21 +309,22 @@ def all(self, *args, **kwargs): 'ActionModule', 'ansible.plugins.action', C.DEFAULT_ACTION_PLUGIN_PATH, - 'action_plugins' + 'action_plugins', + required_base_class='ActionBase', ) cache_loader = PluginLoader( 'CacheModule', 'ansible.plugins.cache', C.DEFAULT_CACHE_PLUGIN_PATH, - 'cache_plugins' + 'cache_plugins', ) callback_loader = PluginLoader( 'CallbackModule', 'ansible.plugins.callback', C.DEFAULT_CALLBACK_PLUGIN_PATH, - 'callback_plugins' + 'callback_plugins', ) connection_loader = PluginLoader( @@ -321,7 +332,8 @@ def all(self, *args, **kwargs): 'ansible.plugins.connections', C.DEFAULT_CONNECTION_PLUGIN_PATH, 'connection_plugins', - aliases={'paramiko': 'paramiko_ssh'} + aliases={'paramiko': 'paramiko_ssh'}, + required_base_class='ConnectionBase', ) shell_loader = PluginLoader( @@ -335,28 +347,29 @@ def all(self, *args, **kwargs): '', 'ansible.modules', C.DEFAULT_MODULE_PATH, - 'library' + 'library', ) lookup_loader = PluginLoader( 'LookupModule', 'ansible.plugins.lookup', C.DEFAULT_LOOKUP_PLUGIN_PATH, - 'lookup_plugins' + 'lookup_plugins', + 
required_base_class='LookupBase', ) vars_loader = PluginLoader( 'VarsModule', 'ansible.plugins.vars', C.DEFAULT_VARS_PLUGIN_PATH, - 'vars_plugins' + 'vars_plugins', ) filter_loader = PluginLoader( 'FilterModule', 'ansible.plugins.filter', C.DEFAULT_FILTER_PLUGIN_PATH, - 'filter_plugins' + 'filter_plugins', ) fragment_loader = PluginLoader( @@ -371,4 +384,5 @@ def all(self, *args, **kwargs): 'ansible.plugins.strategies', None, 'strategy_plugins', + required_base_class='StrategyBase', ) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 83c129687ec88d..d6861118b2f0d4 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -34,6 +34,7 @@ from ansible.plugins import shell_loader from ansible.utils.debug import debug +from ansible.utils.unicode import to_bytes class ActionBase: @@ -51,20 +52,20 @@ def __init__(self, task, connection, connection_info, loader, templar, shared_lo self._loader = loader self._templar = templar self._shared_loader_obj = shared_loader_obj - self._shell = self.get_shell() - self._supports_check_mode = True - - def get_shell(self): - - if hasattr(self._connection, '_shell'): - shell_plugin = getattr(self._connection, '_shell', '') + # load the shell plugin for this action/connection + if self._connection_info.shell: + shell_type = self._connection_info.shell + elif hasattr(connection, '_shell'): + shell_type = getattr(connection, '_shell') else: - shell_plugin = shell_loader.get(os.path.basename(C.DEFAULT_EXECUTABLE)) - if shell_plugin is None: - shell_plugin = shell_loader.get('sh') + shell_type = os.path.basename(C.DEFAULT_EXECUTABLE) - return shell_plugin + self._shell = shell_loader.get(shell_type) + if not self._shell: + raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." 
% shell_type) + + self._supports_check_mode = True def _configure_module(self, module_name, module_args): ''' @@ -201,18 +202,13 @@ def _transfer_data(self, remote_path, data): Copies the module data out to the temporary module path. ''' - if type(data) == dict: + if isinstance(data, dict): data = jsonify(data) afd, afile = tempfile.mkstemp() afo = os.fdopen(afd, 'w') try: - # FIXME: is this still necessary? - #if not isinstance(data, unicode): - # #ensure the data is valid UTF-8 - # data = data.decode('utf-8') - #else: - # data = data.encode('utf-8') + data = to_bytes(data, errors='strict') afo.write(data) except Exception as e: #raise AnsibleError("failure encoding into utf-8: %s" % str(e)) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 040c2244483984..4cf10709b93109 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -212,9 +212,13 @@ def get_vars(self, loader, play=None, host=None, task=None): # FIXME: make sure all special vars are here # Finally, we create special vars - if host and self._inventory is not None: - hostvars = HostVars(vars_manager=self, inventory=self._inventory, loader=loader) - all_vars['hostvars'] = hostvars + + if host: + all_vars['groups'] = [group.name for group in host.get_groups()] + + if self._inventory is not None: + hostvars = HostVars(vars_manager=self, inventory=self._inventory, loader=loader) + all_vars['hostvars'] = hostvars if self._inventory is not None: all_vars['inventory_dir'] = self._inventory.basedir() From c80c5c980d84631ca20ed34d006b2a575bb5bf71 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 11 May 2015 10:10:58 -0700 Subject: [PATCH 0571/3617] Add python2.6 dep to aws and cloudstack doc fragments --- lib/ansible/utils/module_docs_fragments/aws.py | 1 + lib/ansible/utils/module_docs_fragments/cloudstack.py | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/ansible/utils/module_docs_fragments/aws.py b/lib/ansible/utils/module_docs_fragments/aws.py 
index 981eb8e105038b..421d8fd986074a 100644 --- a/lib/ansible/utils/module_docs_fragments/aws.py +++ b/lib/ansible/utils/module_docs_fragments/aws.py @@ -62,6 +62,7 @@ class ModuleDocFragment(object): aliases: [] version_added: "1.6" requirements: + - "python >= 2.6" - boto notes: - If parameters are not set within the module, the following diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py index 2e89178d0021c4..5a7411b199dfff 100644 --- a/lib/ansible/utils/module_docs_fragments/cloudstack.py +++ b/lib/ansible/utils/module_docs_fragments/cloudstack.py @@ -47,6 +47,7 @@ class ModuleDocFragment(object): default: 'get' aliases: [] requirements: + - "python >= 2.6" - cs notes: - Ansible uses the C(cs) library's configuration method if credentials are not From 0c21f05bcd9b4538d2068abbbab2ff69f451c8c0 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Mon, 11 May 2015 18:33:24 +0100 Subject: [PATCH 0572/3617] Update become.rst Visually separate commands and values from description text for clarity. Add value detail for become_user and become_method to match become. 
--- docsite/rst/become.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 4507b191009909..ca639c01f0d1ca 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -17,13 +17,13 @@ New directives -------------- become - equivalent to adding sudo: or su: to a play or task, set to true/yes to activate privilege escalation + equivalent to adding 'sudo:' or 'su:' to a play or task, set to 'true'/'yes' to activate privilege escalation become_user - equivalent to adding sudo_user: or su_user: to a play or task + equivalent to adding 'sudo_user:' or 'su_user:' to a play or task, set to user with desired privileges become_method - at play or task level overrides the default method set in ansible.cfg + at play or task level overrides the default method set in ansible.cfg, set to 'sudo'/'su'/'pbrun'/'pfexec' New ansible\_ variables From 7b1c6fbab906eba6056f6c573f4b54f8e099d9f2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 11 May 2015 12:48:03 -0500 Subject: [PATCH 0573/3617] Fix playbook includes so tags are obeyed (v2) --- lib/ansible/playbook/playbook_include.py | 5 +++-- samples/included_playbook.yml | 6 ++++++ samples/test_playbook.include | 2 ++ 3 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 samples/included_playbook.yml create mode 100644 samples/test_playbook.include diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index 075e6dcbdf2f3e..1f4bddd4a32b49 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -61,10 +61,11 @@ def load_data(self, ds, basedir, variable_manager=None, loader=None): pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager) - # finally, playbook includes can specify a list of variables, which are simply - # used to update the vars of each play in the playbook + # finally, update each loaded playbook entry 
with any variables specified + # on the included playbook and/or any tags which may have been set for entry in pb._entries: entry.vars.update(new_obj.vars) + entry.tags = list(set(entry.tags).union(new_obj.tags)) return pb diff --git a/samples/included_playbook.yml b/samples/included_playbook.yml new file mode 100644 index 00000000000000..d56e9c68f7f4b6 --- /dev/null +++ b/samples/included_playbook.yml @@ -0,0 +1,6 @@ +- hosts: localhost + gather_facts: no + tags: + - included + tasks: + - debug: msg="incuded playbook, variable is {{a}}" diff --git a/samples/test_playbook.include b/samples/test_playbook.include new file mode 100644 index 00000000000000..95c1a821471e32 --- /dev/null +++ b/samples/test_playbook.include @@ -0,0 +1,2 @@ +- include: included_playbook.yml a=1 + tags: include From fd321355d69cf2450549f44bfe1572d6f75a0dac Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 11 May 2015 14:04:17 -0500 Subject: [PATCH 0574/3617] Adding 'role_path' to VariableManager "magic" variables (v2) --- lib/ansible/vars/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 4cf10709b93109..736b9529ef547c 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -212,7 +212,6 @@ def get_vars(self, loader, play=None, host=None, task=None): # FIXME: make sure all special vars are here # Finally, we create special vars - if host: all_vars['groups'] = [group.name for group in host.get_groups()] @@ -220,6 +219,10 @@ def get_vars(self, loader, play=None, host=None, task=None): hostvars = HostVars(vars_manager=self, inventory=self._inventory, loader=loader) all_vars['hostvars'] = hostvars + if task: + if task._role: + all_vars['role_path'] = task._role._role_path + if self._inventory is not None: all_vars['inventory_dir'] = self._inventory.basedir() From 1caee5cb79789df9b38643bb6233b22b88e6b386 Mon Sep 17 00:00:00 2001 From: Alex Muller Date: Mon, 11 May 2015 
20:11:52 +0100 Subject: [PATCH 0575/3617] Remove unnecessary 'from' in playbook intro docs To make this sentence make sense. Fixes #10970. --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index a27285b4a9ff18..3899502ed475cf 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -148,7 +148,7 @@ Remote users can also be defined per task:: The `remote_user` parameter for tasks was added in 1.4. -Support for running things from as another user is also available (see :doc:`become`):: +Support for running things as another user is also available (see :doc:`become`):: --- - hosts: webservers From 490cde3cbd4f52bfd53709ce79f476946094f8d7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 11 May 2015 12:24:14 -0700 Subject: [PATCH 0576/3617] Add python2.6+ as a documented requirement for rackspace modules --- lib/ansible/utils/module_docs_fragments/rackspace.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/utils/module_docs_fragments/rackspace.py b/lib/ansible/utils/module_docs_fragments/rackspace.py index a49202c500fe19..7430ca696bb03d 100644 --- a/lib/ansible/utils/module_docs_fragments/rackspace.py +++ b/lib/ansible/utils/module_docs_fragments/rackspace.py @@ -50,6 +50,7 @@ class ModuleDocFragment(object): - Whether or not to require SSL validation of API endpoints version_added: 1.5 requirements: + - "python >= 2.6" - pyrax notes: - The following environment variables can be used, C(RAX_USERNAME), @@ -111,6 +112,7 @@ class ModuleDocFragment(object): - Whether or not to require SSL validation of API endpoints version_added: 1.5 requirements: + - "python >= 2.6" - pyrax notes: - The following environment variables can be used, C(RAX_USERNAME), From adc7f91865e222f067369723e2b1e408fae3b311 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Mon, 11 May 2015 21:30:59 +0200 Subject: 
[PATCH 0577/3617] changelog: add cs_account --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fbf7f8e9e78bd7..6dba043feb08b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ New Modules: * consul_kv * consul_session * cloudtrail + * cloudstack: cs_account * cloudstack: cs_affinitygroup * cloudstack: cs_firewall * cloudstack: cs_iso From 75b208252988c3f8715e8585530c7f2a392f7f52 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 12 May 2015 10:33:19 -0500 Subject: [PATCH 0578/3617] Submodule update --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 36891d82735ed9..576ca33bdc968e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 36891d82735ed90bbf8a45ad9ce3f044e5e39ec5 +Subproject commit 576ca33bdc968edb4fb303c41ca0157d85fd30ab From 6918a588c610b4656833a6493d84fa94649b31d9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 12 May 2015 08:44:24 -0700 Subject: [PATCH 0579/3617] Update the extras module ref --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 6bf4558df8c61a..e5022ba87b6c45 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 6bf4558df8c61ae457dc7e5be58855d2931b607f +Subproject commit e5022ba87b6c45488b7d4e140df7f098495dba67 From 8fdf9ae59b5c760c72451b0e863ec7c35a7c01cf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 12 May 2015 12:18:55 -0400 Subject: [PATCH 0580/3617] moved module_doc_fragments to v2 --- {v1 => lib}/ansible/utils/module_docs_fragments/__init__.py | 0 {v1 => lib}/ansible/utils/module_docs_fragments/aws.py | 0 {v1 => lib}/ansible/utils/module_docs_fragments/cloudstack.py | 0 {v1 => lib}/ansible/utils/module_docs_fragments/files.py | 0 {v1 => 
lib}/ansible/utils/module_docs_fragments/openstack.py | 0 {v1 => lib}/ansible/utils/module_docs_fragments/rackspace.py | 0 {lib => v1}/ansible/utils/module_docs_fragments | 0 7 files changed, 0 insertions(+), 0 deletions(-) rename {v1 => lib}/ansible/utils/module_docs_fragments/__init__.py (100%) rename {v1 => lib}/ansible/utils/module_docs_fragments/aws.py (100%) rename {v1 => lib}/ansible/utils/module_docs_fragments/cloudstack.py (100%) rename {v1 => lib}/ansible/utils/module_docs_fragments/files.py (100%) rename {v1 => lib}/ansible/utils/module_docs_fragments/openstack.py (100%) rename {v1 => lib}/ansible/utils/module_docs_fragments/rackspace.py (100%) rename {lib => v1}/ansible/utils/module_docs_fragments (100%) diff --git a/v1/ansible/utils/module_docs_fragments/__init__.py b/lib/ansible/utils/module_docs_fragments/__init__.py similarity index 100% rename from v1/ansible/utils/module_docs_fragments/__init__.py rename to lib/ansible/utils/module_docs_fragments/__init__.py diff --git a/v1/ansible/utils/module_docs_fragments/aws.py b/lib/ansible/utils/module_docs_fragments/aws.py similarity index 100% rename from v1/ansible/utils/module_docs_fragments/aws.py rename to lib/ansible/utils/module_docs_fragments/aws.py diff --git a/v1/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py similarity index 100% rename from v1/ansible/utils/module_docs_fragments/cloudstack.py rename to lib/ansible/utils/module_docs_fragments/cloudstack.py diff --git a/v1/ansible/utils/module_docs_fragments/files.py b/lib/ansible/utils/module_docs_fragments/files.py similarity index 100% rename from v1/ansible/utils/module_docs_fragments/files.py rename to lib/ansible/utils/module_docs_fragments/files.py diff --git a/v1/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py similarity index 100% rename from v1/ansible/utils/module_docs_fragments/openstack.py rename to 
lib/ansible/utils/module_docs_fragments/openstack.py diff --git a/v1/ansible/utils/module_docs_fragments/rackspace.py b/lib/ansible/utils/module_docs_fragments/rackspace.py similarity index 100% rename from v1/ansible/utils/module_docs_fragments/rackspace.py rename to lib/ansible/utils/module_docs_fragments/rackspace.py diff --git a/lib/ansible/utils/module_docs_fragments b/v1/ansible/utils/module_docs_fragments similarity index 100% rename from lib/ansible/utils/module_docs_fragments rename to v1/ansible/utils/module_docs_fragments From 12a800c0e7586f98d91ebc5e41d7cf7eadd6bb69 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 12 May 2015 12:24:32 -0400 Subject: [PATCH 0581/3617] fixed less opts issue --- lib/ansible/cli/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 4a7f5bbacc1d47..98b524b44a5e41 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -426,7 +426,7 @@ def pager(text): def pager_pipe(text, cmd): ''' pipe text through a pager ''' if 'LESS' not in os.environ: - os.environ['LESS'] = LESS_OPTS + os.environ['LESS'] = self.LESS_OPTS try: cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout) cmd.communicate(input=text) From 09605706d9b8495f76b346616cf7bc4568e3e01f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 12 May 2015 12:26:20 -0400 Subject: [PATCH 0582/3617] relly fix it this time --- lib/ansible/cli/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 98b524b44a5e41..1e997f58d37a0e 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -426,7 +426,7 @@ def pager(text): def pager_pipe(text, cmd): ''' pipe text through a pager ''' if 'LESS' not in os.environ: - os.environ['LESS'] = self.LESS_OPTS + os.environ['LESS'] = CLI.LESS_OPTS try: cmd = subprocess.Popen(cmd, shell=True, 
stdin=subprocess.PIPE, stdout=sys.stdout) cmd.communicate(input=text) From 3697d6582fef5d01e3f2c5da8b3aa35ad5f35500 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 12 May 2015 11:10:22 -0500 Subject: [PATCH 0583/3617] Connection plugins no longer auto-connect (v2) Also fixed a bug in ssh.py where an attribute was being inappropriately initialized in the _connect() method instead of __init__() --- lib/ansible/plugins/connections/__init__.py | 2 -- lib/ansible/plugins/connections/ssh.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 5558f5ba86a2b3..70807b08f616b3 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -54,8 +54,6 @@ def __init__(self, connection_info, new_stdin, *args, **kwargs): if not hasattr(self, '_connected'): self._connected = False - self._connect() - def _become_method_supported(self, become_method): ''' Checks if the current class supports this privilege escalation method ''' diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 7c95cc3c0f532c..426dc6b49d06ae 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -41,6 +41,7 @@ class Connection(ConnectionBase): def __init__(self, *args, **kwargs): # SSH connection specific init stuff + self._common_args = [] self.HASHED_KEY_MAGIC = "|1|" self._has_pipelining = True @@ -65,7 +66,6 @@ def _connect(self): if self._connected: return self - self._common_args = [] extra_args = C.ANSIBLE_SSH_ARGS if extra_args is not None: # make sure there is no empty string added as this can produce weird errors From 361eb291467258f4fbc29569510916bf7b253bc2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 12 May 2015 11:30:08 -0500 Subject: [PATCH 0584/3617] Also make task_executor connect explicitly (v2) --- 
lib/ansible/executor/task_executor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 6d62eea68bab66..9bc875b02a4395 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -210,6 +210,7 @@ def _execute(self, variables=None): # get the connection and the handler for this execution self._connection = self._get_connection(variables) self._connection.set_host_overrides(host=self._host) + self._connection._connect() self._handler = self._get_action_handler(connection=self._connection, templar=templar) From 1ca8cb8553c07dab5baf5c95646316970d29006b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 12 May 2015 12:24:57 -0500 Subject: [PATCH 0585/3617] Fixing up v2 unit tests --- lib/ansible/playbook/block.py | 2 +- test/units/executor/test_play_iterator.py | 22 ++++++++++++++++------ test/units/playbook/test_play.py | 6 +++--- test/units/vars/test_variable_manager.py | 1 + 4 files changed, 21 insertions(+), 10 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index d65f78712798ef..1bbc06183f2c82 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -66,7 +66,7 @@ def get_vars(self): return all_vars @staticmethod - def load(data, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): + def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None): b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers) return b.load_data(data, variable_manager=variable_manager, loader=loader) diff --git a/test/units/executor/test_play_iterator.py b/test/units/executor/test_play_iterator.py index 2fa32c7119e4e6..7f8ed4d6817ed5 100644 --- a/test/units/executor/test_play_iterator.py +++ 
b/test/units/executor/test_play_iterator.py @@ -23,6 +23,7 @@ from ansible.compat.tests.mock import patch, MagicMock from ansible.errors import AnsibleError, AnsibleParserError +from ansible.executor.connection_info import ConnectionInformation from ansible.executor.play_iterator import PlayIterator from ansible.playbook import Playbook @@ -67,19 +68,28 @@ def test_play_iterator(self): inventory.get_hosts.return_value = hosts inventory.filter_hosts.return_value = hosts - itr = PlayIterator(inventory, p._entries[0]) - task = itr.get_next_task_for_host(hosts[0]) + connection_info = ConnectionInformation(play=p._entries[0]) + + itr = PlayIterator( + inventory=inventory, + play=p._entries[0], + connection_info=connection_info, + all_vars=dict(), + ) + + (host_state, task) = itr.get_next_task_for_host(hosts[0]) print(task) self.assertIsNotNone(task) - task = itr.get_next_task_for_host(hosts[0]) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) print(task) self.assertIsNotNone(task) - task = itr.get_next_task_for_host(hosts[0]) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) print(task) self.assertIsNotNone(task) - task = itr.get_next_task_for_host(hosts[0]) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) print(task) self.assertIsNotNone(task) - task = itr.get_next_task_for_host(hosts[0]) + (host_state, task) = itr.get_next_task_for_host(hosts[0]) print(task) self.assertIsNone(task) + diff --git a/test/units/playbook/test_play.py b/test/units/playbook/test_play.py index 637b6dbbe13a13..561da36272b261 100644 --- a/test/units/playbook/test_play.py +++ b/test/units/playbook/test_play.py @@ -23,9 +23,9 @@ from ansible.compat.tests.mock import patch, MagicMock from ansible.errors import AnsibleError, AnsibleParserError +from ansible.playbook.block import Block from ansible.playbook.play import Play from ansible.playbook.role import Role -from ansible.playbook.task import Task from units.mock.loader import DictDataLoader @@ -39,7 +39,7 @@ def 
tearDown(self): def test_empty_play(self): p = Play.load(dict()) - self.assertEqual(str(p), "PLAY: ") + self.assertEqual(str(p), "PLAY: ") def test_basic_play(self): p = Play.load(dict( @@ -129,4 +129,4 @@ def test_play_compile(self): tasks = p.compile() self.assertEqual(len(tasks), 1) - self.assertIsInstance(tasks[0], Task) + self.assertIsInstance(tasks[0], Block) diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py index 173ba1370ddea9..9abed8f9482c04 100644 --- a/test/units/vars/test_variable_manager.py +++ b/test/units/vars/test_variable_manager.py @@ -137,6 +137,7 @@ def test_variable_manager_task_vars(self): fake_loader = DictDataLoader({}) mock_task = MagicMock() + mock_task._role = None mock_task.get_vars.return_value = dict(foo="bar") v = VariableManager() From 9b646dea41e68c3b68c2b16d87c604b38990bfd4 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Tue, 12 May 2015 12:51:35 -0500 Subject: [PATCH 0586/3617] Add optional 'skip_missing' flag to subelements --- docsite/rst/playbooks_loops.rst | 33 ++++++++- lib/ansible/plugins/lookup/subelements.py | 72 +++++++++++++++---- .../roles/test_iterators/tasks/main.yml | 35 ++++++++- .../roles/test_iterators/vars/main.yml | 34 +++++++++ 4 files changed, 157 insertions(+), 17 deletions(-) diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst index e71c81cefc2267..5456791f61472d 100644 --- a/docsite/rst/playbooks_loops.rst +++ b/docsite/rst/playbooks_loops.rst @@ -147,9 +147,26 @@ How might that be accomplished? 
Let's assume you had the following defined and authorized: - /tmp/alice/onekey.pub - /tmp/alice/twokey.pub + mysql: + password: mysql-password + hosts: + - "%" + - "127.0.0.1" + - "::1" + - "localhost" + privs: + - "*.*:SELECT" + - "DB1.*:ALL" - name: bob authorized: - /tmp/bob/id_rsa.pub + mysql: + password: other-mysql-password + hosts: + - "db1" + privs: + - "*.*:SELECT" + - "DB2.*:ALL" It might happen like so:: @@ -161,9 +178,23 @@ It might happen like so:: - users - authorized -Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given key inside of those +Given the mysql hosts and privs subkey lists, you can also iterate over a list in a nested subkey:: + + - name: Setup MySQL users + mysql_user: name={{ item.0.user }} password={{ item.0.mysql.password }} host={{ item.1 }} priv={{ item.0.mysql.privs | join('/') }} + with_subelements: + - users + - mysql.hosts + +Subelements walks a list of hashes (aka dictionaries) and then traverses a list with a given (nested sub-)key inside of those records. +Optionally, you can add a third element to the subelements list, that holds a +dictionary of flags. Currently you can add the 'skip_missing' flag. If set to +True, the lookup plugin will skip the lists items that do not contain the given +subkey. Without this flag, or if that flag is set to False, the plugin will +yield an error and complain about the missing subkey. + The authorized_key pattern is exactly where it comes up most. .. 
_looping_over_integer_sequences: diff --git a/lib/ansible/plugins/lookup/subelements.py b/lib/ansible/plugins/lookup/subelements.py index 09a2ca306a11ee..0636387be65e2c 100644 --- a/lib/ansible/plugins/lookup/subelements.py +++ b/lib/ansible/plugins/lookup/subelements.py @@ -20,40 +20,82 @@ from ansible.errors import * from ansible.plugins.lookup import LookupBase from ansible.utils.listify import listify_lookup_plugin_terms +from ansible.utils.boolean import boolean + +FLAGS = ('skip_missing',) + class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): - terms[0] = listify_lookup_plugin_terms(terms[0], variables, loader=self._loader) + def _raise_terms_error(msg=""): + raise errors.AnsibleError( + "subelements lookup expects a list of two or three items, " + + msg) + terms = listify_lookup_plugin_terms(terms, self.basedir, inject) + terms[0] = listify_lookup_plugin_terms(terms[0], self.basedir, inject) - if not isinstance(terms, list) or not len(terms) == 2: - raise AnsibleError("subelements lookup expects a list of two items, first a dict or a list, and second a string") + # check lookup terms - check number of terms + if not isinstance(terms, list) or not 2 <= len(terms) <= 3: + _raise_terms_error() - if isinstance(terms[0], dict): # convert to list: - if terms[0].get('skipped',False) != False: + # first term should be a list (or dict), second a string holding the subkey + if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], basestring): + _raise_terms_error("first a dict or a list, second a string pointing to the subkey") + subelements = terms[1].split(".") + + if isinstance(terms[0], dict): # convert to list: + if terms[0].get('skipped', False) is not False: # the registered result was completely skipped return [] elementlist = [] for key in terms[0].iterkeys(): elementlist.append(terms[0][key]) - else: + else: elementlist = terms[0] - subelement = terms[1] + # check for optional flags in third term + flags = {} + if 
len(terms) == 3: + flags = terms[2] + if not isinstance(flags, dict) and not all([isinstance(key, basestring) and key in FLAGS for key in flags]): + _raise_terms_error("the optional third item must be a dict with flags %s" % FLAGS) + # build_items ret = [] for item0 in elementlist: if not isinstance(item0, dict): - raise AnsibleError("subelements lookup expects a dictionary, got '%s'" %item0) - if item0.get('skipped', False) != False: + raise errors.AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0) + if item0.get('skipped', False) is not False: # this particular item is to be skipped - continue - if not subelement in item0: - raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subelement, item0)) - if not isinstance(item0[subelement], list): - raise AnsibleError("the key %s should point to a list, got '%s'" % (subelement, item0[subelement])) - sublist = item0.pop(subelement, []) + continue + + skip_missing = boolean(flags.get('skip_missing', False)) + subvalue = item0 + lastsubkey = False + sublist = [] + for subkey in subelements: + if subkey == subelements[-1]: + lastsubkey = True + if not subkey in subvalue: + if skip_missing: + continue + else: + raise errors.AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue)) + if not lastsubkey: + if not isinstance(subvalue[subkey], dict): + if skip_missing: + continue + else: + raise errors.AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey])) + else: + subvalue = subvalue[subkey] + else: # lastsubkey + if not isinstance(subvalue[subkey], list): + raise errors.AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey])) + else: + sublist = subvalue.pop(subkey, []) for item1 in sublist: ret.append((item0, item1)) diff --git a/test/integration/roles/test_iterators/tasks/main.yml b/test/integration/roles/test_iterators/tasks/main.yml index c95eaff3da4739..931e30458265b6 100644 --- 
a/test/integration/roles/test_iterators/tasks/main.yml +++ b/test/integration/roles/test_iterators/tasks/main.yml @@ -39,7 +39,7 @@ set_fact: "{{ item.0 + item.1 }}=x" with_nested: - [ 'a', 'b' ] - - [ 'c', 'd' ] + - [ 'c', 'd' ] - debug: var=ac - debug: var=ad @@ -97,6 +97,39 @@ - "_ye == 'e'" - "_yf == 'f'" +- name: test with_subelements in subkeys + set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}" + with_subelements: + - element_data + - the.sub.key.list + +- name: verify with_subelements in subkeys results + assert: + that: + - "_xq == 'q'" + - "_xr == 'r'" + - "_yi == 'i'" + - "_yo == 'o'" + +- name: test with_subelements with missing key or subkey + set_fact: "{{ '_'+ item.0.id + item.1 }}={{ item.1 }}" + with_subelements: + - element_data_missing + - the.sub.key.list + - skip_missing: yes + register: _subelements_missing_subkeys + +- debug: var=_subelements_missing_subkeys.skipped +- debug: var=_subelements_missing_subkeys.results|length +- name: verify with_subelements in subkeys results + assert: + that: + - _subelements_missing_subkeys.skipped is not defined + - _subelements_missing_subkeys.results|length == 2 + - "_xk == 'k'" + - "_xl == 'l'" + + # WITH_TOGETHER - name: test with_together diff --git a/test/integration/roles/test_iterators/vars/main.yml b/test/integration/roles/test_iterators/vars/main.yml index cd0078c9a9cf44..f7ef50f57a1b42 100644 --- a/test/integration/roles/test_iterators/vars/main.yml +++ b/test/integration/roles/test_iterators/vars/main.yml @@ -3,7 +3,41 @@ element_data: the_list: - "f" - "d" + the: + sub: + key: + list: + - "q" + - "r" - id: y the_list: - "e" - "f" + the: + sub: + key: + list: + - "i" + - "o" +element_data_missing: + - id: x + the_list: + - "f" + - "d" + the: + sub: + key: + list: + - "k" + - "l" + - id: y + the_list: + - "f" + - "d" + - id: z + the_list: + - "e" + - "f" + the: + sub: + key: From d0d0e9933f7a515bbb2c951ef106e3006fc29bb7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 12 May 2015 
11:03:11 -0700 Subject: [PATCH 0587/3617] Update module refs in v2 --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 42abf85be7acbd..2a6a79c3675b56 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 42abf85be7acbd95f6904a313c34a9495e99ca14 +Subproject commit 2a6a79c3675b56bf3a171feb1f310689c01e894e diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 6bf4558df8c61a..8afc822d0c6b89 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 6bf4558df8c61ae457dc7e5be58855d2931b607f +Subproject commit 8afc822d0c6b89eee710cf989612a3d2c137cb3c From b03b7892f8ca3f62371863da22542b38fdb5d3be Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 12 May 2015 13:08:46 -0500 Subject: [PATCH 0588/3617] Fix method of exiting task loop (v2) --- lib/ansible/plugins/strategies/linear.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index bd510dc55742ff..f1efadd5476e74 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -226,7 +226,7 @@ def __repr__(self): # FIXME: this should also be moved to the base class in a method included_files = [] for res in host_results: - if res.is_failed(): + if res._host in self._tqm._failed_hosts: return 1 if res._task.action == 'include': From dcb54d9657882638a1ccd661d83d8400d9d47499 Mon Sep 17 00:00:00 2001 From: Jan Losinski Date: Tue, 12 May 2015 18:43:16 +0200 Subject: [PATCH 0589/3617] Add integration test to verify #10073 In issue #10073 a misbehaviour in literal handling for inline lookup arguments that can cause unexpected behaviur was reported. This integration testcase reproduce the problem. 
After applying pull request #10991 the issue is fixed and the test passes. Signed-off-by: Jan Losinski --- .../roles/test_lookups/tasks/main.yml | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index 8440ff5772022b..f9970f70a29f70 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -129,3 +129,26 @@ debug: msg={{item}} with_items: things2 + +# BUG #10073 nested template handling + +- name: set variable that clashes + set_fact: + LOGNAME: foobar + + +- name: get LOGNAME environment var value + shell: echo {{ '$LOGNAME' }} + register: known_var_value + +- name: do the lookup for env LOGNAME + set_fact: + test_val: "{{ lookup('env', 'LOGNAME') }}" + +- debug: var=test_val + +- name: compare values + assert: + that: + - "test_val == known_var_value.stdout" + From 4d999f8fe014e3fd11f9fe2146f3c99f1e355e48 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 12 May 2015 15:08:35 -0500 Subject: [PATCH 0590/3617] Fix logic error in parent attribute retrieval for blocks/roles (v2) --- lib/ansible/playbook/block.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 1bbc06183f2c82..a82aae1e67b545 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -274,15 +274,20 @@ def _get_parent_attribute(self, attr, extend=False): value = parent_value if self._role and (not value or extend): parent_value = getattr(self._role, attr) + if extend: + value = self._extend_value(value, parent_value) + else: + value = parent_value + if len(self._dep_chain) and (not value or extend): reverse_dep_chain = self._dep_chain[:] reverse_dep_chain.reverse() for dep in reverse_dep_chain: dep_value = getattr(dep, attr) if extend: - value = self._extend_value(value, parent_value) + value = 
self._extend_value(value, dep_value) else: - value = parent_value + value = dep_value if value and not extend: break From 830225d9c14b002babb9b8d10a3e1d7be31a97bd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 12 May 2015 15:09:03 -0500 Subject: [PATCH 0591/3617] Fix errors in subelements lookup plugin and associated tests (v2) --- lib/ansible/plugins/lookup/subelements.py | 4 ++-- test/integration/roles/test_iterators/tasks/main.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/lookup/subelements.py b/lib/ansible/plugins/lookup/subelements.py index 0636387be65e2c..b934a053ebf36d 100644 --- a/lib/ansible/plugins/lookup/subelements.py +++ b/lib/ansible/plugins/lookup/subelements.py @@ -33,8 +33,8 @@ def _raise_terms_error(msg=""): raise errors.AnsibleError( "subelements lookup expects a list of two or three items, " + msg) - terms = listify_lookup_plugin_terms(terms, self.basedir, inject) - terms[0] = listify_lookup_plugin_terms(terms[0], self.basedir, inject) + terms = listify_lookup_plugin_terms(terms, variables, loader=self._loader) + terms[0] = listify_lookup_plugin_terms(terms[0], variables, loader=self._loader) # check lookup terms - check number of terms if not isinstance(terms, list) or not 2 <= len(terms) <= 3: diff --git a/test/integration/roles/test_iterators/tasks/main.yml b/test/integration/roles/test_iterators/tasks/main.yml index 931e30458265b6..539ac2a4e77cf6 100644 --- a/test/integration/roles/test_iterators/tasks/main.yml +++ b/test/integration/roles/test_iterators/tasks/main.yml @@ -119,7 +119,7 @@ - skip_missing: yes register: _subelements_missing_subkeys -- debug: var=_subelements_missing_subkeys.skipped +- debug: var=_subelements_missing_subkeys - debug: var=_subelements_missing_subkeys.results|length - name: verify with_subelements in subkeys results assert: From 7b33f5c9522bce1bf6a0fd3b33e2f1a53b2f8ebd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 13 May 2015 09:21:55 -0400 Subject: 
[PATCH 0592/3617] added circonus annotation --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6dba043feb08b2..5538ca72eff8d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Deprecated Modules (new ones in parens): New Modules: * find * ec2_ami_find + * circonus_annotation * consul * consul_acl * consul_kv From c82574e044ad230e788151cf91b3dbc539fee9c9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 13 May 2015 09:46:02 -0400 Subject: [PATCH 0593/3617] added cs_portforward --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5538ca72eff8d5..4f04d7f3da45f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ New Modules: * cloudstack: cs_iso * cloudstack: cs_instance * cloudstack: cs_instancegroup + * cloudstack: cs_portforward * cloudstack: cs_sshkeypair * cloudstack: cs_securitygroup * cloudstack: cs_securitygroup_rule From 079fca27a20aefef17d3b572f6934c3d1d4e0040 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 13 May 2015 06:57:04 -0700 Subject: [PATCH 0594/3617] Update module refs for v2 --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 2a6a79c3675b56..46a553189331dc 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 2a6a79c3675b56bf3a171feb1f310689c01e894e +Subproject commit 46a553189331dcbe2017aa47345c1c10640263bc diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8afc822d0c6b89..aa86c5ff9010a5 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8afc822d0c6b89eee710cf989612a3d2c137cb3c +Subproject commit aa86c5ff9010a5201c8ee5ffd2b0045abfaba899 From 0a1dc74463bc680e4cc23d6a02fb08feddf6a1f9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 13 May 2015 
07:52:13 -0700 Subject: [PATCH 0595/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 576ca33bdc968e..8ab439498c9c07 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 576ca33bdc968edb4fb303c41ca0157d85fd30ab +Subproject commit 8ab439498c9c079abf0ef54e69ddcf1acd8e6f3e diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index e5022ba87b6c45..aa86c5ff9010a5 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit e5022ba87b6c45488b7d4e140df7f098495dba67 +Subproject commit aa86c5ff9010a5201c8ee5ffd2b0045abfaba899 From 3861597696e3504c78eb4f08172682c4816eca7d Mon Sep 17 00:00:00 2001 From: Aleksey Zhukov Date: Wed, 13 May 2015 18:12:48 +0300 Subject: [PATCH 0596/3617] Bring back cache --- plugins/inventory/digital_ocean.ini | 9 +- plugins/inventory/digital_ocean.py | 156 ++++++++++++++++++++++------ 2 files changed, 129 insertions(+), 36 deletions(-) diff --git a/plugins/inventory/digital_ocean.ini b/plugins/inventory/digital_ocean.ini index c4e3fe2141946b..021899731c45db 100644 --- a/plugins/inventory/digital_ocean.ini +++ b/plugins/inventory/digital_ocean.ini @@ -3,12 +3,11 @@ [digital_ocean] -# The module needs your DigitalOcean Client ID and API Key. -# These may also be specified on the command line via --client-id and --api-key -# or via the environment variables DO_CLIENT_ID and DO_API_KEY +# The module needs your DigitalOcean API Token. +# It may also be specified on the command line via --api-token +# or via the environment variables DO_API_TOKEN or DO_API_KEY # -#client_id = abcdefg123456 -#api_key = 123456abcdefg +#api_token = 123456abcdefg # API calls to DigitalOcean may be slow. 
For this reason, we cache the results diff --git a/plugins/inventory/digital_ocean.py b/plugins/inventory/digital_ocean.py index 29c4856efb5515..9bfb184d578923 100755 --- a/plugins/inventory/digital_ocean.py +++ b/plugins/inventory/digital_ocean.py @@ -24,12 +24,12 @@ Configuration is read from `digital_ocean.ini`, then from environment variables, then and command-line arguments. -Most notably, the DigitalOcean Client ID and API Key must be specified. They -can be specified in the INI file or with the following environment variables: - export DO_CLIENT_ID='DO123' DO_API_KEY='abc123' +Most notably, the DigitalOcean API Token must be specified. It can be specified +in the INI file or with the following environment variables: + export DO_API_TOKEN='abc123' or + export DO_API_KEY='abc123' -Alternatively, they can be passed on the command-line with --client-id and ---api-key. +Alternatively, it can be passed on the command-line with --api-token. If you specify DigitalOcean credentials in the INI file, a handy way to get them into your environment (e.g., to use the digital_ocean module) @@ -43,31 +43,40 @@ - image_ID - image_NAME - distro_NAME (distribution NAME from image) - - region_ID - region_NAME - - size_ID - size_NAME - status_STATUS When run against a specific host, this script returns the following variables: + - do_backup_ids - do_created_at - - do_distroy + - do_disk + - do_features - list - do_id - - do_image - - do_image_id + - do_image - object - do_ip_address + - do_kernel - object + - do_locked + - de_memory - do_name - - do_region - - do_region_id - - do_size - - do_size_id + - do_networks - object + - do_next_backup_window + - do_region - object + - do_size - object + - do_size_slug + - do_snapshot_ids - list - do_status + - do_vcpus ----- ``` usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets] [--regions] [--images] [--sizes] [--ssh-keys] [--domains] [--pretty] + [--cache-path CACHE_PATH] + [--cache-max_age CACHE_MAX_AGE] + 
[--force-cache] + [--refresh-cache] [--api-token API_TOKEN] Produce an Ansible Inventory file based on DigitalOcean credentials @@ -86,6 +95,13 @@ --ssh-keys List SSH keys as JSON --domains List Domains as JSON --pretty, -p Pretty-print results + --cache-path CACHE_PATH + Path to the cache files (default: .) + --cache-max_age CACHE_MAX_AGE + Maximum age of the cached items (default: 0) + --force-cache Only use data from the cache + --refresh-cache Force refresh of cache by making API requests to + DigitalOcean (default: False - use cache files) --api-token API_TOKEN, -a API_TOKEN DigitalOcean API Token ``` @@ -147,6 +163,10 @@ def __init__(self): self.data = {} # All DigitalOcean data self.inventory = {} # Ansible Inventory + # Define defaults + self.cache_path = '.' + self.cache_max_age = 0 + # Read settings, environment variables, and CLI arguments self.read_settings() self.read_environment() @@ -164,27 +184,45 @@ def __init__(self): print "DO_API_TOKEN=%s" % self.api_token sys.exit(0) + # Manage cache + self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" + self.cache_refreshed = False + + if self.is_cache_valid: + self.load_from_cache() + if len(self.data) == 0: + if self.args.force_cache: + print '''Cache is empty and --force-cache was specified''' + sys.exit(-1) + self.manager = DoManager(None, self.api_token, api_version=2) # Pick the json_data to print based on the CLI command if self.args.droplets: - json_data = self.load_from_digital_ocean('droplets') + self.load_from_digital_ocean('droplets') + json_data = {'droplets': self.data['droplets']} elif self.args.regions: - json_data = self.load_from_digital_ocean('regions') + self.load_from_digital_ocean('regions') + json_data = {'regions': self.data['regions']} elif self.args.images: - json_data = self.load_from_digital_ocean('images') + self.load_from_digital_ocean('images') + json_data = {'images': self.data['images']} elif self.args.sizes: - json_data = 
self.load_from_digital_ocean('sizes') + self.load_from_digital_ocean('sizes') + json_data = {'sizes': self.data['sizes']} elif self.args.ssh_keys: - json_data = self.load_from_digital_ocean('ssh_keys') + self.load_from_digital_ocean('ssh_keys') + json_data = {'ssh_keys': self.data['ssh_keys']} elif self.args.domains: - json_data = self.load_from_digital_ocean('domains') + self.load_from_digital_ocean('domains') + json_data = {'domains': self.data['domains']} elif self.args.all: - json_data = self.load_from_digital_ocean() + self.load_from_digital_ocean() + json_data = self.data elif self.args.host: json_data = self.load_droplet_variables_for_host() else: # '--list' this is last to make it default - self.data = self.load_from_digital_ocean('droplets') + self.load_from_digital_ocean('droplets') self.build_inventory() json_data = self.inventory @@ -241,6 +279,12 @@ def read_cli_args(self): parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results') + parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') + parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') + parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') + parser.add_argument('--refresh-cache','-r', action='store_true', default=False, + help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') + parser.add_argument('--env','-e', action='store_true', help='Display DO_API_TOKEN') parser.add_argument('--api-token','-a', action='store', help='DigitalOcean API Token') @@ -263,20 +307,25 @@ def read_cli_args(self): def load_from_digital_ocean(self, resource=None): '''Get JSON from DigitalOcean API''' - json_data = {} + if self.args.force_cache: + return + if self.args.refresh_cache: + resource=None + if resource == 'droplets' or resource is None: - json_data['droplets'] = 
self.manager.all_active_droplets() + self.data['droplets'] = self.manager.all_active_droplets() if resource == 'regions' or resource is None: - json_data['regions'] = self.manager.all_regions() + self.data['regions'] = self.manager.all_regions() if resource == 'images' or resource is None: - json_data['images'] = self.manager.all_images(filter=None) + self.data['images'] = self.manager.all_images(filter=None) if resource == 'sizes' or resource is None: - json_data['sizes'] = self.manager.sizes() + self.data['sizes'] = self.manager.sizes() if resource == 'ssh_keys' or resource is None: - json_data['ssh_keys'] = self.manager.all_ssh_keys() + self.data['ssh_keys'] = self.manager.all_ssh_keys() if resource == 'domains' or resource is None: - json_data['domains'] = self.manager.all_domains() - return json_data + self.data['domains'] = self.manager.all_domains() + + self.write_to_cache() def build_inventory(self): @@ -309,8 +358,53 @@ def load_droplet_variables_for_host(self): '''Generate a JSON response to a --host call''' host = int(self.args.host) - return self.manager.show_droplet(host) + droplet = self.manager.show_droplet(host) + + # Put all the information in a 'do_' namespace + info = {} + for k, v in droplet.items(): + info['do_'+k] = v + + return {'droplet': info} + + + + ########################################################################### + # Cache Management + ########################################################################### + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if it is still valid ''' + if os.path.isfile(self.cache_filename): + mod_time = os.path.getmtime(self.cache_filename) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + return True + return False + + + def load_from_cache(self): + ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' + try: + cache = open(self.cache_filename, 'r') + json_data = cache.read() + cache.close() + 
data = json.loads(json_data) + except IOError: + data = {'data': {}, 'inventory': {}} + + self.data = data['data'] + self.inventory = data['inventory'] + + + def write_to_cache(self): + ''' Writes data in JSON format to a file ''' + data = { 'data': self.data, 'inventory': self.inventory } + json_data = json.dumps(data, sort_keys=True, indent=2) + + cache = open(self.cache_filename, 'w') + cache.write(json_data) + cache.close() ########################################################################### From 892fba265bda111ab667cf3d3a046be946106932 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 13 May 2015 08:15:12 -0700 Subject: [PATCH 0597/3617] Update to fix documentation build --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index aa86c5ff9010a5..e3373ffc46d5b3 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit aa86c5ff9010a5201c8ee5ffd2b0045abfaba899 +Subproject commit e3373ffc46d5b318222a6dd71d6790bcdecb43be From b85ce3883451e20c7869dce39d795ba6cf62ed08 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 13 May 2015 11:15:04 -0400 Subject: [PATCH 0598/3617] slight changes to error handling to align with v1 --- bin/ansible | 18 ++++++++++++++---- lib/ansible/cli/adhoc.py | 2 +- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/bin/ansible b/bin/ansible index 467dd505a2e17a..12ad89fcff3797 100755 --- a/bin/ansible +++ b/bin/ansible @@ -35,7 +35,7 @@ except Exception: import os import sys -from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError from ansible.utils.display import Display ######################################################## @@ -70,10 +70,20 @@ if __name__ == '__main__': except AnsibleOptionsError as e: cli.parser.print_help() display.display(str(e), stderr=True, color='red') - 
sys.exit(1) + sys.exit(5) + except AnsibleParserError as e: + display.display(str(e), stderr=True, color='red') + sys.exit(4) +# TQM takes care of these, but leaving comment to reserve the exit codes +# except AnsibleHostUnreachable as e: +# display.display(str(e), stderr=True, color='red') +# sys.exit(3) +# except AnsibleHostFailed as e: +# display.display(str(e), stderr=True, color='red') +# sys.exit(2) except AnsibleError as e: display.display(str(e), stderr=True, color='red') - sys.exit(2) + sys.exit(1) except KeyboardInterrupt: display.error("interrupted") - sys.exit(4) + sys.exit(99) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index f7692a13351d04..9a055e5e625c43 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -105,7 +105,7 @@ def run(self): return 0 if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args: - raise AnsibleError("No argument passed to %s module" % self.options.module_name) + raise AnsibleOptionsError("No argument passed to %s module" % self.options.module_name) #TODO: implement async support #if self.options.seconds: From b94e2a1f4ee1631d311f6943f6653c391d5022de Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 13 May 2015 11:27:12 -0500 Subject: [PATCH 0599/3617] Fixing bugs related to parsing and fixing up parsing integration tests (v2) --- lib/ansible/parsing/mod_args.py | 18 +++++++++++++---- lib/ansible/plugins/strategies/__init__.py | 2 +- test/integration/Makefile | 10 +++++----- .../roles/test_good_parsing/tasks/main.yml | 20 +++++++++---------- 4 files changed, 30 insertions(+), 20 deletions(-) diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index ed527f1b08fbaf..87b3813d8f0410 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -264,13 +264,23 @@ def parse(self): thing = value action, args = self._normalize_parameters(value, action=action, additional_args=additional_args) + # FIXME: this 
should probably be somewhere else + RAW_PARAM_MODULES = ( + 'command', + 'shell', + 'script', + 'include', + 'include_vars', + 'add_host', + 'group_by', + 'set_fact', + 'meta', + ) # if we didn't see any module in the task at all, it's not a task really if action is None: raise AnsibleParserError("no action detected in task", obj=self._task_ds) - # FIXME: disabled for now, as there are other places besides the shell/script modules where - # having variables as the sole param for the module is valid (include_vars, add_host, and group_by?) - #elif args.get('_raw_params', '') != '' and action not in ('command', 'shell', 'script', 'include_vars'): - # raise AnsibleParserError("this task has extra params, which is only allowed in the command, shell or script module.", obj=self._task_ds) + elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES: + raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action, ", ".join(RAW_PARAM_MODULES)), obj=self._task_ds) # shell modules require special handling (action, args) = self._handle_shell_weirdness(action, args) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index f6103343712f5c..a3668ba089a8ab 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -335,7 +335,7 @@ def _load_included_file(self, included_file): # set the vars for this task from those specified as params to the include for b in block_list: - b._vars = included_file._args.copy() + b.vars = included_file._args.copy() return block_list diff --git a/test/integration/Makefile b/test/integration/Makefile index 28de76c7cdf759..3ee38b0ab79d76 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -24,11 +24,11 @@ CONSUL_RUNNING := $(shell python consul_running.py) all: parsing test_var_precedence unicode test_templating_settings non_destructive destructive includes 
check_mode test_hash test_handlers test_group_by test_vault test_tags parsing: - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario2; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario3; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario4; [ $$? -eq 3 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5; [ $$? -eq 3 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? -eq 4 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario2; [ $$? -eq 4 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario3; [ $$? -eq 4 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario4; [ $$? -eq 4 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5; [ $$? 
-eq 4 ] ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) includes: diff --git a/test/integration/roles/test_good_parsing/tasks/main.yml b/test/integration/roles/test_good_parsing/tasks/main.yml index 27475ce0f53ac1..482d0efac5d69a 100644 --- a/test/integration/roles/test_good_parsing/tasks/main.yml +++ b/test/integration/roles/test_good_parsing/tasks/main.yml @@ -152,17 +152,17 @@ that: - complex_param == "this is a param in a complex arg with double quotes" -- name: test variable module name - action: "{{ variable_module_name }} msg='this should be debugged'" - register: result - -- debug: var=result +#- name: test variable module name +# action: "{{ variable_module_name }} msg='this should be debugged'" +# register: result +# +#- debug: var=result -- name: assert the task with variable module name ran - assert: - that: - - result.invocation.module_name == "debug" - - result.msg == "this should be debugged" +#- name: assert the task with variable module name ran +# assert: +# that: +# - result.invocation.module_name == "debug" +# - result.msg == "this should be debugged" - name: test conditional includes include: test_include_conditional.yml From bbda86ad0a43183236e58c44a63db93b9631deac Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 13 May 2015 11:04:12 -0700 Subject: [PATCH 0600/3617] Fix parsing tests so that they all run --- .../roles/test_bad_parsing/tasks/main.yml | 20 ++++++++----------- .../test_bad_parsing/tasks/scenario1.yml | 5 +++++ .../test_bad_parsing/tasks/scenario2.yml | 5 +++++ .../test_bad_parsing/tasks/scenario3.yml | 5 +++++ .../test_bad_parsing/tasks/scenario4.yml | 5 +++++ 5 files changed, 28 insertions(+), 12 deletions(-) create mode 100644 test/integration/roles/test_bad_parsing/tasks/scenario1.yml create mode 100644 test/integration/roles/test_bad_parsing/tasks/scenario2.yml create mode 100644 test/integration/roles/test_bad_parsing/tasks/scenario3.yml create mode 100644 
test/integration/roles/test_bad_parsing/tasks/scenario4.yml diff --git a/test/integration/roles/test_bad_parsing/tasks/main.yml b/test/integration/roles/test_bad_parsing/tasks/main.yml index 3899821de6f695..4636383d9eb204 100644 --- a/test/integration/roles/test_bad_parsing/tasks/main.yml +++ b/test/integration/roles/test_bad_parsing/tasks/main.yml @@ -29,24 +29,20 @@ - file: name={{test_file}} state=touch tags: common -- name: test that we cannot insert arguments - file: path={{ test_file }} {{ test_input }} - failed_when: False # ignore the module, just test the parser +- name: include test that we cannot insert arguments + include: scenario1.yml tags: scenario1 -- name: test that we cannot duplicate arguments - file: path={{ test_file }} owner=test2 {{ test_input }} - failed_when: False # ignore the module, just test the parser +- name: include test that we cannot duplicate arguments + include: scenario2.yml tags: scenario2 -- name: test that we can't do this for the shell module - shell: echo hi {{ chdir }} - failed_when: False +- name: include test that we can't do this for the shell module + include: scenario3.yml tags: scenario3 -- name: test that we can't go all Little Bobby Droptables on a quoted var to add more - file: "name={{ bad_var }}" - failed_when: False +- name: include test that we can't go all Little Bobby Droptables on a quoted var to add more + include: scenario4.yml tags: scenario4 - name: test that a missing/malformed jinja2 filter fails diff --git a/test/integration/roles/test_bad_parsing/tasks/scenario1.yml b/test/integration/roles/test_bad_parsing/tasks/scenario1.yml new file mode 100644 index 00000000000000..dab20be749ff74 --- /dev/null +++ b/test/integration/roles/test_bad_parsing/tasks/scenario1.yml @@ -0,0 +1,5 @@ +- name: test that we cannot insert arguments + file: path={{ test_file }} {{ test_input }} + failed_when: False # ignore the module, just test the parser + tags: scenario1 + diff --git 
a/test/integration/roles/test_bad_parsing/tasks/scenario2.yml b/test/integration/roles/test_bad_parsing/tasks/scenario2.yml new file mode 100644 index 00000000000000..4f14f81b233db4 --- /dev/null +++ b/test/integration/roles/test_bad_parsing/tasks/scenario2.yml @@ -0,0 +1,5 @@ +- name: test that we cannot duplicate arguments + file: path={{ test_file }} owner=test2 {{ test_input }} + failed_when: False # ignore the module, just test the parser + tags: scenario2 + diff --git a/test/integration/roles/test_bad_parsing/tasks/scenario3.yml b/test/integration/roles/test_bad_parsing/tasks/scenario3.yml new file mode 100644 index 00000000000000..cd4da7babaf37b --- /dev/null +++ b/test/integration/roles/test_bad_parsing/tasks/scenario3.yml @@ -0,0 +1,5 @@ +- name: test that we can't do this for the shell module + shell: echo hi {{ chdir }} + failed_when: False + tags: scenario3 + diff --git a/test/integration/roles/test_bad_parsing/tasks/scenario4.yml b/test/integration/roles/test_bad_parsing/tasks/scenario4.yml new file mode 100644 index 00000000000000..9ed1eae0b53558 --- /dev/null +++ b/test/integration/roles/test_bad_parsing/tasks/scenario4.yml @@ -0,0 +1,5 @@ +- name: test that we can't go all Little Bobby Droptables on a quoted var to add more + file: "name={{ bad_var }}" + failed_when: False + tags: scenario4 + From b91ce29007ff24c73a786afb80b721b6d8778362 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 13 May 2015 12:52:51 -0700 Subject: [PATCH 0601/3617] Go to next task when we get an error in linear --- lib/ansible/plugins/strategies/linear.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index f1efadd5476e74..ec829c8996a38f 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -280,6 +280,7 @@ def __repr__(self): iterator.mark_host_failed(host) # FIXME: callback here? 
print(e) + continue for new_block in new_blocks: noop_block = Block(parent_block=task._block) From 46d7f5281a155d54cea5051e432b4c687636b9f7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 13 May 2015 20:05:47 -0400 Subject: [PATCH 0602/3617] added pushbullet to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f04d7f3da45f9..1bfc7780e72061 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ New Modules: * openstack: os_subnet * openstack: os_volume * pushover + * pushbullet * zabbix_host * zabbix_hostmacro * zabbix_screen From b7d644d484c11f6af4134af021b9d05037a48193 Mon Sep 17 00:00:00 2001 From: Aleksey Zhukov Date: Thu, 14 May 2015 09:42:48 +0300 Subject: [PATCH 0603/3617] Fix broken cache logic --- plugins/inventory/digital_ocean.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/digital_ocean.py b/plugins/inventory/digital_ocean.py index 9bfb184d578923..1323a384ba9a6e 100755 --- a/plugins/inventory/digital_ocean.py +++ b/plugins/inventory/digital_ocean.py @@ -226,6 +226,9 @@ def __init__(self): self.build_inventory() json_data = self.inventory + if self.cache_refreshed: + self.write_to_cache() + if self.args.pretty: print json.dumps(json_data, sort_keys=True, indent=2) else: @@ -309,23 +312,30 @@ def load_from_digital_ocean(self, resource=None): '''Get JSON from DigitalOcean API''' if self.args.force_cache: return + # We always get fresh droplets + if self.is_cache_valid() and not (resource=='droplets' or resource is None): + return if self.args.refresh_cache: resource=None if resource == 'droplets' or resource is None: self.data['droplets'] = self.manager.all_active_droplets() + self.cache_refreshed = True if resource == 'regions' or resource is None: self.data['regions'] = self.manager.all_regions() + self.cache_refreshed = True if resource == 'images' or resource is None: self.data['images'] = self.manager.all_images(filter=None) 
+ self.cache_refreshed = True if resource == 'sizes' or resource is None: self.data['sizes'] = self.manager.sizes() + self.cache_refreshed = True if resource == 'ssh_keys' or resource is None: self.data['ssh_keys'] = self.manager.all_ssh_keys() + self.cache_refreshed = True if resource == 'domains' or resource is None: self.data['domains'] = self.manager.all_domains() - - self.write_to_cache() + self.cache_refreshed = True def build_inventory(self): From fa1549fec186547cf60dc6574d5bd6263d26233d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Strahinja=20Kustudi=C4=87?= Date: Thu, 14 May 2015 12:24:36 +0200 Subject: [PATCH 0604/3617] Fixed documentation for host pattern portions --- docsite/rst/intro_patterns.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_patterns.rst b/docsite/rst/intro_patterns.rst index 7830c97c491e67..579276a3af7db7 100644 --- a/docsite/rst/intro_patterns.rst +++ b/docsite/rst/intro_patterns.rst @@ -74,7 +74,7 @@ As an advanced usage, you can also select the numbered server in a group:: Or a portion of servers in a group:: - webservers[0:25] + webservers[0-25] Most people don't specify patterns as regular expressions, but you can. 
Just start the pattern with a '~':: From 14719a6f08eb67d36d36acb2d3ce0ec3885047a3 Mon Sep 17 00:00:00 2001 From: Chen Zhidong Date: Thu, 14 May 2015 22:02:30 +0800 Subject: [PATCH 0605/3617] Add judgment to fix path0 if ANSIBLE_CONFIG is set to a dir --- lib/ansible/constants.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 089de5b7c5bf15..d09a8da5ca3688 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -65,6 +65,8 @@ def load_config_file(): path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: path0 = os.path.expanduser(path0) + if os.path.isdir(path0): + path0 += "/ansible.cfg" path1 = os.getcwd() + "/ansible.cfg" path2 = os.path.expanduser("~/.ansible.cfg") path3 = "/etc/ansible/ansible.cfg" From a0509cda1ea6d05ed339a14f18697864f929ffcd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 14 May 2015 14:31:11 -0500 Subject: [PATCH 0606/3617] Fix test_role unit tests to use unique role names to avoid role caching errors --- test/units/playbook/test_role.py | 62 ++++++++++++++++---------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/test/units/playbook/test_role.py b/test/units/playbook/test_role.py index 7aab5133da2eba..031871ce32931b 100644 --- a/test/units/playbook/test_role.py +++ b/test/units/playbook/test_role.py @@ -41,28 +41,28 @@ def tearDown(self): def test_load_role_with_tasks(self): fake_loader = DictDataLoader({ - 
"/etc/ansible/roles/foo/handlers/main.yml": """ + "/etc/ansible/roles/foo_handlers/handlers/main.yml": """ - name: test handler shell: echo 'hello world' """, }) - i = RoleInclude.load('foo', loader=fake_loader) + i = RoleInclude.load('foo_handlers', loader=fake_loader) r = Role.load(i) self.assertEqual(len(r._handler_blocks), 1) @@ -71,15 +71,15 @@ def test_load_role_with_handlers(self): def test_load_role_with_vars(self): fake_loader = DictDataLoader({ - "/etc/ansible/roles/foo/defaults/main.yml": """ + "/etc/ansible/roles/foo_vars/defaults/main.yml": """ foo: bar """, - "/etc/ansible/roles/foo/vars/main.yml": """ + "/etc/ansible/roles/foo_vars/vars/main.yml": """ foo: bam """, }) - i = RoleInclude.load('foo', loader=fake_loader) + i = RoleInclude.load('foo_vars', loader=fake_loader) r = Role.load(i) self.assertEqual(r._default_vars, dict(foo='bar')) @@ -88,41 +88,41 @@ def test_load_role_with_vars(self): def test_load_role_with_metadata(self): fake_loader = DictDataLoader({ - '/etc/ansible/roles/foo/meta/main.yml': """ + '/etc/ansible/roles/foo_metadata/meta/main.yml': """ allow_duplicates: true dependencies: - - bar + - bar_metadata galaxy_info: a: 1 b: 2 c: 3 """, - '/etc/ansible/roles/bar/meta/main.yml': """ + '/etc/ansible/roles/bar_metadata/meta/main.yml': """ dependencies: - - baz + - baz_metadata """, - '/etc/ansible/roles/baz/meta/main.yml': """ + '/etc/ansible/roles/baz_metadata/meta/main.yml': """ dependencies: - - bam + - bam_metadata """, - '/etc/ansible/roles/bam/meta/main.yml': """ + '/etc/ansible/roles/bam_metadata/meta/main.yml': """ dependencies: [] """, - '/etc/ansible/roles/bad1/meta/main.yml': """ + '/etc/ansible/roles/bad1_metadata/meta/main.yml': """ 1 """, - '/etc/ansible/roles/bad2/meta/main.yml': """ + '/etc/ansible/roles/bad2_metadata/meta/main.yml': """ foo: bar """, - '/etc/ansible/roles/recursive1/meta/main.yml': """ - dependencies: ['recursive2'] + '/etc/ansible/roles/recursive1_metadata/meta/main.yml': """ + dependencies: 
['recursive2_metadata'] """, - '/etc/ansible/roles/recursive2/meta/main.yml': """ - dependencies: ['recursive1'] + '/etc/ansible/roles/recursive2_metadata/meta/main.yml': """ + dependencies: ['recursive1_metadata'] """, }) - i = RoleInclude.load('foo', loader=fake_loader) + i = RoleInclude.load('foo_metadata', loader=fake_loader) r = Role.load(i) role_deps = r.get_direct_dependencies() @@ -136,17 +136,17 @@ def test_load_role_with_metadata(self): all_deps = r.get_all_dependencies() self.assertEqual(len(all_deps), 3) - self.assertEqual(all_deps[0].get_name(), 'bar') - self.assertEqual(all_deps[1].get_name(), 'baz') - self.assertEqual(all_deps[2].get_name(), 'bam') + self.assertEqual(all_deps[0].get_name(), 'bam_metadata') + self.assertEqual(all_deps[1].get_name(), 'baz_metadata') + self.assertEqual(all_deps[2].get_name(), 'bar_metadata') - i = RoleInclude.load('bad1', loader=fake_loader) + i = RoleInclude.load('bad1_metadata', loader=fake_loader) self.assertRaises(AnsibleParserError, Role.load, i) - i = RoleInclude.load('bad2', loader=fake_loader) + i = RoleInclude.load('bad2_metadata', loader=fake_loader) self.assertRaises(AnsibleParserError, Role.load, i) - i = RoleInclude.load('recursive1', loader=fake_loader) + i = RoleInclude.load('recursive1_metadata', loader=fake_loader) self.assertRaises(AnsibleError, Role.load, i) def test_load_role_complex(self): @@ -155,13 +155,13 @@ def test_load_role_complex(self): # params and tags/when statements fake_loader = DictDataLoader({ - "/etc/ansible/roles/foo/tasks/main.yml": """ + "/etc/ansible/roles/foo_complex/tasks/main.yml": """ - shell: echo 'hello world' """, }) - i = RoleInclude.load(dict(role='foo'), loader=fake_loader) + i = RoleInclude.load(dict(role='foo_complex'), loader=fake_loader) r = Role.load(i) - self.assertEqual(r.get_name(), "foo") + self.assertEqual(r.get_name(), "foo_complex") From ae9ba4afa1044071227a37268700c4acf897f68e Mon Sep 17 00:00:00 2001 From: Leonid Evdokimov Date: Thu, 3 Jul 2014 10:32:31 
+0400 Subject: [PATCH 0607/3617] uri: provide raw_content, parse json without double-decoding. Fixes #7586 Regression potential: - `raw_content` is written to `dest` file instead of decoded `content` - `raw_content` doubles module reply --- test/integration/roles/test_uri/files/README | 9 ++ .../roles/test_uri/files/fail0.json | 1 + .../roles/test_uri/files/fail1.json | 1 + .../roles/test_uri/files/fail10.json | 1 + .../roles/test_uri/files/fail11.json | 1 + .../roles/test_uri/files/fail12.json | 1 + .../roles/test_uri/files/fail13.json | 1 + .../roles/test_uri/files/fail14.json | 1 + .../roles/test_uri/files/fail15.json | 1 + .../roles/test_uri/files/fail16.json | 1 + .../roles/test_uri/files/fail17.json | 1 + .../roles/test_uri/files/fail18.json | 1 + .../roles/test_uri/files/fail19.json | 1 + .../roles/test_uri/files/fail2.json | 1 + .../roles/test_uri/files/fail20.json | 1 + .../roles/test_uri/files/fail21.json | 1 + .../roles/test_uri/files/fail22.json | 1 + .../roles/test_uri/files/fail23.json | 1 + .../roles/test_uri/files/fail24.json | 1 + .../roles/test_uri/files/fail25.json | 1 + .../roles/test_uri/files/fail26.json | 2 + .../roles/test_uri/files/fail27.json | 2 + .../roles/test_uri/files/fail28.json | 1 + .../roles/test_uri/files/fail29.json | 1 + .../roles/test_uri/files/fail3.json | 1 + .../roles/test_uri/files/fail30.json | 1 + .../roles/test_uri/files/fail4.json | 1 + .../roles/test_uri/files/fail5.json | 1 + .../roles/test_uri/files/fail6.json | 1 + .../roles/test_uri/files/fail7.json | 1 + .../roles/test_uri/files/fail8.json | 1 + .../roles/test_uri/files/fail9.json | 1 + .../roles/test_uri/files/pass0.json | 58 +++++++++ .../roles/test_uri/files/pass1.json | 1 + .../roles/test_uri/files/pass2.json | 6 + .../roles/test_uri/files/pass3.json | 1 + .../roles/test_uri/files/pass4.json | 1 + .../roles/test_uri/handlers/main.yml | 3 + test/integration/roles/test_uri/meta/main.yml | 2 + .../integration/roles/test_uri/tasks/main.yml | 120 
++++++++++++++++++ 40 files changed, 234 insertions(+) create mode 100644 test/integration/roles/test_uri/files/README create mode 100644 test/integration/roles/test_uri/files/fail0.json create mode 100644 test/integration/roles/test_uri/files/fail1.json create mode 100644 test/integration/roles/test_uri/files/fail10.json create mode 100644 test/integration/roles/test_uri/files/fail11.json create mode 100644 test/integration/roles/test_uri/files/fail12.json create mode 100644 test/integration/roles/test_uri/files/fail13.json create mode 100644 test/integration/roles/test_uri/files/fail14.json create mode 100644 test/integration/roles/test_uri/files/fail15.json create mode 100644 test/integration/roles/test_uri/files/fail16.json create mode 100644 test/integration/roles/test_uri/files/fail17.json create mode 100644 test/integration/roles/test_uri/files/fail18.json create mode 100644 test/integration/roles/test_uri/files/fail19.json create mode 100644 test/integration/roles/test_uri/files/fail2.json create mode 100644 test/integration/roles/test_uri/files/fail20.json create mode 100644 test/integration/roles/test_uri/files/fail21.json create mode 100644 test/integration/roles/test_uri/files/fail22.json create mode 100644 test/integration/roles/test_uri/files/fail23.json create mode 100644 test/integration/roles/test_uri/files/fail24.json create mode 100644 test/integration/roles/test_uri/files/fail25.json create mode 100644 test/integration/roles/test_uri/files/fail26.json create mode 100644 test/integration/roles/test_uri/files/fail27.json create mode 100644 test/integration/roles/test_uri/files/fail28.json create mode 100644 test/integration/roles/test_uri/files/fail29.json create mode 100644 test/integration/roles/test_uri/files/fail3.json create mode 100644 test/integration/roles/test_uri/files/fail30.json create mode 100644 test/integration/roles/test_uri/files/fail4.json create mode 100644 test/integration/roles/test_uri/files/fail5.json create mode 100644 
test/integration/roles/test_uri/files/fail6.json create mode 100644 test/integration/roles/test_uri/files/fail7.json create mode 100644 test/integration/roles/test_uri/files/fail8.json create mode 100644 test/integration/roles/test_uri/files/fail9.json create mode 100644 test/integration/roles/test_uri/files/pass0.json create mode 100644 test/integration/roles/test_uri/files/pass1.json create mode 100644 test/integration/roles/test_uri/files/pass2.json create mode 100644 test/integration/roles/test_uri/files/pass3.json create mode 100644 test/integration/roles/test_uri/files/pass4.json create mode 100644 test/integration/roles/test_uri/handlers/main.yml create mode 100644 test/integration/roles/test_uri/meta/main.yml create mode 100644 test/integration/roles/test_uri/tasks/main.yml diff --git a/test/integration/roles/test_uri/files/README b/test/integration/roles/test_uri/files/README new file mode 100644 index 00000000000000..ef7791262b4285 --- /dev/null +++ b/test/integration/roles/test_uri/files/README @@ -0,0 +1,9 @@ +The files were taken from http://www.json.org/JSON_checker/ +> If the JSON_checker is working correctly, it must accept all of the pass*.json files and reject all of the fail*.json files. 
+ +Difference with JSON_checker dataset: + - *${n}.json renamed to *${n-1}.json to be 0-based + - fail0.json renamed to pass3.json as python json module allows JSON payload to be string + - fail17.json renamed to pass4.json as python json module has no problems with deep structures + - fail32.json renamed to fail0.json to fill gap + - fail31.json renamed to fail17.json to fill gap diff --git a/test/integration/roles/test_uri/files/fail0.json b/test/integration/roles/test_uri/files/fail0.json new file mode 100644 index 00000000000000..ca5eb19dc97f5c --- /dev/null +++ b/test/integration/roles/test_uri/files/fail0.json @@ -0,0 +1 @@ +["mismatch"} \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail1.json b/test/integration/roles/test_uri/files/fail1.json new file mode 100644 index 00000000000000..6b7c11e5a56537 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail1.json @@ -0,0 +1 @@ +["Unclosed array" \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail10.json b/test/integration/roles/test_uri/files/fail10.json new file mode 100644 index 00000000000000..76eb95b4583c8e --- /dev/null +++ b/test/integration/roles/test_uri/files/fail10.json @@ -0,0 +1 @@ +{"Illegal expression": 1 + 2} \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail11.json b/test/integration/roles/test_uri/files/fail11.json new file mode 100644 index 00000000000000..77580a4522d8c7 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail11.json @@ -0,0 +1 @@ +{"Illegal invocation": alert()} \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail12.json b/test/integration/roles/test_uri/files/fail12.json new file mode 100644 index 00000000000000..379406b59bdb94 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail12.json @@ -0,0 +1 @@ +{"Numbers cannot have leading zeroes": 013} \ No newline at end of file diff --git 
a/test/integration/roles/test_uri/files/fail13.json b/test/integration/roles/test_uri/files/fail13.json new file mode 100644 index 00000000000000..0ed366b38a34f5 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail13.json @@ -0,0 +1 @@ +{"Numbers cannot be hex": 0x14} \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail14.json b/test/integration/roles/test_uri/files/fail14.json new file mode 100644 index 00000000000000..fc8376b605da69 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail14.json @@ -0,0 +1 @@ +["Illegal backslash escape: \x15"] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail15.json b/test/integration/roles/test_uri/files/fail15.json new file mode 100644 index 00000000000000..3fe21d4b532498 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail15.json @@ -0,0 +1 @@ +[\naked] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail16.json b/test/integration/roles/test_uri/files/fail16.json new file mode 100644 index 00000000000000..62b9214aeda6d7 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail16.json @@ -0,0 +1 @@ +["Illegal backslash escape: \017"] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail17.json b/test/integration/roles/test_uri/files/fail17.json new file mode 100644 index 00000000000000..45cba7396ff746 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail17.json @@ -0,0 +1 @@ +{"Comma instead if closing brace": true, \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail18.json b/test/integration/roles/test_uri/files/fail18.json new file mode 100644 index 00000000000000..3b9c46fa9a296c --- /dev/null +++ b/test/integration/roles/test_uri/files/fail18.json @@ -0,0 +1 @@ +{"Missing colon" null} \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail19.json b/test/integration/roles/test_uri/files/fail19.json new 
file mode 100644 index 00000000000000..27c1af3e72ee37 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail19.json @@ -0,0 +1 @@ +{"Double colon":: null} \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail2.json b/test/integration/roles/test_uri/files/fail2.json new file mode 100644 index 00000000000000..168c81eb78537e --- /dev/null +++ b/test/integration/roles/test_uri/files/fail2.json @@ -0,0 +1 @@ +{unquoted_key: "keys must be quoted"} \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail20.json b/test/integration/roles/test_uri/files/fail20.json new file mode 100644 index 00000000000000..62474573b2160a --- /dev/null +++ b/test/integration/roles/test_uri/files/fail20.json @@ -0,0 +1 @@ +{"Comma instead of colon", null} \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail21.json b/test/integration/roles/test_uri/files/fail21.json new file mode 100644 index 00000000000000..a7752581bcf7f3 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail21.json @@ -0,0 +1 @@ +["Colon instead of comma": false] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail22.json b/test/integration/roles/test_uri/files/fail22.json new file mode 100644 index 00000000000000..494add1ca190e1 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail22.json @@ -0,0 +1 @@ +["Bad value", truth] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail23.json b/test/integration/roles/test_uri/files/fail23.json new file mode 100644 index 00000000000000..caff239bfc3629 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail23.json @@ -0,0 +1 @@ +['single quote'] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail24.json b/test/integration/roles/test_uri/files/fail24.json new file mode 100644 index 00000000000000..8b7ad23e010314 --- /dev/null +++ 
b/test/integration/roles/test_uri/files/fail24.json @@ -0,0 +1 @@ +[" tab character in string "] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail25.json b/test/integration/roles/test_uri/files/fail25.json new file mode 100644 index 00000000000000..845d26a6a54398 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail25.json @@ -0,0 +1 @@ +["tab\ character\ in\ string\ "] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail26.json b/test/integration/roles/test_uri/files/fail26.json new file mode 100644 index 00000000000000..6b01a2ca4a97ec --- /dev/null +++ b/test/integration/roles/test_uri/files/fail26.json @@ -0,0 +1,2 @@ +["line +break"] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail27.json b/test/integration/roles/test_uri/files/fail27.json new file mode 100644 index 00000000000000..621a0101c664a6 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail27.json @@ -0,0 +1,2 @@ +["line\ +break"] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail28.json b/test/integration/roles/test_uri/files/fail28.json new file mode 100644 index 00000000000000..47ec421bb62426 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail28.json @@ -0,0 +1 @@ +[0e] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail29.json b/test/integration/roles/test_uri/files/fail29.json new file mode 100644 index 00000000000000..8ab0bc4b8b2c73 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail29.json @@ -0,0 +1 @@ +[0e+] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail3.json b/test/integration/roles/test_uri/files/fail3.json new file mode 100644 index 00000000000000..9de168bf34e2e3 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail3.json @@ -0,0 +1 @@ +["extra comma",] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail30.json 
b/test/integration/roles/test_uri/files/fail30.json new file mode 100644 index 00000000000000..1cce602b518fc6 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail30.json @@ -0,0 +1 @@ +[0e+-1] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail4.json b/test/integration/roles/test_uri/files/fail4.json new file mode 100644 index 00000000000000..ddf3ce3d240946 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail4.json @@ -0,0 +1 @@ +["double extra comma",,] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail5.json b/test/integration/roles/test_uri/files/fail5.json new file mode 100644 index 00000000000000..ed91580e1b1c15 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail5.json @@ -0,0 +1 @@ +[ , "<-- missing value"] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail6.json b/test/integration/roles/test_uri/files/fail6.json new file mode 100644 index 00000000000000..8a96af3e4ee6c7 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail6.json @@ -0,0 +1 @@ +["Comma after the close"], \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail7.json b/test/integration/roles/test_uri/files/fail7.json new file mode 100644 index 00000000000000..b28479c6ecb21a --- /dev/null +++ b/test/integration/roles/test_uri/files/fail7.json @@ -0,0 +1 @@ +["Extra close"]] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail8.json b/test/integration/roles/test_uri/files/fail8.json new file mode 100644 index 00000000000000..5815574f363e58 --- /dev/null +++ b/test/integration/roles/test_uri/files/fail8.json @@ -0,0 +1 @@ +{"Extra comma": true,} \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/fail9.json b/test/integration/roles/test_uri/files/fail9.json new file mode 100644 index 00000000000000..5d8c0047bd522d --- /dev/null +++ 
b/test/integration/roles/test_uri/files/fail9.json @@ -0,0 +1 @@ +{"Extra value after close": true} "misplaced quoted value" \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/pass0.json b/test/integration/roles/test_uri/files/pass0.json new file mode 100644 index 00000000000000..70e26854369282 --- /dev/null +++ b/test/integration/roles/test_uri/files/pass0.json @@ -0,0 +1,58 @@ +[ + "JSON Test Pattern pass1", + {"object with 1 member":["array with 1 element"]}, + {}, + [], + -42, + true, + false, + null, + { + "integer": 1234567890, + "real": -9876.543210, + "e": 0.123456789e-12, + "E": 1.234567890E+34, + "": 23456789012E66, + "zero": 0, + "one": 1, + "space": " ", + "quote": "\"", + "backslash": "\\", + "controls": "\b\f\n\r\t", + "slash": "/ & \/", + "alpha": "abcdefghijklmnopqrstuvwyz", + "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", + "digit": "0123456789", + "0123456789": "digit", + "special": "`1~!@#$%^&*()_+-={':[,]}|;.?", + "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", + "true": true, + "false": false, + "null": null, + "array":[ ], + "object":{ }, + "address": "50 St. James Street", + "url": "http://www.JSON.org/", + "comment": "// /* */": " ", + " s p a c e d " :[1,2 , 3 + +, + +4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7], + "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", + "quotes": "" \u0022 %22 0x22 034 "", + "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" 
+: "A key can be any string" + }, + 0.5 ,98.6 +, +99.44 +, + +1066, +1e1, +0.1e1, +1e-1, +1e00,2e+00,2e-00 +,"rosebud"] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/pass1.json b/test/integration/roles/test_uri/files/pass1.json new file mode 100644 index 00000000000000..d3c63c7ad845e4 --- /dev/null +++ b/test/integration/roles/test_uri/files/pass1.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] \ No newline at end of file diff --git a/test/integration/roles/test_uri/files/pass2.json b/test/integration/roles/test_uri/files/pass2.json new file mode 100644 index 00000000000000..4528d51f1ac615 --- /dev/null +++ b/test/integration/roles/test_uri/files/pass2.json @@ -0,0 +1,6 @@ +{ + "JSON Test Pattern pass3": { + "The outermost value": "must be an object or array.", + "In this test": "It is an object." + } +} diff --git a/test/integration/roles/test_uri/files/pass3.json b/test/integration/roles/test_uri/files/pass3.json new file mode 100644 index 00000000000000..6216b865f10219 --- /dev/null +++ b/test/integration/roles/test_uri/files/pass3.json @@ -0,0 +1 @@ +"A JSON payload should be an object or array, not a string." 
\ No newline at end of file diff --git a/test/integration/roles/test_uri/files/pass4.json b/test/integration/roles/test_uri/files/pass4.json new file mode 100644 index 00000000000000..edac92716f186e --- /dev/null +++ b/test/integration/roles/test_uri/files/pass4.json @@ -0,0 +1 @@ +[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]] \ No newline at end of file diff --git a/test/integration/roles/test_uri/handlers/main.yml b/test/integration/roles/test_uri/handlers/main.yml new file mode 100644 index 00000000000000..2283208d191bc3 --- /dev/null +++ b/test/integration/roles/test_uri/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: stop SimpleHTTPServer + shell: start-stop-daemon --stop --pidfile {{ output_dir }}/SimpleHTTPServer.pid --exec {{ py2.stdout }} diff --git a/test/integration/roles/test_uri/meta/main.yml b/test/integration/roles/test_uri/meta/main.yml new file mode 100644 index 00000000000000..07faa217762603 --- /dev/null +++ b/test/integration/roles/test_uri/meta/main.yml @@ -0,0 +1,2 @@ +dependencies: + - prepare_tests diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml new file mode 100644 index 00000000000000..6dd23df86ca879 --- /dev/null +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -0,0 +1,120 @@ +# test code for the uri module +# (c) 2014, Leonid Evdokimov + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- name: set role facts + set_fact: + http_port: 15260 + files_dir: '{{ _original_file|dirname }}/../files' + checkout_dir: '{{ output_dir }}/git' + +- name: verify that python2 is installed so this test can continue + shell: which python2 + register: py2 + +- name: start SimpleHTTPServer + shell: start-stop-daemon --start --pidfile {{ output_dir }}/SimpleHTTPServer.pid --background --make-pidfile --chdir {{ files_dir }} --exec {{ py2.stdout }} -- -m SimpleHTTPServer {{ http_port }} + notify: stop SimpleHTTPServer + +- wait_for: port={{ http_port }} + + +- name: md5 pass_json + stat: path={{ files_dir }}/{{ item }}.json get_md5=yes + register: pass_md5 + with_sequence: start=0 end=4 format=pass%d + +- name: fetch pass_json + uri: return_content=yes url=http://localhost:{{ http_port }}/{{ item }}.json + register: pass + with_sequence: start=0 end=4 format=pass%d + +- name: check pass_json + assert: + that: + - '"json" in item.1' + - item.0.stat.md5 == item.1.raw_content | md5 + with_together: + - pass_md5.results + - pass.results + + +- name: md5 fail_json + stat: path={{ files_dir }}/{{ item }}.json get_md5=yes + register: fail_md5 + with_sequence: start=0 end=30 format=fail%d + +- name: fetch fail_json + uri: return_content=yes url=http://localhost:{{ http_port }}/{{ item }}.json + register: fail + with_sequence: start=0 end=30 format=fail%d + +- name: check fail_json + assert: + that: + - item.0.stat.md5 == item.1.raw_content | md5 + - '"json" not in item.1' + with_together: + - fail_md5.results + - fail.results + + +- name: check content != raw_content + assert: + that: item.content != item.raw_content + with_items: + - '{{ pass.results.0 }}' + - '{{ fail.results.14 }}' + - '{{ fail.results.15 }}' + - '{{ fail.results.16 }}' + - '{{ fail.results.27 }}' + +- name: check content == raw_content + assert: + that: item.content == item.raw_content + with_items: + - '{{ pass.results.1 }}' + - '{{ pass.results.2 }}' + - '{{ pass.results.3 }}' + - '{{ pass.results.4 
}}' + - '{{ fail.results.0 }}' + - '{{ fail.results.1 }}' + - '{{ fail.results.2 }}' + - '{{ fail.results.3 }}' + - '{{ fail.results.4 }}' + - '{{ fail.results.5 }}' + - '{{ fail.results.6 }}' + - '{{ fail.results.7 }}' + - '{{ fail.results.8 }}' + - '{{ fail.results.9 }}' + - '{{ fail.results.10 }}' + - '{{ fail.results.11 }}' + - '{{ fail.results.12 }}' + - '{{ fail.results.13 }}' + - '{{ fail.results.17 }}' + - '{{ fail.results.18 }}' + - '{{ fail.results.19 }}' + - '{{ fail.results.20 }}' + - '{{ fail.results.21 }}' + - '{{ fail.results.22 }}' + - '{{ fail.results.23 }}' + - '{{ fail.results.24 }}' + - '{{ fail.results.25 }}' + - '{{ fail.results.26 }}' + - '{{ fail.results.28 }}' + - '{{ fail.results.29 }}' + - '{{ fail.results.30 }}' From 3383a7b37aa6aa1697369233d58182614636b453 Mon Sep 17 00:00:00 2001 From: Leonid Evdokimov Date: Sat, 5 Jul 2014 09:15:57 +0400 Subject: [PATCH 0608/3617] tests: replace start-stop-daemon with async action --- test/integration/roles/test_uri/handlers/main.yml | 3 --- test/integration/roles/test_uri/tasks/main.yml | 5 +++-- 2 files changed, 3 insertions(+), 5 deletions(-) delete mode 100644 test/integration/roles/test_uri/handlers/main.yml diff --git a/test/integration/roles/test_uri/handlers/main.yml b/test/integration/roles/test_uri/handlers/main.yml deleted file mode 100644 index 2283208d191bc3..00000000000000 --- a/test/integration/roles/test_uri/handlers/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- name: stop SimpleHTTPServer - shell: start-stop-daemon --stop --pidfile {{ output_dir }}/SimpleHTTPServer.pid --exec {{ py2.stdout }} diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 6dd23df86ca879..c41590636dfe41 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -27,8 +27,9 @@ register: py2 - name: start SimpleHTTPServer - shell: start-stop-daemon --start --pidfile {{ output_dir 
}}/SimpleHTTPServer.pid --background --make-pidfile --chdir {{ files_dir }} --exec {{ py2.stdout }} -- -m SimpleHTTPServer {{ http_port }} - notify: stop SimpleHTTPServer + shell: cd {{ files_dir }} && {{ py2.stdout }} -m SimpleHTTPServer {{ http_port }} + async: 15 # this test set takes ~8 seconds to run + poll: 0 - wait_for: port={{ http_port }} From 0f9ad9dad2db3ea6c9b8fe6f35844a1e22fbf721 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 14 May 2015 14:21:29 -0700 Subject: [PATCH 0609/3617] Update integration tests from @darkk to work with the current uri module code --- test/integration/non_destructive.yml | 1 + .../integration/roles/test_uri/tasks/main.yml | 86 ++++++------------- 2 files changed, 28 insertions(+), 59 deletions(-) diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml index e520a17ea05a4b..0c4c5be49651a4 100644 --- a/test/integration/non_destructive.yml +++ b/test/integration/non_destructive.yml @@ -39,6 +39,7 @@ - { role: test_authorized_key, tags: test_authorized_key } - { role: test_get_url, tags: test_get_url } - { role: test_embedded_module, tags: test_embedded_module } + - { role: test_uri, tags: test_uri } # Turn on test_binary when we start testing v2 #- { role: test_binary, tags: test_binary } diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index c41590636dfe41..6072754f224551 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -19,24 +19,39 @@ - name: set role facts set_fact: http_port: 15260 - files_dir: '{{ _original_file|dirname }}/../files' + files_dir: '{{ output_dir|expanduser }}/files' checkout_dir: '{{ output_dir }}/git' +- name: create a directory to serve files from + file: + dest: "{{ files_dir }}" + state: directory + +- copy: + src: "{{ item }}" + dest: "{{files_dir}}/{{ item }}" + with_sequence: start=0 end=4 format=pass%d.json + +- copy: + src: "{{ item 
}}" + dest: "{{files_dir}}/{{ item }}" + with_sequence: start=0 end=30 format=fail%d.json + - name: verify that python2 is installed so this test can continue shell: which python2 register: py2 - name: start SimpleHTTPServer shell: cd {{ files_dir }} && {{ py2.stdout }} -m SimpleHTTPServer {{ http_port }} - async: 15 # this test set takes ~8 seconds to run + async: 60 # this test set takes ~15 seconds to run poll: 0 - wait_for: port={{ http_port }} -- name: md5 pass_json - stat: path={{ files_dir }}/{{ item }}.json get_md5=yes - register: pass_md5 +- name: checksum pass_json + stat: path={{ files_dir }}/{{ item }}.json get_checksum=yes + register: pass_checksum with_sequence: start=0 end=4 format=pass%d - name: fetch pass_json @@ -48,15 +63,15 @@ assert: that: - '"json" in item.1' - - item.0.stat.md5 == item.1.raw_content | md5 + - item.0.stat.checksum == item.1.content | checksum with_together: - - pass_md5.results + - pass_checksum.results - pass.results -- name: md5 fail_json - stat: path={{ files_dir }}/{{ item }}.json get_md5=yes - register: fail_md5 +- name: checksum fail_json + stat: path={{ files_dir }}/{{ item }}.json get_checksum=yes + register: fail_checksum with_sequence: start=0 end=30 format=fail%d - name: fetch fail_json @@ -67,55 +82,8 @@ - name: check fail_json assert: that: - - item.0.stat.md5 == item.1.raw_content | md5 + - item.0.stat.checksum == item.1.content | checksum - '"json" not in item.1' with_together: - - fail_md5.results + - fail_checksum.results - fail.results - - -- name: check content != raw_content - assert: - that: item.content != item.raw_content - with_items: - - '{{ pass.results.0 }}' - - '{{ fail.results.14 }}' - - '{{ fail.results.15 }}' - - '{{ fail.results.16 }}' - - '{{ fail.results.27 }}' - -- name: check content == raw_content - assert: - that: item.content == item.raw_content - with_items: - - '{{ pass.results.1 }}' - - '{{ pass.results.2 }}' - - '{{ pass.results.3 }}' - - '{{ pass.results.4 }}' - - '{{ fail.results.0 
}}' - - '{{ fail.results.1 }}' - - '{{ fail.results.2 }}' - - '{{ fail.results.3 }}' - - '{{ fail.results.4 }}' - - '{{ fail.results.5 }}' - - '{{ fail.results.6 }}' - - '{{ fail.results.7 }}' - - '{{ fail.results.8 }}' - - '{{ fail.results.9 }}' - - '{{ fail.results.10 }}' - - '{{ fail.results.11 }}' - - '{{ fail.results.12 }}' - - '{{ fail.results.13 }}' - - '{{ fail.results.17 }}' - - '{{ fail.results.18 }}' - - '{{ fail.results.19 }}' - - '{{ fail.results.20 }}' - - '{{ fail.results.21 }}' - - '{{ fail.results.22 }}' - - '{{ fail.results.23 }}' - - '{{ fail.results.24 }}' - - '{{ fail.results.25 }}' - - '{{ fail.results.26 }}' - - '{{ fail.results.28 }}' - - '{{ fail.results.29 }}' - - '{{ fail.results.30 }}' From 48d62fd9341dbe030380f0feab5dc7a9f9483a0f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 14 May 2015 20:10:31 -0500 Subject: [PATCH 0610/3617] Cleaning up VariableManager tests (v2) --- lib/ansible/vars/__init__.py | 9 +++--- test/units/vars/test_variable_manager.py | 41 ++++++++++++++++-------- 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 736b9529ef547c..5a576daba7cc58 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -72,7 +72,8 @@ def extra_vars(self): ''' ensures a clean copy of the extra_vars are made ''' return self._extra_vars.copy() - def set_extra_vars(self, value): + @extra_vars.setter + def extra_vars(self, value): ''' ensures a clean copy of the extra_vars are used to set the value ''' assert isinstance(value, MutableMapping) self._extra_vars = value.copy() @@ -123,7 +124,7 @@ def _merge_dicts(self, a, b): return result - def get_vars(self, loader, play=None, host=None, task=None): + def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): ''' Returns the variables, with optional "context" given via the parameters for the play, host, and task (which could possibly result in different @@ -145,7 
+146,7 @@ def get_vars(self, loader, play=None, host=None, task=None): debug("in VariableManager get_vars()") cache_entry = self._get_cache_entry(play=play, host=host, task=task) - if cache_entry in CACHED_VARS: + if cache_entry in CACHED_VARS and use_cache: debug("vars are cached, returning them now") return CACHED_VARS[cache_entry] @@ -229,7 +230,7 @@ def get_vars(self, loader, play=None, host=None, task=None): # the 'omit' value alows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token - CACHED_VARS[cache_entry] = all_vars + #CACHED_VARS[cache_entry] = all_vars debug("done with get_vars()") return all_vars diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py index 9abed8f9482c04..273f9238edbba0 100644 --- a/test/units/vars/test_variable_manager.py +++ b/test/units/vars/test_variable_manager.py @@ -38,7 +38,11 @@ def test_basic_manager(self): fake_loader = DictDataLoader({}) v = VariableManager() - self.assertEqual(v.get_vars(loader=fake_loader), dict()) + vars = v.get_vars(loader=fake_loader, use_cache=False) + if 'omit' in vars: + del vars['omit'] + + self.assertEqual(vars, dict()) self.assertEqual( v._merge_dicts( @@ -59,11 +63,14 @@ def test_variable_manager_extra_vars(self): extra_vars = dict(a=1, b=2, c=3) v = VariableManager() - v.set_extra_vars(extra_vars) + v.extra_vars = extra_vars + + vars = v.get_vars(loader=fake_loader, use_cache=False) for (key, val) in extra_vars.iteritems(): - self.assertEqual(v.get_vars(loader=fake_loader).get(key), val) - self.assertIsNot(v.extra_vars.get(key), val) + self.assertEqual(vars.get(key), val) + + self.assertIsNot(v.extra_vars, extra_vars) def test_variable_manager_host_vars_file(self): fake_loader = DictDataLoader({ @@ -82,30 +89,38 @@ def test_variable_manager_host_vars_file(self): mock_host.get_vars.return_value = dict() mock_host.get_groups.return_value = () - self.assertEqual(v.get_vars(loader=fake_loader, 
host=mock_host).get("foo"), "bar") + self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host, use_cache=False).get("foo"), "bar") def test_variable_manager_group_vars_file(self): fake_loader = DictDataLoader({ - "group_vars/somegroup.yml": """ + "group_vars/all.yml": """ foo: bar + """, + "group_vars/somegroup.yml": """ + bam: baz """ }) v = VariableManager() + v.add_group_vars_file("group_vars/all.yml", loader=fake_loader) v.add_group_vars_file("group_vars/somegroup.yml", loader=fake_loader) self.assertIn("somegroup", v._group_vars_files) - self.assertEqual(v._group_vars_files["somegroup"], dict(foo="bar")) + self.assertEqual(v._group_vars_files["all"], dict(foo="bar")) + self.assertEqual(v._group_vars_files["somegroup"], dict(bam="baz")) mock_group = MagicMock() - mock_group.name.return_value = "somegroup" + mock_group.name = "somegroup" mock_group.get_ancestors.return_value = () + mock_group.get_vars.return_value = dict() mock_host = MagicMock() mock_host.get_name.return_value = "hostname1" mock_host.get_vars.return_value = dict() - mock_host.get_groups.return_value = (mock_group) + mock_host.get_groups.return_value = (mock_group,) - self.assertEqual(v.get_vars(loader=fake_loader, host=mock_host).get("foo"), "bar") + vars = v.get_vars(loader=fake_loader, host=mock_host, use_cache=False) + self.assertEqual(vars.get("foo"), "bar") + self.assertEqual(vars.get("bam"), "baz") def test_variable_manager_play_vars(self): fake_loader = DictDataLoader({}) @@ -116,7 +131,7 @@ def test_variable_manager_play_vars(self): mock_play.get_vars_files.return_value = [] v = VariableManager() - self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar") + self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar") def test_variable_manager_play_vars_files(self): fake_loader = DictDataLoader({ @@ -131,7 +146,7 @@ def test_variable_manager_play_vars_files(self): mock_play.get_vars_files.return_value = 
['/path/to/somefile.yml'] v = VariableManager() - self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play).get("foo"), "bar") + self.assertEqual(v.get_vars(loader=fake_loader, play=mock_play, use_cache=False).get("foo"), "bar") def test_variable_manager_task_vars(self): fake_loader = DictDataLoader({}) @@ -141,5 +156,5 @@ def test_variable_manager_task_vars(self): mock_task.get_vars.return_value = dict(foo="bar") v = VariableManager() - self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task).get("foo"), "bar") + self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar") From 85aa984340d69150cbffc8c52443485a4d7b2c40 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Fri, 15 May 2015 01:05:38 -0400 Subject: [PATCH 0611/3617] Fix error in the column name in the doc --- hacking/templates/rst.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index 444b4243af5241..f6f38e59101168 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -118,7 +118,7 @@ Common return values are documented here :doc:`common_return_values`, the follow - + @@ -138,7 +138,7 @@ Common return values are documented here :doc:`common_return_values`, the follow
namedespcriptiondescription returned type sample
- + From ac7dce4631dd073c68a8770a91bbb7dfb99ad96c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 15 May 2015 10:45:55 -0500 Subject: [PATCH 0612/3617] Fixing broken set_extra_vars method after fixing unit tests (v2) --- lib/ansible/cli/playbook.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index 69e411dc87a0f6..97d4f0de3f92a3 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -131,7 +131,7 @@ def run(self): # create the variable manager, which will be shared throughout # the code, ensuring a consistent view of global variables variable_manager = VariableManager() - variable_manager.set_extra_vars(extra_vars) + variable_manager.extra_vars = extra_vars # create the inventory, and filter it based on the subset specified (if any) inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory) From e2de336a239a64d068f67dd4f22d4ecf0109af2a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 14 May 2015 10:50:22 -0400 Subject: [PATCH 0613/3617] made special treatment of certain filesystem for selinux configurable --- examples/ansible.cfg | 5 +++++ lib/ansible/constants.py | 5 ++++- lib/ansible/inventory/__init__.py | 4 ++-- lib/ansible/module_common.py | 9 ++++++--- lib/ansible/module_utils/basic.py | 24 +++++++++++++++--------- 5 files changed, 32 insertions(+), 15 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 4cf9d513e59533..85eada17cc8545 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -223,3 +223,8 @@ accelerate_daemon_timeout = 30 # is "no". #accelerate_multi_key = yes +[selinux] +# file systems that require special treatment when dealing with security context +# the default behaviour that copies the existing context or uses the user default +# needs to be changed to use the file system dependant context. 
+#special_context_filesystems=nfs,vboxsf,fuse diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 089de5b7c5bf15..2cdc08d8ce87ac 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -134,7 +134,10 @@ def shell_expand_path(path): DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() -DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) +DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) + +# selinux +DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf', islist=True) #TODO: get rid of ternary chain mess BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 2048046d3c1f21..f012246e227016 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -36,7 +36,7 @@ class Inventory(object): Host inventory for ansible. 
""" - __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', + __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] @@ -53,7 +53,7 @@ def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): self._vars_per_host = {} self._vars_per_group = {} self._hosts_cache = {} - self._groups_list = {} + self._groups_list = {} self._pattern_cache = {} # to be set by calling set_playbook_basedir by playbook code diff --git a/lib/ansible/module_common.py b/lib/ansible/module_common.py index 118c757f8dcae1..fba5b9137da881 100644 --- a/lib/ansible/module_common.py +++ b/lib/ansible/module_common.py @@ -33,6 +33,8 @@ REPLACER_COMPLEX = "\"<>\"" REPLACER_WINDOWS = "# POWERSHELL_COMMON" REPLACER_VERSION = "\"<>\"" +REPLACER_SELINUX = "<>" + class ModuleReplacer(object): @@ -41,14 +43,14 @@ class ModuleReplacer(object): transfer. Rather than doing classical python imports, this allows for more efficient transfer in a no-bootstrapping scenario by not moving extra files over the wire, and also takes care of embedding arguments in the transferred - modules. + modules. This version is done in such a way that local imports can still be used in the module code, so IDEs don't have to be aware of what is going on. Example: - from ansible.module_utils.basic import * + from ansible.module_utils.basic import * ... 
will result in the insertion basic.py into the module @@ -94,7 +96,7 @@ def _find_snippet_imports(self, module_data, module_path): module_style = 'new' elif 'WANT_JSON' in module_data: module_style = 'non_native_want_json' - + output = StringIO() lines = module_data.split('\n') snippet_names = [] @@ -167,6 +169,7 @@ def modify_module(self, module_path, complex_args, module_args, inject): # these strings should be part of the 'basic' snippet which is required to be included module_data = module_data.replace(REPLACER_VERSION, repr(__version__)) + module_data = module_data.replace(REPLACER_SELINUX, ','.join(C.DEFAULT_SELINUX_SPECIAL_FS)) module_data = module_data.replace(REPLACER_ARGS, encoded_args) module_data = module_data.replace(REPLACER_COMPLEX, encoded_complex) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 54a1a9cfff7f88..0c2e57f81a6d03 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -38,6 +38,8 @@ BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0] BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE +SELINUX_SPECIAL_FS="<>" + # ansible modules can be written in any language. To simplify # development of Python modules, the functions available here # can be inserted in any module source automatically by including @@ -528,10 +530,10 @@ def find_mount_point(self, path): path = os.path.dirname(path) return path - def is_nfs_path(self, path): + def is_special_selinux_path(self, path): """ - Returns a tuple containing (True, selinux_context) if the given path - is on a NFS mount point, otherwise the return will be (False, None). + Returns a tuple containing (True, selinux_context) if the given path is on a + NFS or other 'special' fs mount point, otherwise the return will be (False, None). 
""" try: f = open('/proc/mounts', 'r') @@ -542,9 +544,13 @@ def is_nfs_path(self, path): path_mount_point = self.find_mount_point(path) for line in mount_data: (device, mount_point, fstype, options, rest) = line.split(' ', 4) - if path_mount_point == mount_point and 'nfs' in fstype: - nfs_context = self.selinux_context(path_mount_point) - return (True, nfs_context) + + if path_mount_point == mount_point: + for fs in SELINUX_SPECIAL_FS.split(','): + if fs in fstype: + special_context = self.selinux_context(path_mount_point) + return (True, special_context) + return (False, None) def set_default_selinux_context(self, path, changed): @@ -562,9 +568,9 @@ def set_context_if_different(self, path, context, changed): # Iterate over the current context instead of the # argument context, which may have selevel. - (is_nfs, nfs_context) = self.is_nfs_path(path) - if is_nfs: - new_context = nfs_context + (is_special_se, sp_context) = self.is_special_selinux_path(path) + if is_special_se: + new_context = sp_context else: for i in range(len(cur_context)): if len(context) > i: From 2e31a67532fa889dd6e201ad14a8cbb5f6a8d3f1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 15 May 2015 10:42:41 -0700 Subject: [PATCH 0614/3617] Update module refs in v2 --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 46a553189331dc..b92ed6e9da7784 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 46a553189331dcbe2017aa47345c1c10640263bc +Subproject commit b92ed6e9da7784743976ade2affef63c8ddfedaf diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index aa86c5ff9010a5..8c8a0e1b8dc4b5 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit aa86c5ff9010a5201c8ee5ffd2b0045abfaba899 +Subproject commit 8c8a0e1b8dc4b51721b313fcabb9bb5bd8a6d26f From 
0913b8263ca88400efb2efd4cb681f8d883cceeb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 14 May 2015 10:50:22 -0400 Subject: [PATCH 0615/3617] made special treatment of certain filesystem for selinux configurable --- examples/ansible.cfg | 5 +++++ lib/ansible/constants.py | 5 ++++- lib/ansible/inventory/__init__.py | 2 +- lib/ansible/module_utils/basic.py | 24 +++++++++++++++--------- v1/ansible/module_common.py | 9 ++++++--- 5 files changed, 31 insertions(+), 14 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 4cf9d513e59533..85eada17cc8545 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -223,3 +223,8 @@ accelerate_daemon_timeout = 30 # is "no". #accelerate_multi_key = yes +[selinux] +# file systems that require special treatment when dealing with security context +# the default behaviour that copies the existing context or uses the user default +# needs to be changed to use the file system dependant context. +#special_context_filesystems=nfs,vboxsf,fuse diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 456beb8bbc40f4..d24dc311a79a5a 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -142,7 +142,10 @@ def shell_expand_path(path): DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() -DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) +DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) + +# selinux +DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf', islist=True) #TODO: get rid of ternary chain mess BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] diff --git a/lib/ansible/inventory/__init__.py 
b/lib/ansible/inventory/__init__.py index 063398f17f9cdf..45bdaf8a6f974b 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -61,7 +61,7 @@ def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): self._vars_per_host = {} self._vars_per_group = {} self._hosts_cache = {} - self._groups_list = {} + self._groups_list = {} self._pattern_cache = {} # to be set by calling set_playbook_basedir by playbook code diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 8f9b03f882d1a2..1f0abb177643dd 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -38,6 +38,8 @@ BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0] BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE +SELINUX_SPECIAL_FS="<>" + # ansible modules can be written in any language. To simplify # development of Python modules, the functions available here # can be inserted in any module source automatically by including @@ -529,10 +531,10 @@ def find_mount_point(self, path): path = os.path.dirname(path) return path - def is_nfs_path(self, path): + def is_special_selinux_path(self, path): """ - Returns a tuple containing (True, selinux_context) if the given path - is on a NFS mount point, otherwise the return will be (False, None). + Returns a tuple containing (True, selinux_context) if the given path is on a + NFS or other 'special' fs mount point, otherwise the return will be (False, None). 
""" try: f = open('/proc/mounts', 'r') @@ -543,9 +545,13 @@ def is_nfs_path(self, path): path_mount_point = self.find_mount_point(path) for line in mount_data: (device, mount_point, fstype, options, rest) = line.split(' ', 4) - if path_mount_point == mount_point and 'nfs' in fstype: - nfs_context = self.selinux_context(path_mount_point) - return (True, nfs_context) + + if path_mount_point == mount_point: + for fs in SELINUX_SPECIAL_FS.split(','): + if fs in fstype: + special_context = self.selinux_context(path_mount_point) + return (True, special_context) + return (False, None) def set_default_selinux_context(self, path, changed): @@ -563,9 +569,9 @@ def set_context_if_different(self, path, context, changed): # Iterate over the current context instead of the # argument context, which may have selevel. - (is_nfs, nfs_context) = self.is_nfs_path(path) - if is_nfs: - new_context = nfs_context + (is_special_se, sp_context) = self.is_special_selinux_path(path) + if is_special_se: + new_context = sp_context else: for i in range(len(cur_context)): if len(context) > i: diff --git a/v1/ansible/module_common.py b/v1/ansible/module_common.py index 118c757f8dcae1..fba5b9137da881 100644 --- a/v1/ansible/module_common.py +++ b/v1/ansible/module_common.py @@ -33,6 +33,8 @@ REPLACER_COMPLEX = "\"<>\"" REPLACER_WINDOWS = "# POWERSHELL_COMMON" REPLACER_VERSION = "\"<>\"" +REPLACER_SELINUX = "<>" + class ModuleReplacer(object): @@ -41,14 +43,14 @@ class ModuleReplacer(object): transfer. Rather than doing classical python imports, this allows for more efficient transfer in a no-bootstrapping scenario by not moving extra files over the wire, and also takes care of embedding arguments in the transferred - modules. + modules. This version is done in such a way that local imports can still be used in the module code, so IDEs don't have to be aware of what is going on. Example: - from ansible.module_utils.basic import * + from ansible.module_utils.basic import * ... 
will result in the insertion basic.py into the module @@ -94,7 +96,7 @@ def _find_snippet_imports(self, module_data, module_path): module_style = 'new' elif 'WANT_JSON' in module_data: module_style = 'non_native_want_json' - + output = StringIO() lines = module_data.split('\n') snippet_names = [] @@ -167,6 +169,7 @@ def modify_module(self, module_path, complex_args, module_args, inject): # these strings should be part of the 'basic' snippet which is required to be included module_data = module_data.replace(REPLACER_VERSION, repr(__version__)) + module_data = module_data.replace(REPLACER_SELINUX, ','.join(C.DEFAULT_SELINUX_SPECIAL_FS)) module_data = module_data.replace(REPLACER_ARGS, encoded_args) module_data = module_data.replace(REPLACER_COMPLEX, encoded_complex) From b0448d9cf4c743a3d7d5c31d88009745c6a3e3ca Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 15 May 2015 11:34:54 -0700 Subject: [PATCH 0616/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 8ab439498c9c07..75790b6ebbc6ec 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8ab439498c9c079abf0ef54e69ddcf1acd8e6f3e +Subproject commit 75790b6ebbc6ec20e522be08eea2db300ee51240 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index e3373ffc46d5b3..8c8a0e1b8dc4b5 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit e3373ffc46d5b318222a6dd71d6790bcdecb43be +Subproject commit 8c8a0e1b8dc4b51721b313fcabb9bb5bd8a6d26f From 674d1016c001d8e4cc1b8c8294a1b49c6aae4bf5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 15 May 2015 11:52:57 -0700 Subject: [PATCH 0617/3617] Update extras ref for doc fix --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras 
b/lib/ansible/modules/extras index 8c8a0e1b8dc4b5..32fb15e3106280 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8c8a0e1b8dc4b51721b313fcabb9bb5bd8a6d26f +Subproject commit 32fb15e3106280c40afd4d574f6baa991298407d From 5a947209059480903c3315fa4d75e073c5f33218 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 15 May 2015 12:12:45 -0700 Subject: [PATCH 0618/3617] Pull in a lot more doc fixes --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 32fb15e3106280..88eff11c048f88 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 32fb15e3106280c40afd4d574f6baa991298407d +Subproject commit 88eff11c048f88ed9a49bf1f38a26493083d35a2 From e7846343e57691f827623047b140ccbe938a13eb Mon Sep 17 00:00:00 2001 From: Till Maas Date: Fri, 15 May 2015 22:25:20 +0200 Subject: [PATCH 0619/3617] facts: Add ed25519 ssh pubkey --- lib/ansible/module_utils/facts.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index b223c5f5f7d3eb..b95fccdcb76ddb 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -474,14 +474,17 @@ def get_public_ssh_host_keys(self): dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub' rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub' ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub' + ed25519_filename = '/etc/ssh/ssh_host_ed25519_key.pub' if self.facts['system'] == 'Darwin': dsa_filename = '/etc/ssh_host_dsa_key.pub' rsa_filename = '/etc/ssh_host_rsa_key.pub' ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub' + ed25519_filename = '/etc/ssh_host_ed25519_key.pub' dsa = get_file_content(dsa_filename) rsa = get_file_content(rsa_filename) ecdsa = get_file_content(ecdsa_filename) + ed25519 = get_file_content(ed25519_filename) if dsa is None: dsa = 'NA' 
else: @@ -494,6 +497,10 @@ def get_public_ssh_host_keys(self): ecdsa = 'NA' else: self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1] + if ed25519 is None: + ed25519 = 'NA' + else: + self.facts['ssh_host_key_ed25519_public'] = ed25519.split()[1] def get_pkg_mgr_facts(self): self.facts['pkg_mgr'] = 'unknown' From 02d784598fcdbfd2bfc93c91ecff782a61dafcc3 Mon Sep 17 00:00:00 2001 From: Till Maas Date: Fri, 15 May 2015 22:36:13 +0200 Subject: [PATCH 0620/3617] facts: Simplify ssh key fetching --- lib/ansible/module_utils/facts.py | 37 +++++++++---------------------- 1 file changed, 10 insertions(+), 27 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index b95fccdcb76ddb..6ddae5df855d65 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -471,36 +471,19 @@ def get_cmdline(self): pass def get_public_ssh_host_keys(self): - dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub' - rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub' - ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub' - ed25519_filename = '/etc/ssh/ssh_host_ed25519_key.pub' + keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519') if self.facts['system'] == 'Darwin': - dsa_filename = '/etc/ssh_host_dsa_key.pub' - rsa_filename = '/etc/ssh_host_rsa_key.pub' - ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub' - ed25519_filename = '/etc/ssh_host_ed25519_key.pub' - dsa = get_file_content(dsa_filename) - rsa = get_file_content(rsa_filename) - ecdsa = get_file_content(ecdsa_filename) - ed25519 = get_file_content(ed25519_filename) - if dsa is None: - dsa = 'NA' + keydir = '/etc' else: - self.facts['ssh_host_key_dsa_public'] = dsa.split()[1] - if rsa is None: - rsa = 'NA' - else: - self.facts['ssh_host_key_rsa_public'] = rsa.split()[1] - if ecdsa is None: - ecdsa = 'NA' - else: - self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1] - if ed25519 is None: - ed25519 = 'NA' - else: - self.facts['ssh_host_key_ed25519_public'] = ed25519.split()[1] + 
keydir = '/etc/ssh' + + for type_ in keytypes: + key_filename = '%s/ssh_host_%s_key.pub' % (keydir, type_) + keydata = get_file_content(key_filename) + if keydata is not None: + factname = 'ssh_host_key_%s_public' % type_ + self.facts[factname] = keydata.split()[1] def get_pkg_mgr_facts(self): self.facts['pkg_mgr'] = 'unknown' From 23cd3294d0caaf5cf90de8d63b779d186e158abd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 16 May 2015 15:45:01 -0500 Subject: [PATCH 0621/3617] Starting to add v2 tests for template --- test/units/template/__init__.py | 21 ++++++++ test/units/template/test_safe_eval.py | 21 ++++++++ test/units/template/test_templar.py | 74 +++++++++++++++++++++++++++ test/units/template/test_vars.py | 21 ++++++++ 4 files changed, 137 insertions(+) create mode 100644 test/units/template/__init__.py create mode 100644 test/units/template/test_safe_eval.py create mode 100644 test/units/template/test_templar.py create mode 100644 test/units/template/test_vars.py diff --git a/test/units/template/__init__.py b/test/units/template/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/template/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/template/test_safe_eval.py b/test/units/template/test_safe_eval.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/template/test_safe_eval.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/template/test_templar.py b/test/units/template/test_templar.py new file mode 100644 index 00000000000000..f2f727d1c79790 --- /dev/null +++ b/test/units/template/test_templar.py @@ -0,0 +1,74 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible import constants as C +from ansible.plugins import filter_loader, lookup_loader, module_loader +from ansible.plugins.strategies import SharedPluginLoaderObj +from ansible.template import Templar + +from units.mock.loader import DictDataLoader + +class TestTemplar(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_templar_simple(self): + fake_loader = DictDataLoader({}) + shared_loader = SharedPluginLoaderObj() + templar = Templar(loader=fake_loader, variables=dict(foo="bar", bam="{{foo}}", num=1, var_true=True, var_false=False, var_dict=dict(a="b"), bad_dict="{a='b'", var_list=[1])) + + # test some basic templating + self.assertEqual(templar.template("{{foo}}"), "bar") + self.assertEqual(templar.template("{{foo}}\n"), "bar") + self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n") + self.assertEqual(templar.template("foo", convert_bare=True), "bar") + self.assertEqual(templar.template("{{bam}}"), "bar") + self.assertEqual(templar.template("{{num}}"), 1) + self.assertEqual(templar.template("{{var_true}}"), True) + self.assertEqual(templar.template("{{var_false}}"), False) + self.assertEqual(templar.template("{{var_dict}}"), dict(a="b")) + self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'") + self.assertEqual(templar.template("{{var_list}}"), [1]) + + # test set_available_variables() + templar.set_available_variables(variables=dict(foo="bam")) + self.assertEqual(templar.template("{{foo}}"), "bam") + # variables must be a dict() for set_available_variables() + self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam") + + 
def test_template_jinja2_extensions(self): + fake_loader = DictDataLoader({}) + templar = Templar(loader=fake_loader) + + old_exts = C.DEFAULT_JINJA2_EXTENSIONS + try: + C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar" + self.assertEqual(templar._get_extensions(), ['foo', 'bar']) + finally: + C.DEFAULT_JINJA2_EXTENSIONS = old_exts + diff --git a/test/units/template/test_vars.py b/test/units/template/test_vars.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/template/test_vars.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + From 9aa8676bdd13a0636e5e7920713197972d56946d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 17 May 2015 01:06:02 -0500 Subject: [PATCH 0622/3617] More template unit tests for v2 --- lib/ansible/plugins/lookup/file.py | 12 ++++++++---- lib/ansible/template/__init__.py | 2 +- test/units/mock/loader.py | 6 ++++++ test/units/template/test_templar.py | 20 ++++++++++++++++++-- 4 files changed, 33 insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py index efb039497dd89b..ea53c37e03986b 100644 --- a/lib/ansible/plugins/lookup/file.py +++ b/lib/ansible/plugins/lookup/file.py @@ -42,18 +42,22 @@ def run(self, terms, variables=None, **kwargs): # role/files/ directory, and finally the playbook directory # itself (which will be relative to the current working dir) + if 'role_path' in variables: + relative_path = self._loader.path_dwim_relative(variables['role_path'], 'files', term, check=False) + # FIXME: the original file stuff still needs to be worked out, but the # playbook_dir stuff should be able to be removed as it should # be covered by the fact that the loader contains that info - #if '_original_file' in variables: - # relative_path = self._loader.path_dwim_relative(variables['_original_file'], 'files', term, self.basedir, check=False) #if 'playbook_dir' in variables: # playbook_path = os.path.join(variables['playbook_dir'], term) for path in (basedir_path, relative_path, playbook_path): - if path and os.path.exists(path): - ret.append(codecs.open(path, encoding="utf8").read().rstrip()) + try: + contents = self._loader._get_file_contents(path) + ret.append(contents.rstrip()) break + except AnsibleParserError: + continue else: raise AnsibleError("could not locate file in lookup: %s" % term) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py 
index 19e091b9b27ad6..8ad9917d6020e0 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -218,7 +218,7 @@ def _lookup(self, name, *args, **kwargs): # safely catch run failures per #5059 try: ran = instance.run(*args, variables=self._available_variables, **kwargs) - except AnsibleUndefinedVariable: + except (AnsibleUndefinedVariable, UndefinedError): raise except Exception, e: if self._fail_on_lookup_errors: diff --git a/test/units/mock/loader.py b/test/units/mock/loader.py index cf9d7ea72d0fe0..078ca3f0e6ce26 100644 --- a/test/units/mock/loader.py +++ b/test/units/mock/loader.py @@ -38,6 +38,12 @@ def load_from_file(self, path): return self.load(self._file_mapping[path], path) return None + def _get_file_contents(self, path): + if path in self._file_mapping: + return self._file_mapping[path] + else: + raise AnsibleParserError("file not found: %s" % path) + def path_exists(self, path): return path in self._file_mapping or path in self._known_directories diff --git a/test/units/template/test_templar.py b/test/units/template/test_templar.py index f2f727d1c79790..eb634994fd7d41 100644 --- a/test/units/template/test_templar.py +++ b/test/units/template/test_templar.py @@ -19,10 +19,13 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from jinja2.exceptions import UndefinedError + from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock from ansible import constants as C +from ansible.errors import * from ansible.plugins import filter_loader, lookup_loader, module_loader from ansible.plugins.strategies import SharedPluginLoaderObj from ansible.template import Templar @@ -38,9 +41,11 @@ def tearDown(self): pass def test_templar_simple(self): - fake_loader = DictDataLoader({}) + fake_loader = DictDataLoader({ + "/path/to/my_file.txt": "foo\n", + }) shared_loader = SharedPluginLoaderObj() - templar = Templar(loader=fake_loader, variables=dict(foo="bar", 
bam="{{foo}}", num=1, var_true=True, var_false=False, var_dict=dict(a="b"), bad_dict="{a='b'", var_list=[1])) + templar = Templar(loader=fake_loader, variables=dict(foo="bar", bam="{{foo}}", num=1, var_true=True, var_false=False, var_dict=dict(a="b"), bad_dict="{a='b'", var_list=[1], recursive="{{recursive}}")) # test some basic templating self.assertEqual(templar.template("{{foo}}"), "bar") @@ -54,6 +59,17 @@ def test_templar_simple(self): self.assertEqual(templar.template("{{var_dict}}"), dict(a="b")) self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'") self.assertEqual(templar.template("{{var_list}}"), [1]) + self.assertEqual(templar.template(1, convert_bare=True), 1) + self.assertRaises(UndefinedError, templar.template, "{{bad_var}}") + self.assertEqual(templar.template("{{lookup('file', '/path/to/my_file.txt')}}"), "foo") + self.assertRaises(UndefinedError, templar.template, "{{lookup('file', bad_var)}}") + self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}") + self.assertRaises(AnsibleError, templar.template, "{{recursive}}") + self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}") + + # test with fail_on_undefined=False + templar = Templar(loader=fake_loader, fail_on_undefined=False) + self.assertEqual(templar.template("{{bad_var}}"), "{{bad_var}}") # test set_available_variables() templar.set_available_variables(variables=dict(foo="bam")) From 398b1d3e60e05585e81c9a47d00ab1077391813d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 17 May 2015 01:13:22 -0500 Subject: [PATCH 0623/3617] Cleaning up template test syntax a bit --- test/units/template/test_templar.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/test/units/template/test_templar.py b/test/units/template/test_templar.py index eb634994fd7d41..ce40c73b0d0e87 100644 --- a/test/units/template/test_templar.py +++ b/test/units/template/test_templar.py @@ -45,7 +45,18 @@ def test_templar_simple(self): 
"/path/to/my_file.txt": "foo\n", }) shared_loader = SharedPluginLoaderObj() - templar = Templar(loader=fake_loader, variables=dict(foo="bar", bam="{{foo}}", num=1, var_true=True, var_false=False, var_dict=dict(a="b"), bad_dict="{a='b'", var_list=[1], recursive="{{recursive}}")) + variables = dict( + foo="bar", + bam="{{foo}}", + num=1, + var_true=True, + var_false=False, + var_dict=dict(a="b"), + bad_dict="{a='b'", + var_list=[1], + recursive="{{recursive}}", + ) + templar = Templar(loader=fake_loader, variables=variables) # test some basic templating self.assertEqual(templar.template("{{foo}}"), "bar") From a960fcd569c0fde85b27f3c34093634b37fa2759 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 17 May 2015 01:29:40 -0500 Subject: [PATCH 0624/3617] Adding module_utils tests from v1 to v2 --- test/units/module_utils/__init__.py | 21 ++ test/units/module_utils/test_basic.py | 355 +++++++++++++++++++++++ test/units/module_utils/test_database.py | 118 ++++++++ 3 files changed, 494 insertions(+) create mode 100644 test/units/module_utils/__init__.py create mode 100644 test/units/module_utils/test_basic.py create mode 100644 test/units/module_utils/test_database.py diff --git a/test/units/module_utils/__init__.py b/test/units/module_utils/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/module_utils/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py new file mode 100644 index 00000000000000..60f501ba28b5c8 --- /dev/null +++ b/test/units/module_utils/test_basic.py @@ -0,0 +1,355 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +#from __future__ import (absolute_import, division, print_function) +from __future__ import (absolute_import, division) +__metaclass__ = type + +import os +import tempfile + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible.errors import * +from ansible.executor.module_common import modify_module +from ansible.module_utils.basic import heuristic_log_sanitize +from ansible.utils.hashing import checksum as utils_checksum + +TEST_MODULE_DATA = """ +from ansible.module_utils.basic import * + +def get_module(): + return AnsibleModule( + argument_spec = dict(), + supports_check_mode = True, + no_log = True, + ) + +get_module() + +""" + +class TestModuleUtilsBasic(unittest.TestCase): + + def cleanup_temp_file(self, fd, path): + try: + os.close(fd) + os.remove(path) + except: + pass + + def cleanup_temp_dir(self, path): + try: + os.rmdir(path) + except: + pass + + def setUp(self): + # create a temporary file for the test module + # we're about to generate + self.tmp_fd, self.tmp_path = tempfile.mkstemp() + os.write(self.tmp_fd, TEST_MODULE_DATA) + + # template the module code and eval it + module_data, module_style, shebang = modify_module(self.tmp_path, {}) + + d = {} + exec(module_data, d, d) + self.module = d['get_module']() + + # module_utils/basic.py screws with CWD, let's save it and reset + self.cwd = os.getcwd() + + def tearDown(self): + self.cleanup_temp_file(self.tmp_fd, self.tmp_path) + # Reset CWD back to what it was before basic.py changed it + os.chdir(self.cwd) + + ################################################################################# + # run_command() tests + + # test run_command with a string command + def test_run_command_string(self): + (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'") + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'", 
use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + + # test run_command with an array of args (with both use_unsafe_shell=True|False) + def test_run_command_args(self): + (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"]) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"], use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + + # test run_command with leading environment variables + #@raises(SystemExit) + def test_run_command_string_with_env_variables(self): + self.assertRaises(SystemExit, self.module.run_command, 'FOO=bar /bin/echo -n "foo bar"') + + #@raises(SystemExit) + def test_run_command_args_with_env_variables(self): + self.assertRaises(SystemExit, self.module.run_command, ['FOO=bar', '/bin/echo', '-n', 'foo bar']) + + def test_run_command_string_unsafe_with_env_variables(self): + (rc, out, err) = self.module.run_command('FOO=bar /bin/echo -n "foo bar"', use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar') + + # test run_command with a command pipe (with both use_unsafe_shell=True|False) + def test_run_command_string_unsafe_with_pipe(self): + (rc, out, err) = self.module.run_command('echo "foo bar" | cat', use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar\n') + + # test run_command with a shell redirect in (with both use_unsafe_shell=True|False) + def test_run_command_string_unsafe_with_redirect_in(self): + (rc, out, err) = self.module.run_command('cat << EOF\nfoo bar\nEOF', use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar\n') + + # test run_command with a shell redirect out (with both use_unsafe_shell=True|False) + def test_run_command_string_unsafe_with_redirect_out(self): + tmp_fd, tmp_path = tempfile.mkstemp() + try: + (rc, out, err) = self.module.run_command('echo "foo bar" > %s' % 
tmp_path, use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertTrue(os.path.exists(tmp_path)) + checksum = utils_checksum(tmp_path) + self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec') + except: + raise + finally: + self.cleanup_temp_file(tmp_fd, tmp_path) + + # test run_command with a double shell redirect out (append) (with both use_unsafe_shell=True|False) + def test_run_command_string_unsafe_with_double_redirect_out(self): + tmp_fd, tmp_path = tempfile.mkstemp() + try: + (rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True) + self.assertEqual(rc, 0) + self.assertTrue(os.path.exists(tmp_path)) + checksum = utils_checksum(tmp_path) + self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec') + except: + raise + finally: + self.cleanup_temp_file(tmp_fd, tmp_path) + + # test run_command with data + def test_run_command_string_with_data(self): + (rc, out, err) = self.module.run_command('cat', data='foo bar') + self.assertEqual(rc, 0) + self.assertEqual(out, 'foo bar\n') + + # test run_command with binary data + def test_run_command_string_with_binary_data(self): + (rc, out, err) = self.module.run_command('cat', data='\x41\x42\x43\x44', binary_data=True) + self.assertEqual(rc, 0) + self.assertEqual(out, 'ABCD') + + # test run_command with a cwd set + def test_run_command_string_with_cwd(self): + tmp_path = tempfile.mkdtemp() + try: + (rc, out, err) = self.module.run_command('pwd', cwd=tmp_path) + self.assertEqual(rc, 0) + self.assertTrue(os.path.exists(tmp_path)) + self.assertEqual(out.strip(), os.path.realpath(tmp_path)) + except: + raise + finally: + self.cleanup_temp_dir(tmp_path) + + +class TestModuleUtilsBasicHelpers(unittest.TestCase): + ''' Test some implementation details of AnsibleModule + + Some pieces of AnsibleModule are implementation details but they have + potential cornercases that we need to check. 
Go ahead and test at + this level that the functions are behaving even though their API may + change and we'd have to rewrite these tests so that we know that we + need to check for those problems in any rewrite. + + In the future we might want to restructure higher level code to be + friendlier to unittests so that we can test at the level that the public + is interacting with the APIs. + ''' + + MANY_RECORDS = 7000 + URL_SECRET = 'http://username:pas:word@foo.com/data' + SSH_SECRET = 'username:pas:word@foo.com/data' + + def cleanup_temp_file(self, fd, path): + try: + os.close(fd) + os.remove(path) + except: + pass + + def cleanup_temp_dir(self, path): + try: + os.rmdir(path) + except: + pass + + def _gen_data(self, records, per_rec, top_level, secret_text): + hostvars = {'hostvars': {}} + for i in range(1, records, 1): + host_facts = {'host%s' % i: + {'pstack': + {'running': '875.1', + 'symlinked': '880.0', + 'tars': [], + 'versions': ['885.0']}, + }} + + if per_rec: + host_facts['host%s' % i]['secret'] = secret_text + hostvars['hostvars'].update(host_facts) + if top_level: + hostvars['secret'] = secret_text + return hostvars + + def setUp(self): + self.many_url = repr(self._gen_data(self.MANY_RECORDS, True, True, + self.URL_SECRET)) + self.many_ssh = repr(self._gen_data(self.MANY_RECORDS, True, True, + self.SSH_SECRET)) + self.one_url = repr(self._gen_data(self.MANY_RECORDS, False, True, + self.URL_SECRET)) + self.one_ssh = repr(self._gen_data(self.MANY_RECORDS, False, True, + self.SSH_SECRET)) + self.zero_secrets = repr(self._gen_data(self.MANY_RECORDS, False, + False, '')) + self.few_url = repr(self._gen_data(2, True, True, self.URL_SECRET)) + self.few_ssh = repr(self._gen_data(2, True, True, self.SSH_SECRET)) + + # create a temporary file for the test module + # we're about to generate + self.tmp_fd, self.tmp_path = tempfile.mkstemp() + os.write(self.tmp_fd, TEST_MODULE_DATA) + + # template the module code and eval it + module_data, module_style, shebang = 
modify_module(self.tmp_path, {}) + + d = {} + exec(module_data, d, d) + self.module = d['get_module']() + + # module_utils/basic.py screws with CWD, let's save it and reset + self.cwd = os.getcwd() + + def tearDown(self): + self.cleanup_temp_file(self.tmp_fd, self.tmp_path) + # Reset CWD back to what it was before basic.py changed it + os.chdir(self.cwd) + + + ################################################################################# + + # + # Speed tests + # + + # Previously, we used regexes which had some pathologically slow cases for + # parameters with large amounts of data with many ':' but no '@'. The + # present function gets slower when there are many replacements so we may + # want to explore regexes in the future (for the speed when substituting + # or flexibility). These speed tests will hopefully tell us if we're + # introducing code that has cases that are simply too slow. + # + # Some regex notes: + # * re.sub() is faster than re.match() + str.join(). + # * We may be able to detect a large number of '@' symbols and then use + # a regex else use the present function. + + #@timed(5) + #def test_log_sanitize_speed_many_url(self): + # heuristic_log_sanitize(self.many_url) + + #@timed(5) + #def test_log_sanitize_speed_many_ssh(self): + # heuristic_log_sanitize(self.many_ssh) + + #@timed(5) + #def test_log_sanitize_speed_one_url(self): + # heuristic_log_sanitize(self.one_url) + + #@timed(5) + #def test_log_sanitize_speed_one_ssh(self): + # heuristic_log_sanitize(self.one_ssh) + + #@timed(5) + #def test_log_sanitize_speed_zero_secrets(self): + # heuristic_log_sanitize(self.zero_secrets) + + # + # Test that the password obfuscation sanitizes somewhat cleanly. 
+ # + + def test_log_sanitize_correctness(self): + url_data = repr(self._gen_data(3, True, True, self.URL_SECRET)) + ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET)) + + url_output = heuristic_log_sanitize(url_data) + ssh_output = heuristic_log_sanitize(ssh_data) + + # Basic functionality: Successfully hid the password + try: + self.assertNotIn('pas:word', url_output) + self.assertNotIn('pas:word', ssh_output) + + # Slightly more advanced, we hid all of the password despite the ":" + self.assertNotIn('pas', url_output) + self.assertNotIn('pas', ssh_output) + except AttributeError: + # python2.6 or less's unittest + self.assertFalse('pas:word' in url_output, '%s is present in %s' % ('"pas:word"', url_output)) + self.assertFalse('pas:word' in ssh_output, '%s is present in %s' % ('"pas:word"', ssh_output)) + + self.assertFalse('pas' in url_output, '%s is present in %s' % ('"pas"', url_output)) + self.assertFalse('pas' in ssh_output, '%s is present in %s' % ('"pas"', ssh_output)) + + # In this implementation we replace the password with 8 "*" which is + # also the length of our password. The url fields should be able to + # accurately detect where the password ends so the length should be + # the same: + self.assertEqual(len(url_output), len(url_data)) + + # ssh checking is harder as the heuristic is overzealous in many + # cases. Since the input will have at least one ":" present before + # the password we can tell some things about the beginning and end of + # the data, though: + self.assertTrue(ssh_output.startswith("{'")) + self.assertTrue(ssh_output.endswith("}")) + try: + self.assertIn(":********@foo.com/data'", ssh_output) + except AttributeError: + # python2.6 or less's unittest + self.assertTrue(":********@foo.com/data'" in ssh_output, '%s is not present in %s' % (":********@foo.com/data'", ssh_output)) + + # The overzealous-ness here may lead to us changing the algorithm in + # the future. 
We could make it consume less of the data (with the + # possibility of leaving partial passwords exposed) and encourage + # people to use no_log instead of relying on this obfuscation. diff --git a/test/units/module_utils/test_database.py b/test/units/module_utils/test_database.py new file mode 100644 index 00000000000000..67da0b60e0bd03 --- /dev/null +++ b/test/units/module_utils/test_database.py @@ -0,0 +1,118 @@ +import collections +import mock +import os +import re + +from nose.tools import eq_ +try: + from nose.tools import assert_raises_regexp +except ImportError: + # Python < 2.7 + def assert_raises_regexp(expected, regexp, callable, *a, **kw): + try: + callable(*a, **kw) + except expected as e: + if isinstance(regexp, basestring): + regexp = re.compile(regexp) + if not regexp.search(str(e)): + raise Exception('"%s" does not match "%s"' % + (regexp.pattern, str(e))) + else: + if hasattr(expected,'__name__'): excName = expected.__name__ + else: excName = str(expected) + raise AssertionError("%s not raised" % excName) + +from ansible.module_utils.database import ( + pg_quote_identifier, + SQLParseError, +) + + +# Note: Using nose's generator test cases here so we can't inherit from +# unittest.TestCase +class TestQuotePgIdentifier(object): + + # These are all valid strings + # The results are based on interpreting the identifier as a table name + valid = { + # User quoted + '"public.table"': '"public.table"', + '"public"."table"': '"public"."table"', + '"schema test"."table test"': '"schema test"."table test"', + + # We quote part + 'public.table': '"public"."table"', + '"public".table': '"public"."table"', + 'public."table"': '"public"."table"', + 'schema test.table test': '"schema test"."table test"', + '"schema test".table test': '"schema test"."table test"', + 'schema test."table test"': '"schema test"."table test"', + + # Embedded double quotes + 'table "test"': '"table ""test"""', + 'public."table ""test"""': '"public"."table ""test"""', + 'public.table 
"test"': '"public"."table ""test"""', + 'schema "test".table': '"schema ""test"""."table"', + '"schema ""test""".table': '"schema ""test"""."table"', + '"""wat"""."""test"""': '"""wat"""."""test"""', + # Sigh, handle these as well: + '"no end quote': '"""no end quote"', + 'schema."table': '"schema"."""table"', + '"schema.table': '"""schema"."table"', + 'schema."table.something': '"schema"."""table"."something"', + + # Embedded dots + '"schema.test"."table.test"': '"schema.test"."table.test"', + '"schema.".table': '"schema."."table"', + '"schema."."table"': '"schema."."table"', + 'schema.".table"': '"schema".".table"', + '"schema".".table"': '"schema".".table"', + '"schema.".".table"': '"schema.".".table"', + # These are valid but maybe not what the user intended + '."table"': '".""table"""', + 'table.': '"table."', + } + + invalid = { + ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots', + ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots', + ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots', + ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots", + ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots", + ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots", + ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots", + ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes', + ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes', + ('"schema."table"','table'): 'User escaped identifiers must escape extra quotes', + ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot', + } + + def check_valid_quotes(self, identifier, 
quoted_identifier): + eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier) + + def test_valid_quotes(self): + for identifier in self.valid: + yield self.check_valid_quotes, identifier, self.valid[identifier] + + def check_invalid_quotes(self, identifier, id_type, msg): + assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type)) + + def test_invalid_quotes(self): + for test in self.invalid: + yield self.check_invalid_quotes, test[0], test[1], self.invalid[test] + + def test_how_many_dots(self): + eq_(pg_quote_identifier('role', 'role'), '"role"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role')) + + eq_(pg_quote_identifier('db', 'database'), '"db"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database')) + + eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema')) + + eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table')) + + eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"') + assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column')) From 8e2938c9974b2c15fc715684748b99d0e2d6a259 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 17 May 2015 09:21:46 -0700 Subject: [PATCH 0625/3617] Update core module ref for lineinfile fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core 
index 75790b6ebbc6ec..94246003d5672e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 75790b6ebbc6ec20e522be08eea2db300ee51240 +Subproject commit 94246003d5672e058605e6ba712f73db1011e5d3 From 8f71e47a73ad2be41a27e9a0a55a480e67389bd4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 17 May 2015 09:23:39 -0700 Subject: [PATCH 0626/3617] Update core and extras module refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b92ed6e9da7784..71f16f5d418149 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b92ed6e9da7784743976ade2affef63c8ddfedaf +Subproject commit 71f16f5d418149057c85b34a2916d7421c7cc67c diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8c8a0e1b8dc4b5..d590de8c4ef976 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8c8a0e1b8dc4b51721b313fcabb9bb5bd8a6d26f +Subproject commit d590de8c4ef976d571264d6050b0abc59a82bde2 From 525de8b7cff46b1e31c0565cfbc3f51e3f1d9e5e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 17 May 2015 09:28:48 -0700 Subject: [PATCH 0627/3617] Fix codecs.escape_decode() usage --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 94246003d5672e..4b44aa479949bd 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 94246003d5672e058605e6ba712f73db1011e5d3 +Subproject commit 4b44aa479949bdbff554017edf22813572fd03ca diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 88eff11c048f88..d590de8c4ef976 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 
88eff11c048f88ed9a49bf1f38a26493083d35a2 +Subproject commit d590de8c4ef976d571264d6050b0abc59a82bde2 From 9ef5d8da6e06c3f567f9833ca7ab9eafbc642f88 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 18 May 2015 08:17:29 -0400 Subject: [PATCH 0628/3617] added new rabbitmq modules --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1bfc7780e72061..425404cf15a2e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,9 @@ New Modules: * openstack: os_volume * pushover * pushbullet + * rabbitmq_binding + * rabbitmq_exchange + * rabbitmq_queue * zabbix_host * zabbix_hostmacro * zabbix_screen From d42cfb338609e3992e3f16c91e000e80b57a0aad Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 18 May 2015 08:57:22 -0400 Subject: [PATCH 0629/3617] added module checklist docs --- docsite/rst/developing_modules.rst | 38 ++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 3b563ee755f42f..44051d3c6890b1 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -449,6 +449,44 @@ a github pull request to the `extras use fail_json() from the module object +* Import custom packages in try/except and handled with fail_json() in main() e.g.:: + + try: + import foo + HAS_LIB=True + except: + HAS_LIB=False + +* Are module actions idempotent? If not document in the descriptions or the notes +* Import module snippets `from ansible.module_utils.basic import *` at the bottom, conserves line numbers for debugging. +* Try to normalize parameters with other modules, you can have aliases for when user is more familiar with underlying API name for the option +* Being pep8 compliant is nice, but not a requirement. 
Specifically, the 80 column limit now hinders readability more than it improves it +* Avoid '`action`/`command`', they are imperative and not declarative, there are other ways to express the same thing +* Sometimes you want to split the module, especially if you are adding a list/info state, you want a _facts version +* If you are asking 'how can I have a module execute other modules' ... you want to write a role + Deprecating and making module aliases `````````````````````````````````````` From 6c1e806a2f538d1fc4d18eb4ed4fcb2eeb887dcd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 18 May 2015 09:57:44 -0400 Subject: [PATCH 0630/3617] added return docs management --- docsite/rst/developing_modules.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 44051d3c6890b1..dd4d6b4d7ad312 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -466,12 +466,14 @@ Module checklist * Made use of U() for urls, C() for files and options, I() for params, M() for modules? * GPL License header * Examples: make sure they are reproducible + * Return: document the return structure of the module * Does module use check_mode? Could it be modified to use it? Document it * Exceptions: The module must handle them. (exceptions are bugs) * Give out useful messages on what you were doing and you can add the exception message to that. * Avoid catchall exceptions, they are not very useful unless the underlying API gives very good error messages pertaining the attempted action. * The module must not use sys.exit() --> use fail_json() from the module object * Import custom packages in try/except and handled with fail_json() in main() e.g.:: +* The return structure should be consistent, even if NA/None are used for keys normally returned under other options.
try: import foo From 61110c08b891c5e701f20ba57e54edd2cdb6a05c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 18 May 2015 10:07:17 -0400 Subject: [PATCH 0631/3617] added ec2_win_password module --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 425404cf15a2e6..abe42602a6ba1e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Deprecated Modules (new ones in parens): New Modules: * find * ec2_ami_find + * ec2_win_password * circonus_annotation * consul * consul_acl From 684e30a5f4cd6e56a1531dd6652b33b1ed78e4bd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 18 May 2015 09:00:16 -0700 Subject: [PATCH 0632/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 71f16f5d418149..3dd0f2c40f9dbc 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 71f16f5d418149057c85b34a2916d7421c7cc67c +Subproject commit 3dd0f2c40f9dbc2311021e072a06671cd3da681a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index d590de8c4ef976..20bf6d825e807a 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit d590de8c4ef976d571264d6050b0abc59a82bde2 +Subproject commit 20bf6d825e807a590585f944c405d83c53704f43 From 5343c99cb10080ddb6f299610d8f92b0e16235f1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 18 May 2015 09:25:15 -0700 Subject: [PATCH 0633/3617] Update submodule pointers --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 4b44aa479949bd..627593b43a0dc3 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 4b44aa479949bdbff554017edf22813572fd03ca 
+Subproject commit 627593b43a0dc33050b2ede1efa9fa08080ebb92 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index d590de8c4ef976..20bf6d825e807a 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit d590de8c4ef976d571264d6050b0abc59a82bde2 +Subproject commit 20bf6d825e807a590585f944c405d83c53704f43 From e69c7f54747b23b133faf859eea0f8682632e96c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 18 May 2015 12:08:45 -0700 Subject: [PATCH 0634/3617] Update modules refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 627593b43a0dc3..81b476cd02ef53 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 627593b43a0dc33050b2ede1efa9fa08080ebb92 +Subproject commit 81b476cd02ef53a1e665a71bcd098463e1a4ead3 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 20bf6d825e807a..576d94e8d4fa8e 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 20bf6d825e807a590585f944c405d83c53704f43 +Subproject commit 576d94e8d4fa8e79216441efd65be62cfb0c603f From f083ca747acf1b5d79057d8cc61d440bf9029297 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 18 May 2015 12:46:31 -0700 Subject: [PATCH 0635/3617] Update submodule ref to fix postgres_user --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 81b476cd02ef53..7dd9f57e161b78 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 81b476cd02ef53a1e665a71bcd098463e1a4ead3 +Subproject commit 7dd9f57e161b78981eb797a4c77fd6e7042ad7fd From 2e07567c16bdd339f2305ee67e23ede60ba9a3ce Mon Sep 17 00:00:00 2001 From: Hugh Saunders Date: Fri, 27 Mar 2015 18:24:33 +0000 
Subject: [PATCH 0636/3617] Retry exec command via ssh_retry This PR adds the option to retry failed ssh executions, if the failure is caused by ssh itself, not the remote command. This can be helpful if there are transient network issues. Retries are only implemented in the openssh connection plugin and are disabled by default. Retries are enabled by setting ssh_connection > retries to an integer greater than 0. Running a long series of playbooks, or a short playbook against a large cluster may result in transient ssh failures, some examples logged [here](https://trello.com/c/1yh6csEQ/13-ssh-errors). Ansible should be able to retry an ssh connection in order to survive transient failures. Ansible marks a host as failed the first time it fails to contact it. --- lib/ansible/constants.py | 2 + v1/ansible/runner/connection_plugins/ssh.py | 65 +++++++++++++++++---- 2 files changed, 57 insertions(+), 10 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index d24dc311a79a5a..9c1c820421a52b 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -195,7 +195,9 @@ def shell_expand_path(path): ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) +ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) + # obsolete -- will be formally removed ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) 
diff --git a/v1/ansible/runner/connection_plugins/ssh.py b/v1/ansible/runner/connection_plugins/ssh.py index 036175f6a9c3e2..ff7e8e03c874b7 100644 --- a/v1/ansible/runner/connection_plugins/ssh.py +++ b/v1/ansible/runner/connection_plugins/ssh.py @@ -16,21 +16,22 @@ # along with Ansible. If not, see . # +import fcntl +import gettext +import hmac import os -import re -import subprocess -import shlex import pipes +import pty +import pwd import random +import re import select -import fcntl -import hmac -import pwd -import gettext -import pty +import shlex +import subprocess +import time from hashlib import sha1 import ansible.constants as C -from ansible.callbacks import vvv +from ansible.callbacks import vvv, vv from ansible import errors from ansible import utils @@ -256,7 +257,51 @@ def not_in_host_file(self, host): vvv("EXEC previous known host file not found for %s" % host) return True - def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): + def exec_command(self, *args, **kwargs): + """ Wrapper around _exec_command to retry in the case of an ssh + failure + + Will retry if: + * an exception is caught + * ssh returns 255 + + Will not retry if + * remaining_tries is <2 + * retries limit reached + """ + remaining_tries = C.get_config( + C.p, 'ssh_connection', 'retries', + 'ANSIBLE_SSH_RETRIES', 3, integer=True) + 1 + cmd_summary = "%s %s..." 
% (args[0], str(kwargs)[:200]) + for attempt in xrange(remaining_tries): + pause = 2 ** attempt - 1 + if pause > 30: + pause = 30 + time.sleep(pause) + try: + return_tuple = self._exec_command(*args, **kwargs) + except Exception as e: + msg = ("ssh_retry: attempt: %d, caught exception(%s) from cmd " + "(%s).") % (attempt, e, cmd_summary) + vv(msg) + if attempt == remaining_tries - 1: + raise e + else: + continue + # 0 = success + # 1-254 = remote command return code + # 255 = failure from the ssh command itself + if return_tuple[0] != 255: + break + else: + msg = ('ssh_retry: attempt: %d, ssh return code is 255. cmd ' + '(%s).') % (attempt, cmd_summary) + vv(msg) + + return return_tuple + + + def _exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: From 21fa385ce72d337434e462e33b4b9dcaecceda52 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 18 May 2015 17:26:59 -0700 Subject: [PATCH 0637/3617] Reorganizing plugin unit tests and adding start of strategy tests (v2) --- lib/ansible/plugins/strategies/__init__.py | 7 +- test/units/plugins/action/__init__.py | 21 +++ test/units/plugins/cache/__init__.py | 21 +++ test/units/plugins/{ => cache}/test_cache.py | 0 test/units/plugins/callback/__init__.py | 21 +++ test/units/plugins/connections/__init__.py | 21 +++ .../{ => connections}/test_connection.py | 0 test/units/plugins/filter/__init__.py | 21 +++ test/units/plugins/inventory/__init__.py | 21 +++ test/units/plugins/lookup/__init__.py | 21 +++ test/units/plugins/shell/__init__.py | 21 +++ test/units/plugins/strategies/__init__.py | 21 +++ .../plugins/strategies/test_strategy_base.py | 127 ++++++++++++++++++ test/units/plugins/vars/__init__.py | 21 +++ 14 files changed, 339 insertions(+), 5 deletions(-) create mode 100644 test/units/plugins/action/__init__.py create 
mode 100644 test/units/plugins/cache/__init__.py rename test/units/plugins/{ => cache}/test_cache.py (100%) create mode 100644 test/units/plugins/callback/__init__.py create mode 100644 test/units/plugins/connections/__init__.py rename test/units/plugins/{ => connections}/test_connection.py (100%) create mode 100644 test/units/plugins/filter/__init__.py create mode 100644 test/units/plugins/inventory/__init__.py create mode 100644 test/units/plugins/lookup/__init__.py create mode 100644 test/units/plugins/shell/__init__.py create mode 100644 test/units/plugins/strategies/__init__.py create mode 100644 test/units/plugins/strategies/test_strategy_base.py create mode 100644 test/units/plugins/vars/__init__.py diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index a3668ba089a8ab..7cc1709e08474d 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -61,7 +61,6 @@ def __init__(self, tqm): self._inventory = tqm.get_inventory() self._workers = tqm.get_workers() self._notified_handlers = tqm.get_notified_handlers() - #self._callback = tqm.get_callback() self._variable_manager = tqm.get_variable_manager() self._loader = tqm.get_loader() self._final_q = tqm._final_q @@ -80,8 +79,6 @@ def run(self, iterator, connection_info, result=True): num_failed = len(self._tqm._failed_hosts) num_unreachable = len(self._tqm._unreachable_hosts) - #debug("running the cleanup portion of the play") - #result &= self.cleanup(iterator, connection_info) debug("running handlers") result &= self.run_handlers(iterator, connection_info) @@ -99,6 +96,7 @@ def run(self, iterator, connection_info, result=True): return 0 def get_hosts_remaining(self, play): + print("inventory get hosts: %s" % self._inventory.get_hosts(play.hosts)) return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts] def 
get_failed_hosts(self, play): @@ -119,13 +117,12 @@ def _queue_task(self, host, task, task_vars, connection_info): if self._cur_worker >= len(self._workers): self._cur_worker = 0 - self._pending_results += 1 - # create a dummy object with plugin loaders set as an easier # way to share them with the forked processes shared_loader_obj = SharedPluginLoaderObj() main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, shared_loader_obj), block=False) + self._pending_results += 1 except (EOFError, IOError, AssertionError) as e: # most likely an abort debug("got an error while queuing: %s" % e) diff --git a/test/units/plugins/action/__init__.py b/test/units/plugins/action/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/action/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/plugins/cache/__init__.py b/test/units/plugins/cache/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/cache/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/plugins/test_cache.py b/test/units/plugins/cache/test_cache.py similarity index 100% rename from test/units/plugins/test_cache.py rename to test/units/plugins/cache/test_cache.py diff --git a/test/units/plugins/callback/__init__.py b/test/units/plugins/callback/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/callback/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/plugins/connections/__init__.py b/test/units/plugins/connections/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/connections/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/plugins/test_connection.py b/test/units/plugins/connections/test_connection.py similarity index 100% rename from test/units/plugins/test_connection.py rename to test/units/plugins/connections/test_connection.py diff --git a/test/units/plugins/filter/__init__.py b/test/units/plugins/filter/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/filter/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/plugins/inventory/__init__.py b/test/units/plugins/inventory/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/inventory/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/plugins/lookup/__init__.py b/test/units/plugins/lookup/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/lookup/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/plugins/shell/__init__.py b/test/units/plugins/shell/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/shell/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/plugins/strategies/__init__.py b/test/units/plugins/strategies/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/strategies/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py new file mode 100644 index 00000000000000..36e22a9719e1f3 --- /dev/null +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -0,0 +1,127 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible.plugins.strategies import StrategyBase +from ansible.executor.task_queue_manager import TaskQueueManager + +from units.mock.loader import DictDataLoader + +class TestVariableManager(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_strategy_base_init(self): + mock_tqm = MagicMock(TaskQueueManager) + mock_tqm._final_q = MagicMock() + strategy_base = StrategyBase(tqm=mock_tqm) + + def test_strategy_base_run(self): + mock_tqm = MagicMock(TaskQueueManager) + mock_tqm._final_q = MagicMock() + mock_tqm._stats = MagicMock() + mock_tqm.send_callback.return_value = None + + mock_iterator = MagicMock() + mock_iterator._play = MagicMock() + mock_iterator._play.handlers = [] + + mock_conn_info = MagicMock() + + mock_tqm._failed_hosts = [] + mock_tqm._unreachable_hosts = [] + strategy_base = StrategyBase(tqm=mock_tqm) + + self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info), 0) + self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info, result=False), 1) + mock_tqm._failed_hosts = ["host1"] + self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info, result=False), 2) + mock_tqm._unreachable_hosts = ["host1"] + self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info, result=False), 3) + + def test_strategy_base_get_hosts(self): + mock_hosts = [] + for i in range(0, 5): + mock_host = MagicMock() + mock_host.name = "host%02d" % (i+1) + mock_hosts.append(mock_host) + + mock_inventory = MagicMock() + mock_inventory.get_hosts.return_value = mock_hosts + + mock_tqm = MagicMock() + mock_tqm._final_q = MagicMock() + mock_tqm.get_inventory.return_value = mock_inventory + + 
mock_play = MagicMock() + mock_play.hosts = ["host%02d" % (i+1) for i in range(0, 5)] + + strategy_base = StrategyBase(tqm=mock_tqm) + + mock_tqm._failed_hosts = [] + mock_tqm._unreachable_hosts = [] + self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts) + + mock_tqm._failed_hosts = ["host01"] + self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[1:]) + self.assertEqual(strategy_base.get_failed_hosts(play=mock_play), [mock_hosts[0]]) + + mock_tqm._unreachable_hosts = ["host02"] + self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:]) + + def test_strategy_base_queue_task(self): + fake_loader = DictDataLoader() + + workers = [] + for i in range(0, 3): + worker_main_q = MagicMock() + worker_main_q.put.return_value = None + worker_result_q = MagicMock() + workers.append([i, worker_main_q, worker_result_q]) + + mock_tqm = MagicMock() + mock_tqm._final_q = MagicMock() + mock_tqm.get_workers.return_value = workers + mock_tqm.get_loader.return_value = fake_loader + + strategy_base = StrategyBase(tqm=mock_tqm) + strategy_base._cur_worker = 0 + strategy_base._pending_results = 0 + strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), connection_info=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 1) + self.assertEqual(strategy_base._pending_results, 1) + strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), connection_info=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 2) + self.assertEqual(strategy_base._pending_results, 2) + strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), connection_info=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 0) + self.assertEqual(strategy_base._pending_results, 3) + workers[0][1].put.side_effect = EOFError + strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), connection_info=MagicMock()) + 
self.assertEqual(strategy_base._cur_worker, 1) + self.assertEqual(strategy_base._pending_results, 3) + diff --git a/test/units/plugins/vars/__init__.py b/test/units/plugins/vars/__init__.py new file mode 100644 index 00000000000000..785fc4599219a8 --- /dev/null +++ b/test/units/plugins/vars/__init__.py @@ -0,0 +1,21 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + From 3d816402ba2ab84aae818b788e3ad174f7bfb9c4 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 18 May 2015 22:44:29 +0200 Subject: [PATCH 0638/3617] cloudstack: add tests for cs_firewall --- test/integration/cloudstack.yml | 1 + .../roles/test_cs_firewall/defaults/main.yml | 3 + .../roles/test_cs_firewall/meta/main.yml | 3 + .../roles/test_cs_firewall/tasks/main.yml | 271 ++++++++++++++++++ 4 files changed, 278 insertions(+) create mode 100644 test/integration/roles/test_cs_firewall/defaults/main.yml create mode 100644 test/integration/roles/test_cs_firewall/meta/main.yml create mode 100644 test/integration/roles/test_cs_firewall/tasks/main.yml diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml index 7cdf593a8c7c4d..546c6fa8064a3e 100644 --- a/test/integration/cloudstack.yml +++ b/test/integration/cloudstack.yml @@ -12,3 +12,4 @@ - { 
role: test_cs_instance, tags: test_cs_instance } - { role: test_cs_instancegroup, tags: test_cs_instancegroup } - { role: test_cs_account, tags: test_cs_account } + - { role: test_cs_firewall, tags: test_cs_firewall } diff --git a/test/integration/roles/test_cs_firewall/defaults/main.yml b/test/integration/roles/test_cs_firewall/defaults/main.yml new file mode 100644 index 00000000000000..4aa4fe846f0b83 --- /dev/null +++ b/test/integration/roles/test_cs_firewall/defaults/main.yml @@ -0,0 +1,3 @@ +--- +cs_firewall_ip_address: 10.100.212.5 +cs_firewall_network: test diff --git a/test/integration/roles/test_cs_firewall/meta/main.yml b/test/integration/roles/test_cs_firewall/meta/main.yml new file mode 100644 index 00000000000000..03e38bd4f7afd2 --- /dev/null +++ b/test/integration/roles/test_cs_firewall/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_firewall/tasks/main.yml b/test/integration/roles/test_cs_firewall/tasks/main.yml new file mode 100644 index 00000000000000..5482ce44978119 --- /dev/null +++ b/test/integration/roles/test_cs_firewall/tasks/main.yml @@ -0,0 +1,271 @@ +--- +- name: setup 80 + cs_firewall: + port: 80 + ip_address: "{{ cs_firewall_ip_address }}" + state: absent + register: fw +- name: verify setup + assert: + that: + - fw|success + +- name: setup 5300 + cs_firewall: + ip_address: "{{ cs_firewall_ip_address }}" + protocol: udp + start_port: 5300 + end_port: 5333 + cidr: 1.2.3.4/24 + state: absent + register: fw +- name: verify setup + assert: + that: + - fw|success + +- name: setup all + cs_firewall: + network: "{{ cs_firewall_network }}" + protocol: all + type: egress + state: absent + register: fw +- name: verify setup + assert: + that: + - fw|success + +- name: test fail if missing params + action: cs_firewall + register: fw + ignore_errors: true +- name: verify results of fail if missing params + assert: + that: + - fw|failed + - fw.msg == "missing required argument for 
protocol 'tcp': start_port or end_port" + +- name: test fail if missing params ip_address ingress + cs_firewall: + port: 80 + register: fw + ignore_errors: true +- name: verify results of fail if missing params ip_address + assert: + that: + - fw|failed + - fw.msg == "missing required argument for type ingress: ip_address" + +- name: test fail if missing params network egress + cs_firewall: + type: egress + register: fw + ignore_errors: true +- name: verify results of fail if missing params ip_address + assert: + that: + - fw|failed + - fw.msg == "missing required argument for type egress: network" + +- name: test present firewall rule ingress 80 + cs_firewall: + port: 80 + ip_address: "{{ cs_firewall_ip_address }}" + register: fw +- name: verify results of present firewall rule ingress 80 + assert: + that: + - fw|success + - fw|changed + - fw.cidr == "0.0.0.0/0" + - fw.ip_address == "{{ cs_firewall_ip_address }}" + - fw.protocol == "tcp" + - fw.start_port == 80 + - fw.end_port == 80 + - fw.type == "ingress" + +- name: test present firewall rule ingress 80 idempotence + cs_firewall: + port: 80 + ip_address: "{{ cs_firewall_ip_address }}" + register: fw +- name: verify results of present firewall rule ingress 80 idempotence + assert: + that: + - fw|success + - not fw|changed + - fw.cidr == "0.0.0.0/0" + - fw.ip_address == "{{ cs_firewall_ip_address }}" + - fw.protocol == "tcp" + - fw.start_port == 80 + - fw.end_port == 80 + - fw.type == "ingress" + +- name: test present firewall rule ingress 5300 + cs_firewall: + ip_address: "{{ cs_firewall_ip_address }}" + protocol: udp + start_port: 5300 + end_port: 5333 + cidr: 1.2.3.4/24 + register: fw +- name: verify results of present firewall rule ingress 5300 + assert: + that: + - fw|success + - fw|changed + - fw.cidr == "1.2.3.4/24" + - fw.ip_address == "{{ cs_firewall_ip_address }}" + - fw.protocol == "udp" + - fw.start_port == 5300 + - fw.end_port == 5333 + - fw.type == "ingress" + +- name: test present firewall rule 
ingress 5300 idempotence + cs_firewall: + ip_address: "{{ cs_firewall_ip_address }}" + protocol: udp + start_port: 5300 + end_port: 5333 + cidr: 1.2.3.4/24 + register: fw +- name: verify results of present firewall rule ingress 5300 idempotence + assert: + that: + - fw|success + - not fw|changed + - fw.cidr == "1.2.3.4/24" + - fw.ip_address == "{{ cs_firewall_ip_address }}" + - fw.protocol == "udp" + - fw.start_port == 5300 + - fw.end_port == 5333 + - fw.type == "ingress" + +- name: test present firewall rule egress all + cs_firewall: + network: "{{ cs_firewall_network }}" + protocol: all + type: egress + register: fw +- name: verify results of present firewall rule egress all + assert: + that: + - fw|success + - fw|changed + - fw.cidr == "0.0.0.0/0" + - fw.network == "{{ cs_firewall_network }}" + - fw.protocol == "all" + - fw.type == "egress" + +- name: test present firewall rule egress all idempotence + cs_firewall: + network: "{{ cs_firewall_network }}" + protocol: all + type: egress + register: fw +- name: verify results of present firewall rule egress all idempotence + assert: + that: + - fw|success + - not fw|changed + - fw.cidr == "0.0.0.0/0" + - fw.network == "{{ cs_firewall_network }}" + - fw.protocol == "all" + - fw.type == "egress" + +- name: test absent firewall rule ingress 80 + cs_firewall: + port: 80 + ip_address: "{{ cs_firewall_ip_address }}" + state: absent + register: fw +- name: verify results of absent firewall rule ingress 80 + assert: + that: + - fw|success + - fw|changed + - fw.cidr == "0.0.0.0/0" + - fw.ip_address == "{{ cs_firewall_ip_address }}" + - fw.protocol == "tcp" + - fw.start_port == 80 + - fw.end_port == 80 + - fw.type == "ingress" + +- name: test absent firewall rule ingress 80 idempotence + cs_firewall: + port: 80 + ip_address: "{{ cs_firewall_ip_address }}" + state: absent + register: fw +- name: verify results of absent firewall rule ingress 80 idempotence + assert: + that: + - fw|success + - not fw|changed + +- name: test 
absent firewall rule ingress 5300 + cs_firewall: + ip_address: "{{ cs_firewall_ip_address }}" + protocol: udp + start_port: 5300 + end_port: 5333 + cidr: 1.2.3.4/24 + state: absent + register: fw +- name: verify results of absent firewall rule ingress 5300 + assert: + that: + - fw|success + - fw|changed + - fw.cidr == "1.2.3.4/24" + - fw.ip_address == "{{ cs_firewall_ip_address }}" + - fw.protocol == "udp" + - fw.start_port == 5300 + - fw.end_port == 5333 + - fw.type == "ingress" + +- name: test absent firewall rule ingress 5300 idempotence + cs_firewall: + ip_address: "{{ cs_firewall_ip_address }}" + protocol: udp + start_port: 5300 + end_port: 5333 + cidr: 1.2.3.4/24 + state: absent + register: fw +- name: verify results of absent firewall rule ingress 5300 idempotence + assert: + that: + - fw|success + - not fw|changed + +- name: test absent firewall rule egress all + cs_firewall: + network: "{{ cs_firewall_network }}" + protocol: all + type: egress + state: absent + register: fw +- name: verify results of absent firewall rule egress all + assert: + that: + - fw|success + - fw|changed + - fw.cidr == "0.0.0.0/0" + - fw.network == "{{ cs_firewall_network }}" + - fw.protocol == "all" + - fw.type == "egress" + +- name: test absent firewall rule egress all idempotence + cs_firewall: + network: "{{ cs_firewall_network }}" + protocol: all + type: egress + state: absent + register: fw +- name: verify results of absent firewall rule egress all idempotence + assert: + that: + - fw|success + - not fw|changed From 3916dc8f9e2d01f75c5d81af9efecb7348291616 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 19 May 2015 10:11:55 +0200 Subject: [PATCH 0639/3617] cloudstack: add tests for cs_portforward --- test/integration/cloudstack.yml | 1 + .../test_cs_portforward/defaults/main.yml | 3 + .../roles/test_cs_portforward/meta/main.yml | 3 + .../roles/test_cs_portforward/tasks/main.yml | 111 ++++++++++++++++++ 4 files changed, 118 insertions(+) create mode 100644 
test/integration/roles/test_cs_portforward/defaults/main.yml create mode 100644 test/integration/roles/test_cs_portforward/meta/main.yml create mode 100644 test/integration/roles/test_cs_portforward/tasks/main.yml diff --git a/test/integration/cloudstack.yml b/test/integration/cloudstack.yml index 7cdf593a8c7c4d..7eff30d22f52d6 100644 --- a/test/integration/cloudstack.yml +++ b/test/integration/cloudstack.yml @@ -11,4 +11,5 @@ - { role: test_cs_securitygroup_rule, tags: test_cs_securitygroup_rule } - { role: test_cs_instance, tags: test_cs_instance } - { role: test_cs_instancegroup, tags: test_cs_instancegroup } + - { role: test_cs_portforward, tags: test_cs_portforward } - { role: test_cs_account, tags: test_cs_account } diff --git a/test/integration/roles/test_cs_portforward/defaults/main.yml b/test/integration/roles/test_cs_portforward/defaults/main.yml new file mode 100644 index 00000000000000..f4083ed220a0b2 --- /dev/null +++ b/test/integration/roles/test_cs_portforward/defaults/main.yml @@ -0,0 +1,3 @@ +--- +cs_portforward_public_ip: "10.100.212.5" +cs_portforward_vm: "{{ cs_resource_prefix }}-vm" diff --git a/test/integration/roles/test_cs_portforward/meta/main.yml b/test/integration/roles/test_cs_portforward/meta/main.yml new file mode 100644 index 00000000000000..03e38bd4f7afd2 --- /dev/null +++ b/test/integration/roles/test_cs_portforward/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - test_cs_common diff --git a/test/integration/roles/test_cs_portforward/tasks/main.yml b/test/integration/roles/test_cs_portforward/tasks/main.yml new file mode 100644 index 00000000000000..02326ec13bd684 --- /dev/null +++ b/test/integration/roles/test_cs_portforward/tasks/main.yml @@ -0,0 +1,111 @@ +--- +- name: setup + cs_portforward: + ip_address: "{{ cs_portforward_public_ip }}" + public_port: 80 + private_port: 8080 + state: absent + register: pf +- name: verify setup + assert: + that: + - pf|success + +- name: test fail if missing params + action: cs_portforward 
+ register: pf + ignore_errors: true +- name: verify results of fail if missing params + assert: + that: + - pf|failed + - 'pf.msg == "missing required arguments: private_port,ip_address,public_port"' + +- name: test present port forwarding + cs_portforward: + ip_address: "{{ cs_portforward_public_ip }}" + public_port: 80 + vm: "{{ cs_portforward_vm }}" + private_port: 8080 + register: pf +- name: verify results of present port forwarding + assert: + that: + - pf|success + - pf|changed + - pf.vm_name == "{{ cs_portforward_vm }}" + - pf.ip_address == "{{ cs_portforward_public_ip }}" + - pf.public_port == 80 + - pf.public_end_port == 80 + - pf.private_port == 8080 + - pf.private_end_port == 8080 + +- name: test present port forwarding idempotence + cs_portforward: + ip_address: "{{ cs_portforward_public_ip }}" + public_port: 80 + vm: "{{ cs_portforward_vm }}" + private_port: 8080 + register: pf +- name: verify results of present port forwarding idempotence + assert: + that: + - pf|success + - not pf|changed + - pf.vm_name == "{{ cs_portforward_vm }}" + - pf.ip_address == "{{ cs_portforward_public_ip }}" + - pf.public_port == 80 + - pf.public_end_port == 80 + - pf.private_port == 8080 + - pf.private_end_port == 8080 + +- name: test change port forwarding + cs_portforward: + ip_address: "{{ cs_portforward_public_ip }}" + public_port: 80 + vm: "{{ cs_portforward_vm }}" + private_port: 8888 + register: pf +- name: verify results of change port forwarding + assert: + that: + - pf|success + - pf|changed + - pf.vm_name == "{{ cs_portforward_vm }}" + - pf.ip_address == "{{ cs_portforward_public_ip }}" + - pf.public_port == 80 + - pf.public_end_port == 80 + - pf.private_port == 8888 + - pf.private_end_port == 8888 + +- name: test absent port forwarding + cs_portforward: + ip_address: "{{ cs_portforward_public_ip }}" + public_port: 80 + private_port: 8888 + state: absent + register: pf +- name: verify results of absent port forwarding + assert: + that: + - pf|success + - 
pf|changed + - pf.vm_name == "{{ cs_portforward_vm }}" + - pf.ip_address == "{{ cs_portforward_public_ip }}" + - pf.public_port == 80 + - pf.public_end_port == 80 + - pf.private_port == 8888 + - pf.private_end_port == 8888 + +- name: test absent port forwarding idempotence + cs_portforward: + ip_address: "{{ cs_portforward_public_ip }}" + public_port: 80 + private_port: 8888 + state: absent + register: pf +- name: verify results of absent port forwarding idempotence + assert: + that: + - pf|success + - not pf|changed From da6d15d1f951155111cccba29b72700bca5613f8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 19 May 2015 10:45:48 -0400 Subject: [PATCH 0640/3617] removed empty choices from files --- lib/ansible/utils/module_docs_fragments/files.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/lib/ansible/utils/module_docs_fragments/files.py index adff1f2f1bf2a8..5087c0cf5081b7 100644 --- a/lib/ansible/utils/module_docs_fragments/files.py +++ b/lib/ansible/utils/module_docs_fragments/files.py @@ -24,25 +24,21 @@ class ModuleDocFragment(object): mode: required: false default: null - choices: [] description: - mode the file or directory should be, such as 0644 as would be fed to I(chmod). As of version 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)). owner: required: false default: null - choices: [] description: - name of the user that should own the file/directory, as would be fed to I(chown) group: required: false default: null - choices: [] description: - name of the group that should own the file/directory, as would be fed to I(chown) seuser: required: false default: null - choices: [] description: - user part of SELinux file context. Will default to system policy, if applicable. 
If set to C(_default), it will use the C(user) portion of the @@ -50,19 +46,16 @@ class ModuleDocFragment(object): serole: required: false default: null - choices: [] description: - role part of SELinux file context, C(_default) feature works as for I(seuser). setype: required: false default: null - choices: [] description: - type part of SELinux file context, C(_default) feature works as for I(seuser). selevel: required: false default: "s0" - choices: [] description: - level part of the SELinux file context. This is the MLS/MCS attribute, sometimes known as the C(range). C(_default) feature works as for From 9a88e0fc8e0ba40cf60cb6d1e021e2080863df19 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 19 May 2015 10:45:48 -0400 Subject: [PATCH 0641/3617] removed empty choices from files --- lib/ansible/utils/module_docs_fragments/files.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/lib/ansible/utils/module_docs_fragments/files.py index adff1f2f1bf2a8..5087c0cf5081b7 100644 --- a/lib/ansible/utils/module_docs_fragments/files.py +++ b/lib/ansible/utils/module_docs_fragments/files.py @@ -24,25 +24,21 @@ class ModuleDocFragment(object): mode: required: false default: null - choices: [] description: - mode the file or directory should be, such as 0644 as would be fed to I(chmod). As of version 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)). owner: required: false default: null - choices: [] description: - name of the user that should own the file/directory, as would be fed to I(chown) group: required: false default: null - choices: [] description: - name of the group that should own the file/directory, as would be fed to I(chown) seuser: required: false default: null - choices: [] description: - user part of SELinux file context. Will default to system policy, if applicable. 
If set to C(_default), it will use the C(user) portion of the @@ -50,19 +46,16 @@ class ModuleDocFragment(object): serole: required: false default: null - choices: [] description: - role part of SELinux file context, C(_default) feature works as for I(seuser). setype: required: false default: null - choices: [] description: - type part of SELinux file context, C(_default) feature works as for I(seuser). selevel: required: false default: "s0" - choices: [] description: - level part of the SELinux file context. This is the MLS/MCS attribute, sometimes known as the C(range). C(_default) feature works as for From 8f29ca23ae3880d925a39b900e8e56f1d6b4d268 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 19 May 2015 17:34:39 +0200 Subject: [PATCH 0642/3617] basic: fix ValueError if value of a type='int' is not an int With this fix, we get a friendly error message: failed: [localhost] => {"failed": true} msg: value of argument start_port is not of type int and we were unable to automatically convert --- lib/ansible/module_utils/basic.py | 101 +++++++++++++++--------------- 1 file changed, 52 insertions(+), 49 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 0c2e57f81a6d03..2116850e2bba7a 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1015,57 +1015,60 @@ def _check_argument_types(self): value = self.params[k] is_invalid = False - if wanted == 'str': - if not isinstance(value, basestring): - self.params[k] = str(value) - elif wanted == 'list': - if not isinstance(value, list): - if isinstance(value, basestring): - self.params[k] = value.split(",") - elif isinstance(value, int) or isinstance(value, float): - self.params[k] = [ str(value) ] - else: - is_invalid = True - elif wanted == 'dict': - if not isinstance(value, dict): - if isinstance(value, basestring): - if value.startswith("{"): - try: - self.params[k] = json.loads(value) - except: - (result, exc) = 
self.safe_eval(value, dict(), include_exceptions=True) - if exc is not None: - self.fail_json(msg="unable to evaluate dictionary for %s" % k) - self.params[k] = result - elif '=' in value: - self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")]) + try: + if wanted == 'str': + if not isinstance(value, basestring): + self.params[k] = str(value) + elif wanted == 'list': + if not isinstance(value, list): + if isinstance(value, basestring): + self.params[k] = value.split(",") + elif isinstance(value, int) or isinstance(value, float): + self.params[k] = [ str(value) ] else: - self.fail_json(msg="dictionary requested, could not parse JSON or key=value") - else: - is_invalid = True - elif wanted == 'bool': - if not isinstance(value, bool): - if isinstance(value, basestring): - self.params[k] = self.boolean(value) - else: - is_invalid = True - elif wanted == 'int': - if not isinstance(value, int): - if isinstance(value, basestring): - self.params[k] = int(value) - else: - is_invalid = True - elif wanted == 'float': - if not isinstance(value, float): - if isinstance(value, basestring): - self.params[k] = float(value) - else: - is_invalid = True - else: - self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) + is_invalid = True + elif wanted == 'dict': + if not isinstance(value, dict): + if isinstance(value, basestring): + if value.startswith("{"): + try: + self.params[k] = json.loads(value) + except: + (result, exc) = self.safe_eval(value, dict(), include_exceptions=True) + if exc is not None: + self.fail_json(msg="unable to evaluate dictionary for %s" % k) + self.params[k] = result + elif '=' in value: + self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")]) + else: + self.fail_json(msg="dictionary requested, could not parse JSON or key=value") + else: + is_invalid = True + elif wanted == 'bool': + if not isinstance(value, bool): + if isinstance(value, basestring): + self.params[k] = 
self.boolean(value) + else: + is_invalid = True + elif wanted == 'int': + if not isinstance(value, int): + if isinstance(value, basestring): + self.params[k] = int(value) + else: + is_invalid = True + elif wanted == 'float': + if not isinstance(value, float): + if isinstance(value, basestring): + self.params[k] = float(value) + else: + is_invalid = True + else: + self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) - if is_invalid: - self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted)) + if is_invalid: + self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted)) + except ValueError, e: + self.fail_json(msg="value of argument %s is not of type %s and we were unable to automatically convert" % (k, wanted)) def _set_defaults(self, pre=True): for (k,v) in self.argument_spec.iteritems(): From 8da580a29c0722e6c939677e155e9780a3fac821 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 19 May 2015 17:34:39 +0200 Subject: [PATCH 0643/3617] basic: fix ValueError if value of a type='int' is not an int With this fix, we get a friendly error message: failed: [localhost] => {"failed": true} msg: value of argument start_port is not of type int and we were unable to automatically convert --- lib/ansible/module_utils/basic.py | 101 +++++++++++++++--------------- 1 file changed, 52 insertions(+), 49 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 1f0abb177643dd..237cb5b106ca73 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1016,57 +1016,60 @@ def _check_argument_types(self): value = self.params[k] is_invalid = False - if wanted == 'str': - if not isinstance(value, basestring): - self.params[k] = str(value) - elif wanted == 'list': - if not isinstance(value, list): - if isinstance(value, basestring): - self.params[k] = value.split(",") - elif isinstance(value, int) 
or isinstance(value, float): - self.params[k] = [ str(value) ] - else: - is_invalid = True - elif wanted == 'dict': - if not isinstance(value, dict): - if isinstance(value, basestring): - if value.startswith("{"): - try: - self.params[k] = json.loads(value) - except: - (result, exc) = self.safe_eval(value, dict(), include_exceptions=True) - if exc is not None: - self.fail_json(msg="unable to evaluate dictionary for %s" % k) - self.params[k] = result - elif '=' in value: - self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")]) + try: + if wanted == 'str': + if not isinstance(value, basestring): + self.params[k] = str(value) + elif wanted == 'list': + if not isinstance(value, list): + if isinstance(value, basestring): + self.params[k] = value.split(",") + elif isinstance(value, int) or isinstance(value, float): + self.params[k] = [ str(value) ] else: - self.fail_json(msg="dictionary requested, could not parse JSON or key=value") - else: - is_invalid = True - elif wanted == 'bool': - if not isinstance(value, bool): - if isinstance(value, basestring): - self.params[k] = self.boolean(value) - else: - is_invalid = True - elif wanted == 'int': - if not isinstance(value, int): - if isinstance(value, basestring): - self.params[k] = int(value) - else: - is_invalid = True - elif wanted == 'float': - if not isinstance(value, float): - if isinstance(value, basestring): - self.params[k] = float(value) - else: - is_invalid = True - else: - self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) + is_invalid = True + elif wanted == 'dict': + if not isinstance(value, dict): + if isinstance(value, basestring): + if value.startswith("{"): + try: + self.params[k] = json.loads(value) + except: + (result, exc) = self.safe_eval(value, dict(), include_exceptions=True) + if exc is not None: + self.fail_json(msg="unable to evaluate dictionary for %s" % k) + self.params[k] = result + elif '=' in value: + self.params[k] = 
dict([x.strip().split("=", 1) for x in value.split(",")]) + else: + self.fail_json(msg="dictionary requested, could not parse JSON or key=value") + else: + is_invalid = True + elif wanted == 'bool': + if not isinstance(value, bool): + if isinstance(value, basestring): + self.params[k] = self.boolean(value) + else: + is_invalid = True + elif wanted == 'int': + if not isinstance(value, int): + if isinstance(value, basestring): + self.params[k] = int(value) + else: + is_invalid = True + elif wanted == 'float': + if not isinstance(value, float): + if isinstance(value, basestring): + self.params[k] = float(value) + else: + is_invalid = True + else: + self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) - if is_invalid: - self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted)) + if is_invalid: + self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted)) + except ValueError, e: + self.fail_json(msg="value of argument %s is not of type %s and we were unable to automatically convert" % (k, wanted)) def _set_defaults(self, pre=True): for (k,v) in self.argument_spec.iteritems(): From b93674b3807cfae097ce2156344d26f38db2f535 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 19 May 2015 14:18:47 -0700 Subject: [PATCH 0644/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 7dd9f57e161b78..0c04a54f67d4d5 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 7dd9f57e161b78981eb797a4c77fd6e7042ad7fd +Subproject commit 0c04a54f67d4d5fea16b5ea2cc3d56fe98a68dfe diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 576d94e8d4fa8e..fefbf7c41a0b24 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ 
-Subproject commit 576d94e8d4fa8e79216441efd65be62cfb0c603f +Subproject commit fefbf7c41a0b24097e9696aafcb57154eee6665b From 73804b375e9124f8b98b214e8f5c2b3698fc5647 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 19 May 2015 14:24:16 -0700 Subject: [PATCH 0645/3617] Update submodule ref for core doc update --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 0c04a54f67d4d5..8d2fdf2aff1106 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 0c04a54f67d4d5fea16b5ea2cc3d56fe98a68dfe +Subproject commit 8d2fdf2aff1106fab5a8a9d17719383c5714efe8 From b48be7c484a723fdd73f08e6bb5d725b24eeea02 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 19 May 2015 14:27:54 -0700 Subject: [PATCH 0646/3617] Update submodule refs for v2 --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 3dd0f2c40f9dbc..c935d4dc08949d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 3dd0f2c40f9dbc2311021e072a06671cd3da681a +Subproject commit c935d4dc08949df92fd08c28caf6419687f21df8 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 20bf6d825e807a..fefbf7c41a0b24 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 20bf6d825e807a590585f944c405d83c53704f43 +Subproject commit fefbf7c41a0b24097e9696aafcb57154eee6665b From 0bb4101842fda6392cf4ad97ee2fa1335532cdb5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 19 May 2015 15:13:09 -0700 Subject: [PATCH 0647/3617] Fix doc formatting --- docsite/rst/developing_modules.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 
dd4d6b4d7ad312..0748a82effa66e 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -473,14 +473,14 @@ Module checklist * Avoid catchall exceptions, they are not very useful unless the underlying API gives very good error messages pertaining the attempted action. * The module must not use sys.exit() --> use fail_json() from the module object * Import custom packages in try/except and handled with fail_json() in main() e.g.:: -* The return structure should be consistent, even if NA/None are used for keys normally returned under other options. - try: - import foo - HAS_LIB=True - except: - HAS_LIB=False + try: + import foo + HAS_LIB=True + except: + HAS_LIB=False +* The return structure should be consistent, even if NA/None are used for keys normally returned under other options. * Are module actions idempotent? If not document in the descriptions or the notes * Import module snippets `from ansible.module_utils.basic import *` at the bottom, conserves line numbers for debugging. * Try to normalize parameters with other modules, you can have aliases for when user is more familiar with underlying API name for the option From 2180981a6e56c06d37d83e73bf81c40ffad505f3 Mon Sep 17 00:00:00 2001 From: Jeremy Olexa Date: Tue, 19 May 2015 22:18:24 -0500 Subject: [PATCH 0648/3617] Minor Fix for broken link --- docsite/rst/intro_windows.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index b675cd77d9d683..5dd9ad5d1d0f85 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -129,7 +129,7 @@ Note there are a few other Ansible modules that don't start with "win" that also Developers: Supported modules and how it works `````````````````````````````````````````````` -Developing ansible modules are covered in a `later section of the documentation `_, with a focus on Linux/Unix. 
+Developing ansible modules are covered in a `later section of the documentation `_, with a focus on Linux/Unix. What if you want to write Windows modules for ansible though? For Windows, ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. From 96759cda82273953553732c6b6c2ef8c851da2e6 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 13 Feb 2015 10:39:10 -0500 Subject: [PATCH 0649/3617] Add deprecation notices to the old nova inventory --- plugins/inventory/nova.ini | 3 +++ plugins/inventory/nova.py | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/plugins/inventory/nova.ini b/plugins/inventory/nova.ini index 4900c49651603b..c5cfeef8104efc 100644 --- a/plugins/inventory/nova.ini +++ b/plugins/inventory/nova.ini @@ -1,4 +1,7 @@ # Ansible OpenStack external inventory script +# DEPRECATED: please use openstack.py inventory which is configured for +# auth using the os-client-config library and either clouds.yaml or standard +# openstack environment variables [openstack] diff --git a/plugins/inventory/nova.py b/plugins/inventory/nova.py index 7e58390ee1a147..af2e7a0760a71a 100644 --- a/plugins/inventory/nova.py +++ b/plugins/inventory/nova.py @@ -17,6 +17,10 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# WARNING: This file is deprecated. New work should focus on the openstack.py +# inventory module, which properly handles multiple clouds as well as keystone +# v3 and keystone auth plugins + import sys import re import os @@ -28,6 +32,9 @@ except ImportError: import simplejson as json + +sys.stderr.write("WARNING: this inventory module is deprecated. 
please migrate usage to openstack.py\n") + ################################################### # executed with no parameters, return the list of # all groups and hosts From 3b5a3aa80a118387cd6af0161cf957f060813873 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 20 May 2015 17:58:40 -0700 Subject: [PATCH 0650/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 8d2fdf2aff1106..e591763d624ab5 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8d2fdf2aff1106fab5a8a9d17719383c5714efe8 +Subproject commit e591763d624ab5d456bbd2cf97bd84466cbc5988 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index fefbf7c41a0b24..8fb19f0e47b699 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit fefbf7c41a0b24097e9696aafcb57154eee6665b +Subproject commit 8fb19f0e47b6992db89adcaade7f38225c552107 From cc51e6b7c217816836901aa312195de80ba4c9fb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 20 May 2015 18:12:09 -0700 Subject: [PATCH 0651/3617] Update submodule refs in v2 --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c935d4dc08949d..cbbe4196bdb047 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c935d4dc08949df92fd08c28caf6419687f21df8 +Subproject commit cbbe4196bdb047a2d8e9f1132519a0de55fa0c5a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index fefbf7c41a0b24..8fb19f0e47b699 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit fefbf7c41a0b24097e9696aafcb57154eee6665b +Subproject commit 8fb19f0e47b6992db89adcaade7f38225c552107 
From 9921a1d2be0a254fe17e40d925a3fe36399e2f87 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 21 May 2015 02:03:38 -0500 Subject: [PATCH 0652/3617] Unit tests for base strategy class (v2) --- lib/ansible/plugins/strategies/__init__.py | 59 ----- .../plugins/strategies/test_strategy_base.py | 230 +++++++++++++++++- 2 files changed, 229 insertions(+), 60 deletions(-) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 7cc1709e08474d..e933ca73d4c580 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -236,8 +236,6 @@ def _wait_on_pending_results(self, iterator): debug("waiting for pending results (%d left)" % self._pending_results) results = self._process_pending_results(iterator) ret_results.extend(results) - if self._tqm._terminated: - break time.sleep(0.01) return ret_results @@ -336,63 +334,6 @@ def _load_included_file(self, included_file): return block_list - def cleanup(self, iterator, connection_info): - ''' - Iterates through failed hosts and runs any outstanding rescue/always blocks - and handlers which may still need to be run after a failure. 
- ''' - - debug("in cleanup") - result = True - - debug("getting failed hosts") - failed_hosts = self.get_failed_hosts(iterator._play) - if len(failed_hosts) == 0: - debug("there are no failed hosts") - return result - - debug("marking hosts failed in the iterator") - # mark the host as failed in the iterator so it will take - # any required rescue paths which may be outstanding - for host in failed_hosts: - iterator.mark_host_failed(host) - - debug("clearing the failed hosts list") - # clear the failed hosts dictionary now while also - for entry in self._tqm._failed_hosts.keys(): - del self._tqm._failed_hosts[entry] - - work_to_do = True - while work_to_do: - work_to_do = False - for host in failed_hosts: - host_name = host.name - - if host_name in self._tqm._failed_hosts: - iterator.mark_host_failed(host) - del self._tqm._failed_hosts[host_name] - - if host_name in self._blocked_hosts: - work_to_do = True - continue - elif iterator.get_next_task_for_host(host, peek=True) and host_name not in self._tqm._unreachable_hosts: - work_to_do = True - - # pop the task, mark the host blocked, and queue it - self._blocked_hosts[host_name] = True - task = iterator.get_next_task_for_host(host) - task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) - self._tqm.send_callback('v2_playbook_on_cleanup_task_start', task) - self._queue_task(host, task, task_vars, connection_info) - - self._process_pending_results(iterator) - time.sleep(0.01) - - # no more work, wait until the queue is drained - self._wait_on_pending_results(iterator) - - return result - def run_handlers(self, iterator, connection_info): ''' Runs handlers on those hosts which have been notified. 
diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 36e22a9719e1f3..7d8cb42ee6e9d1 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -22,12 +22,15 @@ from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock +from ansible.errors import AnsibleError, AnsibleParserError from ansible.plugins.strategies import StrategyBase from ansible.executor.task_queue_manager import TaskQueueManager +from ansible.executor.task_result import TaskResult +from six.moves import queue as Queue from units.mock.loader import DictDataLoader -class TestVariableManager(unittest.TestCase): +class TestStrategyBase(unittest.TestCase): def setUp(self): pass @@ -125,3 +128,228 @@ def test_strategy_base_queue_task(self): self.assertEqual(strategy_base._cur_worker, 1) self.assertEqual(strategy_base._pending_results, 3) + def test_strategy_base_process_pending_results(self): + mock_tqm = MagicMock() + mock_tqm._terminated = False + mock_tqm._failed_hosts = dict() + mock_tqm._unreachable_hosts = dict() + mock_tqm.send_callback.return_value = None + + queue_items = [] + def _queue_empty(*args, **kwargs): + return len(queue_items) == 0 + def _queue_get(*args, **kwargs): + if len(queue_items) == 0: + raise Queue.Empty + else: + return queue_items.pop() + + mock_queue = MagicMock() + mock_queue.empty.side_effect = _queue_empty + mock_queue.get.side_effect = _queue_get + mock_tqm._final_q = mock_queue + + mock_tqm._stats = MagicMock() + mock_tqm._stats.increment.return_value = None + + mock_iterator = MagicMock() + mock_iterator.mark_host_failed.return_value = None + + mock_host = MagicMock() + mock_host.name = 'test01' + mock_host.vars = dict() + + mock_task = MagicMock() + mock_task._role = None + mock_task.ignore_errors = False + + mock_group = MagicMock() + mock_group.add_host.return_value = None + + def 
_get_host(host_name): + if host_name == 'test01': + return mock_host + return None + def _get_group(group_name): + if group_name in ('all', 'foo'): + return mock_group + return None + + mock_inventory = MagicMock() + mock_inventory._hosts_cache = dict() + mock_inventory.get_host.side_effect = _get_host + mock_inventory.get_group.side_effect = _get_group + mock_inventory.clear_pattern_cache.return_value = None + + mock_var_mgr = MagicMock() + mock_var_mgr.set_host_variable.return_value = None + mock_var_mgr.set_host_facts.return_value = None + + strategy_base = StrategyBase(tqm=mock_tqm) + strategy_base._inventory = mock_inventory + strategy_base._variable_manager = mock_var_mgr + strategy_base._blocked_hosts = dict() + strategy_base._notified_handlers = dict() + + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 0) + + task_result = TaskResult(host=mock_host, task=mock_task, return_data=dict(changed=True)) + queue_items.append(('host_task_ok', task_result)) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(results[0], task_result) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + + task_result = TaskResult(host=mock_host, task=mock_task, return_data='{"failed":true}') + queue_items.append(('host_task_failed', task_result)) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + results = strategy_base._process_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(results[0], task_result) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + self.assertIn('test01', mock_tqm._failed_hosts) + del mock_tqm._failed_hosts['test01'] + + task_result = 
TaskResult(host=mock_host, task=mock_task, return_data='{}') + queue_items.append(('host_unreachable', task_result)) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(results[0], task_result) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + self.assertIn('test01', mock_tqm._unreachable_hosts) + del mock_tqm._unreachable_hosts['test01'] + + task_result = TaskResult(host=mock_host, task=mock_task, return_data='{}') + queue_items.append(('host_task_skipped', task_result)) + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + results = strategy_base._wait_on_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 1) + self.assertEqual(results[0], task_result) + self.assertEqual(strategy_base._pending_results, 0) + self.assertNotIn('test01', strategy_base._blocked_hosts) + + strategy_base._blocked_hosts['test01'] = True + strategy_base._pending_results = 1 + + queue_items.append(('add_host', dict(add_host=dict(host_name='newhost01', new_groups=['foo'])))) + results = strategy_base._process_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 0) + self.assertEqual(strategy_base._pending_results, 1) + self.assertIn('test01', strategy_base._blocked_hosts) + + queue_items.append(('add_group', mock_host, dict(add_group=dict(group_name='foo')))) + results = strategy_base._process_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 0) + self.assertEqual(strategy_base._pending_results, 1) + self.assertIn('test01', strategy_base._blocked_hosts) + + queue_items.append(('notify_handler', mock_host, 'test handler')) + results = strategy_base._process_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 0) + 
self.assertEqual(strategy_base._pending_results, 1) + self.assertIn('test01', strategy_base._blocked_hosts) + self.assertIn('test handler', strategy_base._notified_handlers) + self.assertIn(mock_host, strategy_base._notified_handlers['test handler']) + + queue_items.append(('set_host_var', mock_host, 'foo', 'bar')) + results = strategy_base._process_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 0) + self.assertEqual(strategy_base._pending_results, 1) + + queue_items.append(('set_host_facts', mock_host, 'foo', dict())) + results = strategy_base._process_pending_results(iterator=mock_iterator) + self.assertEqual(len(results), 0) + self.assertEqual(strategy_base._pending_results, 1) + + queue_items.append(('bad')) + self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator) + + def test_strategy_base_load_included_file(self): + fake_loader = DictDataLoader({ + "test.yml": """ + - debug: msg='foo' + """, + "bad.yml": """ + """, + }) + + mock_tqm = MagicMock() + mock_tqm._final_q = MagicMock() + + strategy_base = StrategyBase(tqm=mock_tqm) + strategy_base._loader = fake_loader + + mock_play = MagicMock() + + mock_block = MagicMock() + mock_block._play = mock_play + mock_block.vars = dict() + + mock_task = MagicMock() + mock_task._block = mock_block + mock_task._role = None + + mock_inc_file = MagicMock() + mock_inc_file._task = mock_task + + mock_inc_file._filename = "test.yml" + res = strategy_base._load_included_file(included_file=mock_inc_file) + + mock_inc_file._filename = "bad.yml" + self.assertRaises(AnsibleParserError, strategy_base._load_included_file, included_file=mock_inc_file) + + def test_strategy_base_run_handlers(self): + workers = [] + for i in range(0, 3): + worker_main_q = MagicMock() + worker_main_q.put.return_value = None + worker_result_q = MagicMock() + workers.append([i, worker_main_q, worker_result_q]) + + mock_tqm = MagicMock() + mock_tqm._final_q = MagicMock() + 
mock_tqm.get_workers.return_value = workers + mock_tqm.send_callback.return_value = None + + mock_conn_info = MagicMock() + + mock_handler_task = MagicMock() + mock_handler_task.get_name.return_value = "test handler" + mock_handler_task.has_triggered.return_value = False + + mock_handler = MagicMock() + mock_handler.block = [mock_handler_task] + mock_handler.flag_for_host.return_value = False + + mock_play = MagicMock() + mock_play.handlers = [mock_handler] + + mock_host = MagicMock() + mock_host.name = "test01" + + mock_iterator = MagicMock() + + mock_inventory = MagicMock() + mock_inventory.get_hosts.return_value = [mock_host] + + mock_var_mgr = MagicMock() + mock_var_mgr.get_vars.return_value = dict() + + mock_iterator = MagicMock + mock_iterator._play = mock_play + + strategy_base = StrategyBase(tqm=mock_tqm) + strategy_base._inventory = mock_inventory + strategy_base._notified_handlers = {"test handler": [mock_host]} + + result = strategy_base.run_handlers(iterator=mock_iterator, connection_info=mock_conn_info) From 04e15ab54f0edab7c89895dafe7d5ec2a9b60ae5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 21 May 2015 07:53:00 -0700 Subject: [PATCH 0653/3617] Update v2 submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index cbbe4196bdb047..e10a581abdf375 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit cbbe4196bdb047a2d8e9f1132519a0de55fa0c5a +Subproject commit e10a581abdf375b855418897944d5206682994b6 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8fb19f0e47b699..24390f1ac69fe4 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8fb19f0e47b6992db89adcaade7f38225c552107 +Subproject commit 24390f1ac69fe4731e143eab16120bc422fd6233 From 16c2de84ec3d9d679e5e33b8cd55fddb20bc908c Mon 
Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 21 May 2015 10:00:25 -0700 Subject: [PATCH 0654/3617] Update the submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e591763d624ab5..150b71f11af607 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e591763d624ab5d456bbd2cf97bd84466cbc5988 +Subproject commit 150b71f11af607a31b108f2171308149c99f2cbd diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8fb19f0e47b699..5187c7fcd72d47 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8fb19f0e47b6992db89adcaade7f38225c552107 +Subproject commit 5187c7fcd72d4750d5a1c9398ceaf62527272eaf From b312e97a30a9fa855abe65a3bcfb168d329460d9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 21 May 2015 10:59:57 -0700 Subject: [PATCH 0655/3617] Think that integration tests are failing in json due to lack of json mimetype. Make a short wrapper to fix that. 
--- test/integration/roles/test_uri/files/testserver.py | 6 ++++++ test/integration/roles/test_uri/tasks/main.yml | 4 ++++ 2 files changed, 10 insertions(+) create mode 100644 test/integration/roles/test_uri/files/testserver.py diff --git a/test/integration/roles/test_uri/files/testserver.py b/test/integration/roles/test_uri/files/testserver.py new file mode 100644 index 00000000000000..03cbfec507612d --- /dev/null +++ b/test/integration/roles/test_uri/files/testserver.py @@ -0,0 +1,6 @@ +import mimetypes +import SimpleHTTPServer + +if __name__ == '__main__': + mimetypes.add_type('application/json', '.json') + SimpleHTTPServer.test() diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 6072754f224551..b6fc5094cb9c6e 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -37,6 +37,10 @@ dest: "{{files_dir}}/{{ item }}" with_sequence: start=0 end=30 format=fail%d.json +- copy: + src: "testserver.py" + dest: "{{ output_dir }}/testserver.py" + - name: verify that python2 is installed so this test can continue shell: which python2 register: py2 From 7af2632c87b97c60307f956815cd09bc3cd46b90 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 21 May 2015 11:56:58 -0700 Subject: [PATCH 0656/3617] Forgot to invoke wrapper instead of SimpleHttpServer --- test/integration/Makefile | 3 +++ test/integration/roles/test_uri/tasks/main.yml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 923a29bc9fec98..513b3b2311a411 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -42,6 +42,9 @@ unicode: test_templating_settings: ansible-playbook test_templating_settings.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) +mine: + ansible-playbook mine.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + non_destructive: 
ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index b6fc5094cb9c6e..66e01ae8e53124 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -46,7 +46,7 @@ register: py2 - name: start SimpleHTTPServer - shell: cd {{ files_dir }} && {{ py2.stdout }} -m SimpleHTTPServer {{ http_port }} + shell: cd {{ files_dir }} && {{ py2.stdout }} {{ output_dir}}/testserver.py {{ http_port }} async: 60 # this test set takes ~15 seconds to run poll: 0 From a8d52e3e940543300ad15e80de8d8b70b2e45a24 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 21 May 2015 12:24:41 -0700 Subject: [PATCH 0657/3617] Have to setup the proper mime-types before importing SImpleHttpServer --- test/integration/Makefile | 3 --- test/integration/roles/test_uri/files/testserver.py | 3 ++- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 513b3b2311a411..923a29bc9fec98 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -42,9 +42,6 @@ unicode: test_templating_settings: ansible-playbook test_templating_settings.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) -mine: - ansible-playbook mine.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) - non_destructive: ansible-playbook non_destructive.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) diff --git a/test/integration/roles/test_uri/files/testserver.py b/test/integration/roles/test_uri/files/testserver.py index 03cbfec507612d..d0d24a0050fc39 100644 --- a/test/integration/roles/test_uri/files/testserver.py +++ b/test/integration/roles/test_uri/files/testserver.py @@ -1,6 +1,7 @@ import mimetypes -import SimpleHTTPServer if __name__ == '__main__': + mimetypes.init() 
mimetypes.add_type('application/json', '.json') + import SimpleHTTPServer SimpleHTTPServer.test() From ecd5eb902db1156206f2eb35aac42b340759d310 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 22 May 2015 03:32:40 -0500 Subject: [PATCH 0658/3617] Adding unit tests for ConnectionInformation (v2) --- lib/ansible/executor/connection_info.py | 10 +- .../executor/test_connection_information.py | 153 ++++++++++++++++++ 2 files changed, 154 insertions(+), 9 deletions(-) create mode 100644 test/units/executor/test_connection_information.py diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index bf78cf63a5b122..424ac062b3da6f 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -88,14 +88,6 @@ def __init__(self, play=None, options=None, passwords=None): if play: self.set_play(play) - def __repr__(self): - value = "CONNECTION INFO:\n" - fields = self._get_fields() - fields.sort() - for field in fields: - value += "%20s : %s\n" % (field, getattr(self, field)) - return value - def set_play(self, play): ''' Configures this connection information instance with data from @@ -199,7 +191,7 @@ def set_task_and_host_override(self, task, host): for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'): if hasattr(task, attr): attr_val = getattr(task, attr) - if attr_val: + if attr_val is not None: setattr(new_info, attr, attr_val) # finally, use the MAGIC_VARIABLE_MAPPING dictionary to update this diff --git a/test/units/executor/test_connection_information.py b/test/units/executor/test_connection_information.py new file mode 100644 index 00000000000000..13b14c25de865e --- /dev/null +++ b/test/units/executor/test_connection_information.py @@ -0,0 +1,153 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of 
the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible import constants as C +from ansible.cli import CLI +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.executor.connection_info import ConnectionInformation + +from units.mock.loader import DictDataLoader + +class TestConnectionInformation(unittest.TestCase): + + def setUp(self): + self._parser = CLI.base_parser( + runas_opts = True, + meta_opts = True, + runtask_opts = True, + vault_opts = True, + async_opts = True, + connect_opts = True, + subset_opts = True, + check_opts = True, + diff_opts = True, + ) + + def tearDown(self): + pass + + def test_connection_info(self): + (options, args) = self._parser.parse_args(['-vv', '--check']) + conn_info = ConnectionInformation(options=options) + self.assertEqual(conn_info.connection, 'smart') + self.assertEqual(conn_info.remote_addr, None) + self.assertEqual(conn_info.remote_user, 'root') + self.assertEqual(conn_info.password, '') + self.assertEqual(conn_info.port, None) + self.assertEqual(conn_info.private_key_file, C.DEFAULT_PRIVATE_KEY_FILE) + self.assertEqual(conn_info.timeout, C.DEFAULT_TIMEOUT) + self.assertEqual(conn_info.shell, None) + self.assertEqual(conn_info.verbosity, 2) + self.assertEqual(conn_info.check_mode, True) + self.assertEqual(conn_info.no_log, 
False) + + mock_play = MagicMock() + mock_play.connection = 'mock' + mock_play.remote_user = 'mock' + mock_play.port = 1234 + mock_play.become = True + mock_play.become_method = 'mock' + mock_play.become_user = 'mockroot' + mock_play.become_pass = 'mockpass' + mock_play.no_log = True + mock_play.environment = dict(mock='mockenv') + + conn_info = ConnectionInformation(play=mock_play, options=options) + self.assertEqual(conn_info.connection, 'mock') + self.assertEqual(conn_info.remote_user, 'mock') + self.assertEqual(conn_info.password, '') + self.assertEqual(conn_info.port, 1234) + self.assertEqual(conn_info.no_log, True) + self.assertEqual(conn_info.environment, dict(mock="mockenv")) + self.assertEqual(conn_info.become, True) + self.assertEqual(conn_info.become_method, "mock") + self.assertEqual(conn_info.become_user, "mockroot") + self.assertEqual(conn_info.become_pass, "mockpass") + + mock_task = MagicMock() + mock_task.connection = 'mocktask' + mock_task.remote_user = 'mocktask' + mock_task.become = True + mock_task.become_method = 'mocktask' + mock_task.become_user = 'mocktaskroot' + mock_task.become_pass = 'mocktaskpass' + mock_task.no_log = False + mock_task.environment = dict(mock='mocktaskenv') + + mock_host = MagicMock() + mock_host.get_vars.return_value = dict( + ansible_connection = 'mock_inventory', + ansible_ssh_port = 4321, + ) + + conn_info = ConnectionInformation(play=mock_play, options=options) + conn_info = conn_info.set_task_and_host_override(task=mock_task, host=mock_host) + self.assertEqual(conn_info.connection, 'mock_inventory') + self.assertEqual(conn_info.remote_user, 'mocktask') + self.assertEqual(conn_info.port, 4321) + self.assertEqual(conn_info.no_log, False) + self.assertEqual(conn_info.environment, dict(mock="mocktaskenv")) + self.assertEqual(conn_info.become, True) + self.assertEqual(conn_info.become_method, "mocktask") + self.assertEqual(conn_info.become_user, "mocktaskroot") + self.assertEqual(conn_info.become_pass, "mocktaskpass") 
+ + def test_connection_info_make_become_cmd(self): + (options, args) = self._parser.parse_args([]) + conn_info = ConnectionInformation(options=options) + + default_cmd = "/bin/foo" + default_exe = "/bin/bash" + sudo_exe = C.DEFAULT_SUDO_EXE + sudo_flags = C.DEFAULT_SUDO_FLAGS + su_exe = C.DEFAULT_SU_EXE + su_flags = C.DEFAULT_SU_FLAGS + pbrun_exe = 'pbrun' + pbrun_flags = '' + + (cmd, prompt, key) = conn_info.make_become_cmd(cmd=default_cmd, executable=default_exe) + self.assertEqual(cmd, default_cmd) + + conn_info.become = True + conn_info.become_user = 'foo' + + conn_info.become_method = 'sudo' + (cmd, prompt, key) = conn_info.make_become_cmd(cmd=default_cmd, executable="/bin/bash") + self.assertEqual(cmd, """%s -c '%s -k && %s %s -S -p "%s" -u %s %s -c '"'"'echo %s; %s'"'"''""" % (default_exe, sudo_exe, sudo_exe, sudo_flags, prompt, conn_info.become_user, default_exe, key, default_cmd)) + + conn_info.become_method = 'su' + (cmd, prompt, key) = conn_info.make_become_cmd(cmd=default_cmd, executable="/bin/bash") + self.assertEqual(cmd, """%s -c '%s %s -c "%s -c '"'"'echo %s; %s'"'"'"'""" % (default_exe, su_exe, conn_info.become_user, default_exe, key, default_cmd)) + + conn_info.become_method = 'pbrun' + (cmd, prompt, key) = conn_info.make_become_cmd(cmd=default_cmd, executable="/bin/bash") + self.assertEqual(cmd, """%s -c '%s -b -l %s -u %s '"'"'echo %s; %s'"'"''""" % (default_exe, pbrun_exe, pbrun_flags, conn_info.become_user, key, default_cmd)) + + conn_info.become_method = 'pfexec' + (cmd, prompt, key) = conn_info.make_become_cmd(cmd=default_cmd, executable="/bin/bash") + self.assertEqual(cmd, """%s -c '%s %s "'"'"'echo %s; %s'"'"'"'""" % (default_exe, pbrun_exe, pbrun_flags, key, default_cmd)) + + conn_info.become_method = 'bad' + self.assertRaises(AnsibleError, conn_info.make_become_cmd, cmd=default_cmd, executable="/bin/bash") + From 838ff320019d4858024950977279a62ad2bed10d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 22 May 2015 08:38:39 -0500 
Subject: [PATCH 0659/3617] Fix unit test for conn_info (v2) The default user expected in the connection information is the current user, not root --- test/units/executor/test_connection_information.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/units/executor/test_connection_information.py b/test/units/executor/test_connection_information.py index 13b14c25de865e..65575c0f93df87 100644 --- a/test/units/executor/test_connection_information.py +++ b/test/units/executor/test_connection_information.py @@ -19,6 +19,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import pwd +import os + from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock @@ -52,7 +55,7 @@ def test_connection_info(self): conn_info = ConnectionInformation(options=options) self.assertEqual(conn_info.connection, 'smart') self.assertEqual(conn_info.remote_addr, None) - self.assertEqual(conn_info.remote_user, 'root') + self.assertEqual(conn_info.remote_user, pwd.getpwuid(os.geteuid())[0]) self.assertEqual(conn_info.password, '') self.assertEqual(conn_info.port, None) self.assertEqual(conn_info.private_key_file, C.DEFAULT_PRIVATE_KEY_FILE) From ed4df57361529020cfa09e6b316f1e3ea0acd05d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 22 May 2015 13:30:00 -0500 Subject: [PATCH 0660/3617] Submodule update for devel --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 150b71f11af607..b5399d34464475 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 150b71f11af607a31b108f2171308149c99f2cbd +Subproject commit b5399d34464475a9aa87c6b928628cd262022cd5 From 301019059272ab0a1b288a20c9772107b592dccd Mon Sep 17 00:00:00 2001 From: Florian Apolloner Date: Sat, 23 May 2015 08:42:17 -0500 Subject: [PATCH 0661/3617] Fixing up the hacking 
module_formatter code for v2 --- hacking/module_formatter.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index c3aca94949c2bf..9002b9d8d1c358 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -33,8 +33,8 @@ import cgi from jinja2 import Environment, FileSystemLoader -import ansible.utils -import ansible.utils.module_docs as module_docs +from ansible.utils import module_docs +from ansible.utils.vars import merge_hash ##################################################################################### # constants and paths @@ -135,7 +135,7 @@ def list_modules(module_dir, depth=0): res = list_modules(d, depth + 1) for key in res.keys(): if key in categories: - categories[key] = ansible.utils.merge_hash(categories[key], res[key]) + categories[key] = merge_hash(categories[key], res[key]) res.pop(key, None) if depth < 2: @@ -236,11 +236,11 @@ def process_module(module, options, env, template, outputname, module_map, alias print "rendering: %s" % module # use ansible core library to parse out doc metadata YAML and plaintext examples - doc, examples, returndocs= ansible.utils.module_docs.get_docstring(fname, verbose=options.verbose) + doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose) # crash if module is missing documentation and not explicitly hidden from docs index if doc is None: - if module in ansible.utils.module_docs.BLACKLIST_MODULES: + if module in module_docs.BLACKLIST_MODULES: return "SKIPPED" else: sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) @@ -278,8 +278,9 @@ def process_module(module, options, env, template, outputname, module_map, alias if added and added_float < TO_OLD_TO_BE_NOTABLE: del doc['version_added'] - for (k,v) in doc['options'].iteritems(): - all_keys.append(k) + if 'options' in doc: + for (k,v) in doc['options'].iteritems(): + 
all_keys.append(k) all_keys = sorted(all_keys) From b6ea8de39999ccf67c0afcbeceb27345ab1cbb54 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Sat, 23 May 2015 20:30:08 +0200 Subject: [PATCH 0662/3617] limit extensions for files in group/host_vars dir inventory vars: make loading from a directory obey the same rules as when checking the base paths, looking at the file name extensions as defined in CONSTANTS.YAML_FILENAME_EXTENSIONS Fixes Github issue #11017 --- lib/ansible/utils/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py index 476a1e28e81939..eb6fa2a712ba89 100644 --- a/lib/ansible/utils/__init__.py +++ b/lib/ansible/utils/__init__.py @@ -1617,7 +1617,9 @@ def _load_vars_from_folder(folder_path, results, vault_password=None): names.sort() # do not parse hidden files or dirs, e.g. .svn/ - paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')] + paths = [os.path.join(folder_path, name) for name in names + if not name.startswith('.') + and os.path.splitext(name)[1] in C.YAML_FILENAME_EXTENSIONS] for path in paths: _found, results = _load_vars_from_path(path, results, vault_password=vault_password) return results From b92d70c5b66aa741d35e9f6a294d27f43367205e Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Sat, 23 May 2015 21:37:12 +0200 Subject: [PATCH 0663/3617] tests files extensions in group/host_vars dir only files with extensions as per C.YAML_FILENAME_EXTENSIONS should be parsed --- test/units/TestInventory.py | 7 ++++++- test/units/inventory_test_data/group_vars/noparse/all.yml~ | 2 ++ test/units/inventory_test_data/group_vars/noparse/file.txt | 2 ++ test/units/inventory_test_data/group_vars/parse/all.yml | 2 ++ test/units/inventory_test_data/simple_hosts | 6 ++++++ 5 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 test/units/inventory_test_data/group_vars/noparse/all.yml~ create mode 100644 
test/units/inventory_test_data/group_vars/noparse/file.txt create mode 100644 test/units/inventory_test_data/group_vars/parse/all.yml diff --git a/test/units/TestInventory.py b/test/units/TestInventory.py index dc3a0ce6d6edce..b4bee4300ef4d5 100644 --- a/test/units/TestInventory.py +++ b/test/units/TestInventory.py @@ -56,7 +56,7 @@ def dir_inventory(self): 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2', 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5', 'Hotep-a', 'Hotep-b', 'Hotep-c', - 'BastC', 'BastD', 'neptun', ] + 'BastC', 'BastD', 'neptun', 'goldorak', ] ##################################### ### Empty inventory format tests @@ -222,6 +222,11 @@ def test_subset_filename(self): inventory.subset('@' + os.path.join(self.test_dir, 'restrict_pattern')) self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor','odin'])) + def test_vars_yaml_extension(self): + inventory = self.simple_inventory() + vars = inventory.get_variables('goldorak') + assert vars['YAML_FILENAME_EXTENSIONS_TEST'] + @raises(errors.AnsibleError) def testinvalid_entry(self): Inventory('1234') diff --git a/test/units/inventory_test_data/group_vars/noparse/all.yml~ b/test/units/inventory_test_data/group_vars/noparse/all.yml~ new file mode 100644 index 00000000000000..6f52f114b13b34 --- /dev/null +++ b/test/units/inventory_test_data/group_vars/noparse/all.yml~ @@ -0,0 +1,2 @@ +--- +YAML_FILENAME_EXTENSIONS_TEST: False diff --git a/test/units/inventory_test_data/group_vars/noparse/file.txt b/test/units/inventory_test_data/group_vars/noparse/file.txt new file mode 100644 index 00000000000000..6f52f114b13b34 --- /dev/null +++ b/test/units/inventory_test_data/group_vars/noparse/file.txt @@ -0,0 +1,2 @@ +--- +YAML_FILENAME_EXTENSIONS_TEST: False diff --git a/test/units/inventory_test_data/group_vars/parse/all.yml b/test/units/inventory_test_data/group_vars/parse/all.yml new file mode 100644 index 00000000000000..8687c86c7c38a3 --- /dev/null +++ b/test/units/inventory_test_data/group_vars/parse/all.yml 
@@ -0,0 +1,2 @@ +--- +YAML_FILENAME_EXTENSIONS_TEST: True diff --git a/test/units/inventory_test_data/simple_hosts b/test/units/inventory_test_data/simple_hosts index 4625b3dbabe97a..08c62b45376836 100644 --- a/test/units/inventory_test_data/simple_hosts +++ b/test/units/inventory_test_data/simple_hosts @@ -20,3 +20,9 @@ Bast[C:D] [auth] neptun auth="YWRtaW46YWRtaW4=" + +[parse:children] +noparse + +[noparse] +goldorak From d4a31e8d26e22f160a6a433fd6f21da8c0435b70 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 24 May 2015 07:47:06 -0500 Subject: [PATCH 0664/3617] Adding unit tests for TaskExecutor (v2) --- test/units/executor/test_task_executor.py | 324 ++++++++++++++++++++++ 1 file changed, 324 insertions(+) create mode 100644 test/units/executor/test_task_executor.py diff --git a/test/units/executor/test_task_executor.py b/test/units/executor/test_task_executor.py new file mode 100644 index 00000000000000..64ce1d5faa2f2e --- /dev/null +++ b/test/units/executor/test_task_executor.py @@ -0,0 +1,324 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, MagicMock + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.executor.connection_info import ConnectionInformation +from ansible.executor.task_executor import TaskExecutor +from ansible.plugins import action_loader + +from units.mock.loader import DictDataLoader + +class TestTaskExecutor(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_task_executor_init(self): + fake_loader = DictDataLoader({}) + mock_host = MagicMock() + mock_task = MagicMock() + mock_conn_info = MagicMock() + mock_shared_loader = MagicMock() + new_stdin = None + job_vars = dict() + te = TaskExecutor( + host = mock_host, + task = mock_task, + job_vars = job_vars, + connection_info = mock_conn_info, + new_stdin = new_stdin, + loader = fake_loader, + shared_loader_obj = mock_shared_loader, + ) + + def test_task_executor_run(self): + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + mock_task = MagicMock() + mock_task._role._role_path = '/path/to/role/foo' + + mock_conn_info = MagicMock() + + mock_shared_loader = MagicMock() + + new_stdin = None + job_vars = dict() + + te = TaskExecutor( + host = mock_host, + task = mock_task, + job_vars = job_vars, + connection_info = mock_conn_info, + new_stdin = new_stdin, + loader = fake_loader, + shared_loader_obj = mock_shared_loader, + ) + + te._get_loop_items = MagicMock(return_value=None) + te._execute = MagicMock(return_value=dict()) + res = te.run() + + te._get_loop_items = MagicMock(return_value=[]) + res = te.run() + + te._get_loop_items = MagicMock(return_value=['a','b','c']) + te._run_loop = MagicMock(return_value=[dict(item='a', changed=True), dict(item='b', failed=True), dict(item='c')]) + res = te.run() + + te._get_loop_items = 
MagicMock(side_effect=AnsibleError("")) + res = te.run() + self.assertIn("failed", res) + + def test_task_executor_get_loop_items(self): + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + mock_task = MagicMock() + mock_task.loop = 'items' + mock_task.loop_args = ['a', 'b', 'c'] + + mock_conn_info = MagicMock() + + mock_shared_loader = MagicMock() + + new_stdin = None + job_vars = dict() + + te = TaskExecutor( + host = mock_host, + task = mock_task, + job_vars = job_vars, + connection_info = mock_conn_info, + new_stdin = new_stdin, + loader = fake_loader, + shared_loader_obj = mock_shared_loader, + ) + + items = te._get_loop_items() + self.assertEqual(items, ['a', 'b', 'c']) + + def test_task_executor_run_loop(self): + items = ['a', 'b', 'c'] + + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + def _copy(): + new_item = MagicMock() + return new_item + + mock_task = MagicMock() + mock_task.copy.side_effect = _copy + + mock_conn_info = MagicMock() + + mock_shared_loader = MagicMock() + + new_stdin = None + job_vars = dict() + + te = TaskExecutor( + host = mock_host, + task = mock_task, + job_vars = job_vars, + connection_info = mock_conn_info, + new_stdin = new_stdin, + loader = fake_loader, + shared_loader_obj = mock_shared_loader, + ) + + def _execute(variables): + return dict(item=variables.get('item')) + + te._squash_items = MagicMock(return_value=items) + te._execute = MagicMock(side_effect=_execute) + + res = te._run_loop(items) + self.assertEqual(len(res), 3) + + def test_task_executor_squash_items(self): + items = ['a', 'b', 'c'] + + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + def _evaluate_conditional(templar, variables): + item = variables.get('item') + if item == 'b': + return False + return True + + mock_task = MagicMock() + mock_task.evaluate_conditional.side_effect = _evaluate_conditional + + mock_conn_info = MagicMock() + + mock_shared_loader = None + + new_stdin = None + job_vars = dict() + + te = 
TaskExecutor( + host = mock_host, + task = mock_task, + job_vars = job_vars, + connection_info = mock_conn_info, + new_stdin = new_stdin, + loader = fake_loader, + shared_loader_obj = mock_shared_loader, + ) + + mock_task.action = 'foo' + new_items = te._squash_items(items=items, variables=job_vars) + self.assertEqual(new_items, ['a', 'b', 'c']) + + mock_task.action = 'yum' + new_items = te._squash_items(items=items, variables=job_vars) + self.assertEqual(new_items, ['a,c']) + + def test_task_executor_execute(self): + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + mock_task = MagicMock() + mock_task.args = dict() + mock_task.retries = 0 + mock_task.delay = -1 + mock_task.register = 'foo' + mock_task.until = None + mock_task.changed_when = None + mock_task.failed_when = None + mock_task.post_validate.return_value = None + + mock_conn_info = MagicMock() + mock_conn_info.post_validate.return_value = None + mock_conn_info.update_vars.return_value = None + + mock_connection = MagicMock() + mock_connection.set_host_overrides.return_value = None + mock_connection._connect.return_value = None + + mock_action = MagicMock() + + shared_loader = None + new_stdin = None + job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX") + + te = TaskExecutor( + host = mock_host, + task = mock_task, + job_vars = job_vars, + connection_info = mock_conn_info, + new_stdin = new_stdin, + loader = fake_loader, + shared_loader_obj = shared_loader, + ) + + te._get_connection = MagicMock(return_value=mock_connection) + te._get_action_handler = MagicMock(return_value=mock_action) + + mock_action.run.return_value = dict(ansible_facts=dict()) + res = te._execute() + + mock_task.changed_when = "1 == 1" + res = te._execute() + + mock_task.changed_when = None + mock_task.failed_when = "1 == 1" + res = te._execute() + + mock_task.failed_when = None + mock_task.evaluate_conditional.return_value = False + res = te._execute() + + mock_task.evaluate_conditional.return_value = True + mock_task.args = 
dict(_raw_params='foo.yml', a='foo', b='bar') + mock_task.action = 'include' + res = te._execute() + + def test_task_executor_poll_async_result(self): + fake_loader = DictDataLoader({}) + + mock_host = MagicMock() + + mock_task = MagicMock() + mock_task.async = 3 + mock_task.poll = 1 + + mock_conn_info = MagicMock() + + mock_connection = MagicMock() + + mock_action = MagicMock() + + shared_loader = None + new_stdin = None + job_vars = dict(omit="XXXXXXXXXXXXXXXXXXX") + + te = TaskExecutor( + host = mock_host, + task = mock_task, + job_vars = job_vars, + connection_info = mock_conn_info, + new_stdin = new_stdin, + loader = fake_loader, + shared_loader_obj = shared_loader, + ) + + te._connection = MagicMock() + + def _get(*args, **kwargs): + mock_action = MagicMock() + mock_action.run.return_value = dict() + return mock_action + + # testing with some bad values in the result passed to poll async, + # and with a bad value returned from the mock action + with patch.object(action_loader, 'get', _get): + mock_templar = MagicMock() + res = te._poll_async_result(result=dict(), templar=mock_templar) + self.assertIn('failed', res) + res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar) + self.assertIn('failed', res) + + def _get(*args, **kwargs): + mock_action = MagicMock() + mock_action.run.return_value = dict(finished=1) + return mock_action + + # now testing with good values + with patch.object(action_loader, 'get', _get): + mock_templar = MagicMock() + res = te._poll_async_result(result=dict(ansible_job_id=1), templar=mock_templar) + self.assertEqual(res, dict(finished=1)) + From 5c455ad729a55d7b5f8da303cefe2fef36375f2e Mon Sep 17 00:00:00 2001 From: Erik Weathers Date: Sun, 24 May 2015 17:02:02 -0700 Subject: [PATCH 0665/3617] fix typo in --ask-sudo-pass reference within playbook_intro doc --- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst 
b/docsite/rst/playbooks_intro.rst index 3899502ed475cf..4fe2ab3ec3f2f9 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -186,7 +186,7 @@ You can also use other privilege escalation methods, like su:: become_method: su If you need to specify a password to sudo, run `ansible-playbook` with ``--ask-become-pass`` or -when using the old sudo syntax ``--ask-sudo--pass`` (`-K`). If you run a become playbook and the +when using the old sudo syntax ``--ask-sudo-pass`` (`-K`). If you run a become playbook and the playbook seems to hang, it's probably stuck at the privilege escalation prompt. Just `Control-C` to kill it and run it again adding the appropriate password. From 3775dd5ec82265fe5aec909accffe950d08a38d2 Mon Sep 17 00:00:00 2001 From: Etienne CARRIERE Date: Mon, 25 May 2015 09:53:23 +0200 Subject: [PATCH 0666/3617] Factor F5 primitives --- lib/ansible/module_utils/f5.py | 64 ++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 lib/ansible/module_utils/f5.py diff --git a/lib/ansible/module_utils/f5.py b/lib/ansible/module_utils/f5.py new file mode 100644 index 00000000000000..2d97662a0b6576 --- /dev/null +++ b/lib/ansible/module_utils/f5.py @@ -0,0 +1,64 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Etienne Carrière ,2015 +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +try: + import bigsuds +except ImportError: + bigsuds_found = False +else: + bigsuds_found = True + + +def f5_argument_spec(): + return dict( + server=dict(type='str', required=True), + user=dict(type='str', required=True), + password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True), + validate_certs = dict(default='yes', type='bool'), + state = dict(type='str', default='present', choices=['present', 'absent']), + partition = dict(type='str', default='Common') + ) + + +def f5_parse_arguments(module): + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + if not module.params['validate_certs']: + disable_ssl_cert_validation() + return (module.params['server'],module.params['user'],module.params['password'],module.params['state'],module.params['partition']) + +def bigip_api(bigip, user, password): + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + return api + +def disable_ssl_cert_validation(): + # You 
probably only want to do this for testing and never in production. + # From https://www.python.org/dev/peps/pep-0476/#id29 + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + From 308879075d60118537080ca1fd63bf78be19150a Mon Sep 17 00:00:00 2001 From: Maykel Moya Date: Mon, 25 May 2015 16:26:37 +0200 Subject: [PATCH 0667/3617] Remove duplicate RETRY_FILES_* constants --- v2/ansible/constants.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/v2/ansible/constants.py b/v2/ansible/constants.py index 456beb8bbc40f4..245972b1a5663c 100644 --- a/v2/ansible/constants.py +++ b/v2/ansible/constants.py @@ -185,9 +185,6 @@ def shell_expand_path(path): RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') -RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) -RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') - # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") From 0f0f28145b908419eeb699d5809b7f2ce66f8a22 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 25 May 2015 10:35:28 -0400 Subject: [PATCH 0668/3617] added proxmox module to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index abe42602a6ba1e..ef7778a47d9709 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ New Modules: * openstack: os_server_volume * openstack: os_subnet * openstack: os_volume + * proxmox * pushover * pushbullet * rabbitmq_binding From eaddc0b309bb55fec9fc72a0a4a073aedb3bc930 Mon Sep 17 00:00:00
2001 From: Brian Coca Date: Mon, 25 May 2015 11:05:47 -0400 Subject: [PATCH 0669/3617] removed duplicate retry config entries --- lib/ansible/constants.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 9c1c820421a52b..98f058e21ccf96 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -188,9 +188,6 @@ def shell_expand_path(path): RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') -RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) -RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') - # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") From dcc691f462470edffd53a58b00f96daf7ff1bf9e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 25 May 2015 09:23:04 -0700 Subject: [PATCH 0670/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b5399d34464475..32e609720a962f 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b5399d34464475a9aa87c6b928628cd262022cd5 +Subproject commit 32e609720a962fa948094de03eba4750ab03918b diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 5187c7fcd72d47..47c74936c1095f 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 5187c7fcd72d4750d5a1c9398ceaf62527272eaf 
+Subproject commit 47c74936c1095fb63e75cf7be3f1b376c5f11116 From 5f246dc1a621bdbe2f5477b6afd961fb6e2a242f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 25 May 2015 09:35:40 -0700 Subject: [PATCH 0671/3617] Update extras submodule for doc fixes --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 47c74936c1095f..8dfa63d1d8be33 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 47c74936c1095fb63e75cf7be3f1b376c5f11116 +Subproject commit 8dfa63d1d8be333dd107f4f90be2c337b4909432 From b740b0372af8e91c0f8217d8e6350c15e1be2b66 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 26 May 2015 10:05:25 -0400 Subject: [PATCH 0672/3617] added new win_environment module --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ef7778a47d9709..a1a6a58e5bb20b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ New Modules: * vertica_schema * vertica_user * vmware_datacenter + * win_environment New Inventory scripts: * cloudstack From 16c70dd7d459372318aaf60bfd3708dda6abc3f6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 26 May 2015 11:55:52 -0400 Subject: [PATCH 0673/3617] added equivalent of #9636 to v2 --- lib/ansible/module_utils/basic.py | 1 + lib/ansible/plugins/shell/sh.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 237cb5b106ca73..2da2bad3ef7888 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -868,6 +868,7 @@ def _check_locale(self): locale.setlocale(locale.LC_ALL, 'C') os.environ['LANG'] = 'C' os.environ['LC_CTYPE'] = 'C' + os.environ['LC_MESSAGES'] = 'C' except Exception, e: self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e) diff --git 
a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index 628df9bbfbf738..f7ba06d93188c8 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -34,8 +34,9 @@ class ShellModule(object): def env_prefix(self, **kwargs): '''Build command prefix with environment variables.''' env = dict( - LANG = C.DEFAULT_MODULE_LANG, - LC_CTYPE = C.DEFAULT_MODULE_LANG, + LANG = C.DEFAULT_MODULE_LANG, + LC_CTYPE = C.DEFAULT_MODULE_LANG, + LC_MESSAGES = C.DEFAULT_MODULE_LANG, ) env.update(kwargs) return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()]) From 540c23dfce733527f7d33734060ae36c111fcc75 Mon Sep 17 00:00:00 2001 From: Stefan Midjich Date: Wed, 6 May 2015 22:47:53 +0200 Subject: [PATCH 0674/3617] this fixes ansible on openbsd and freebsd systems. only tested on openbsd. --- lib/ansible/module_utils/facts.py | 37 +++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index b223c5f5f7d3eb..3485690b83f13e 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2545,6 +2545,43 @@ def get_virtual_facts(self): self.facts['virtualization_role'] = 'NA' return +class FreeBSDVirtual(Virtual): + """ + This is a FreeBSD-specific subclass of Virtual. It defines + - virtualization_type + - virtualization_role + """ + platform = 'FreeBSD' + + def __init__(self): + Virtual.__init__(self) + + def populate(self): + self.get_virtual_facts() + return self.facts + + def get_virtual_facts(self): + self.facts['virtualization_type'] = '' + self.facts['virtualization_role'] = '' + +class OpenBSDVirtual(Virtual): + """ + This is a OpenBSD-specific subclass of Virtual. 
It defines + - virtualization_type + - virtualization_role + """ + platform = 'OpenBSD' + + def __init__(self): + Virtual.__init__(self) + + def populate(self): + self.get_virtual_facts() + return self.facts + + def get_virtual_facts(self): + self.facts['virtualization_type'] = '' + self.facts['virtualization_role'] = '' class HPUXVirtual(Virtual): """ From d0a154c446f637c2b041dc28bc1ccbb891b48fac Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Tue, 26 May 2015 09:15:04 -0700 Subject: [PATCH 0675/3617] Include more info when a task fails Adds "playbook", "role", and "task" fields to the output when a task fails. This makes it easier to pinpoint where the problem is, especially when you have a lot of roles and playbooks. e.g.: failed: [vagrant] => {..."playbook": "/Users/marca/dev/ansible/vagrant.yml", ..."role": "pythonapp", ..."task": "pip install -r /opt/src/{{ sm_app_role }}/requirements.txt"...} --- lib/ansible/callbacks.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/callbacks.py b/lib/ansible/callbacks.py index 39d3a8d442892f..a7d2283cf0aa6c 100644 --- a/lib/ansible/callbacks.py +++ b/lib/ansible/callbacks.py @@ -487,6 +487,10 @@ def on_failed(self, host, results, ignore_errors=False): stdout = results2.pop('stdout', None) returned_msg = results2.pop('msg', None) + results2['task'] = self.task.name + results2['role'] = self.task.role_name + results2['playbook'] = self.playbook.filename + if item: msg = "failed: [%s] => (item=%s) => %s" % (host, item, utils.jsonify(results2)) else: From aea8758b440b834ab47c86252139b1ed73f3aa44 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 26 May 2015 13:51:32 -0400 Subject: [PATCH 0676/3617] added os_network and deprecated quantum_network --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1a6a58e5bb20b..1d1c015c6443d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ Major Changes: Deprecated Modules (new ones in parens): * 
ec2_ami_search (ec2_ami_find) + * quantum_network (os_network) * nova_compute (os_server) New Modules: @@ -34,6 +35,7 @@ New Modules: * cloudstack: cs_securitygroup_rule * cloudstack: cs_vmsnapshot * maven_artifact + * openstack: os_network * openstack: os_server * openstack: os_server_facts * openstack: os_server_volume From 31609e1b16e8edd9ff5911097d3d33733a2817e5 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sun, 26 Oct 2014 10:41:58 -0700 Subject: [PATCH 0677/3617] Add required_if to AnsibleModule There is a common pattern in modules where some parameters are required only if another parameter is present AND set to a particular value. For instance, if a cloud server state is "present" it's important to indicate the image to be used, but if it's "absent", the image that was used to launch it is not necessary. Provide a check that takes as an input a list of 3-element tuples containing parameter to depend on, the value it should be set to, and a list of parameters which are required if the required parameter is set to the required value. 
--- lib/ansible/module_utils/basic.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 2da2bad3ef7888..446cf56f07931f 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -337,7 +337,8 @@ class AnsibleModule(object): def __init__(self, argument_spec, bypass_checks=False, no_log=False, check_invalid_arguments=True, mutually_exclusive=None, required_together=None, - required_one_of=None, add_file_common_args=False, supports_check_mode=False): + required_one_of=None, add_file_common_args=False, supports_check_mode=False, + required_if=None): ''' common code for quickly building an ansible module in Python @@ -385,6 +386,7 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, self._check_argument_types() self._check_required_together(required_together) self._check_required_one_of(required_one_of) + self._check_required_if(required_if) self._set_defaults(pre=False) if not self.no_log: @@ -958,6 +960,20 @@ def _check_required_arguments(self): if len(missing) > 0: self.fail_json(msg="missing required arguments: %s" % ",".join(missing)) + def _check_required_if(self, spec): + ''' ensure that parameters which are conditionally required are present ''' + if spec is None: + return + for (key, val, requirements) in spec: + missing = [] + if key in self.params and self.params[key] == val: + for check in requirements: + count = self._count_terms(check) + if count == 0: + missing.append(check) + if len(missing) > 0: + self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)) + def _check_argument_values(self): + ''' ensure all arguments have the requested values, and there are no stray arguments ''' for (k,v) in self.argument_spec.iteritems(): From 0f23d8a503c7c081090b2a8a175205fd13adee4f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 26 May 2015 11:28:30 -0700 Subject:
[PATCH 0678/3617] Fix syntaxerror in the required_if arg spec check --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 935eb31e66e6d5..e772a12efcefce 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -972,7 +972,7 @@ def _check_required_if(self, spec): if count == 0: missing.append(check) if len(missing) > 0: - self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)) + self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing))) def _check_argument_values(self): ''' ensure all arguments have the requested values, and there are no stray arguments ''' From d793ed360b65f991e384a7839c7456830c445778 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 26 May 2015 11:28:30 -0700 Subject: [PATCH 0679/3617] Fix syntaxerror in the required_if arg spec check --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 446cf56f07931f..2e4805cb86b993 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -972,7 +972,7 @@ def _check_required_if(self, spec): if count == 0: missing.append(check) if len(missing) > 0: - self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)) + self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing))) def _check_argument_values(self): ''' ensure all arguments have the requested values, and there are no stray arguments ''' From c87586e9e553a5a4b254a01895a9e5e8b98bab45 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 26 May 2015 15:11:02 -0400 Subject: [PATCH 0680/3617] updated formatting --- docsite/rst/developing_modules.rst | 28 ++++++++++++++-------------- 1 file 
changed, 14 insertions(+), 14 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 0748a82effa66e..ddd4e90c82a3a3 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -454,20 +454,20 @@ Module checklist * The shebang should always be #!/usr/bin/python, this allows ansible_python_interpreter to work * Documentation: Make sure it exists - * `required` should always be present, be it true or false - * If `required` is false you need to document `default`, even if its 'null' - * `default` is not needed for `required: true` - * Remove unnecessary doc like `aliases: []` or `choices: []` - * The version is not a float number and value the current development version - * The verify that arguments in doc and module spec dict are identical - * For password / secret arguments no_log=True should be set - * Requirements should be documented, using the `requirements=[]` field - * Author should be set, name and github id at least - * Made use of U() for urls, C() for files and options, I() for params, M() for modules? - * GPL License header - * Examples: make sure they are reproducible - * Return: document the return structure of the module -* Does module use check_mode? Could it be modified to use it? Document it + * `required` should always be present, be it true or false + * If `required` is false you need to document `default`, even if its 'null' + * `default` is not needed for `required: true` + * Remove unnecessary doc like `aliases: []` or `choices: []` + * The version is not a float number and value the current development version + * The verify that arguments in doc and module spec dict are identical + * For password / secret arguments no_log=True should be set + * Requirements should be documented, using the `requirements=[]` field + * Author should be set, name and github id at least + * Made use of U() for urls, C() for files and options, I() for params, M() for modules? 
+ * GPL License header + * Does module use check_mode? Could it be modified to use it? Document it + * Examples: make sure they are reproducible + * Return: document the return structure of the module * Exceptions: The module must handle them. (exceptions are bugs) * Give out useful messages on what you were doing and you can add the exception message to that. * Avoid catchall exceptions, they are not very useful unless the underlying API gives very good error messages pertaining the attempted action. From f1ab1c48f4f19867a537c9ac5ef7656b0b05901e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 26 May 2015 12:38:26 -0700 Subject: [PATCH 0681/3617] Update submodule refs for v2 --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e10a581abdf375..9cc23c749a8cd5 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e10a581abdf375b855418897944d5206682994b6 +Subproject commit 9cc23c749a8cd5039db7aa1998d310bbb04d1e13 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 24390f1ac69fe4..a07fc88ba0d254 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 24390f1ac69fe4731e143eab16120bc422fd6233 +Subproject commit a07fc88ba0d2546b92fbe93b2bede699fdf2bc48 From 7a0c521131852e6c5c9987be6d3ac8c12d34bd0a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 26 May 2015 17:30:10 -0400 Subject: [PATCH 0682/3617] added os_server_actions --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d1c015c6443d0..98006503692fcb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ New Modules: * maven_artifact * openstack: os_network * openstack: os_server + * openstack: os_server_actions * openstack: os_server_facts * openstack: os_server_volume * openstack: os_subnet From 
ea4421d10e7aad5df863b007fd6f31a887d55079 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 26 May 2015 15:33:47 -0700 Subject: [PATCH 0683/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 32e609720a962f..476af93e96f5d2 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 32e609720a962fa948094de03eba4750ab03918b +Subproject commit 476af93e96f5d2518470b5c27ece59cbda66ec1d diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8dfa63d1d8be33..a07fc88ba0d254 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8dfa63d1d8be333dd107f4f90be2c337b4909432 +Subproject commit a07fc88ba0d2546b92fbe93b2bede699fdf2bc48 From 339a02c3847ce41ac8560b3e1f429f8d1d2e88f3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 27 May 2015 03:20:54 -0500 Subject: [PATCH 0684/3617] Started reworking module_utils/basic unit tests (v2) --- lib/ansible/module_utils/basic.py | 4 +- test/units/module_utils/test_basic.py | 510 ++++++++++++-------------- 2 files changed, 227 insertions(+), 287 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 2e4805cb86b993..c222bb4d16810e 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -930,7 +930,7 @@ def _check_mutually_exclusive(self, spec): for check in spec: count = self._count_terms(check) if count > 1: - self.fail_json(msg="parameters are mutually exclusive: %s" % check) + self.fail_json(msg="parameters are mutually exclusive: %s" % (check,)) def _check_required_one_of(self, spec): if spec is None: @@ -948,7 +948,7 @@ def _check_required_together(self, spec): non_zero = [ c for c in counts if c > 0 ] if len(non_zero) > 0: if 0 in counts: - self.fail_json(msg="parameters 
are required together: %s" % check) + self.fail_json(msg="parameters are required together: %s" % (check,)) def _check_required_arguments(self): ''' ensure all required arguments are present ''' diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index 60f501ba28b5c8..c3db5138bf272e 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # (c) 2012-2014, Michael DeHaan # # This file is part of Ansible @@ -16,301 +17,167 @@ # along with Ansible. If not, see . # Make coding more python3-ish -#from __future__ import (absolute_import, division, print_function) from __future__ import (absolute_import, division) __metaclass__ = type -import os -import tempfile +import __builtin__ + +from nose.tools import timed from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch, MagicMock -from ansible.errors import * -from ansible.executor.module_common import modify_module -from ansible.module_utils.basic import heuristic_log_sanitize -from ansible.utils.hashing import checksum as utils_checksum - -TEST_MODULE_DATA = """ -from ansible.module_utils.basic import * - -def get_module(): - return AnsibleModule( - argument_spec = dict(), - supports_check_mode = True, - no_log = True, - ) - -get_module() - -""" - class TestModuleUtilsBasic(unittest.TestCase): - def cleanup_temp_file(self, fd, path): - try: - os.close(fd) - os.remove(path) - except: - pass - - def cleanup_temp_dir(self, path): - try: - os.rmdir(path) - except: - pass - def setUp(self): - # create a temporary file for the test module - # we're about to generate - self.tmp_fd, self.tmp_path = tempfile.mkstemp() - os.write(self.tmp_fd, TEST_MODULE_DATA) - - # template the module code and eval it - module_data, module_style, shebang = modify_module(self.tmp_path, {}) - - d = {} - exec(module_data, d, d) - self.module = d['get_module']() - - # module_utils/basic.py screws with 
CWD, let's save it and reset - self.cwd = os.getcwd() + pass def tearDown(self): - self.cleanup_temp_file(self.tmp_fd, self.tmp_path) - # Reset CWD back to what it was before basic.py changed it - os.chdir(self.cwd) - - ################################################################################# - # run_command() tests - - # test run_command with a string command - def test_run_command_string(self): - (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'") - self.assertEqual(rc, 0) - self.assertEqual(out, 'foo bar') - (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'", use_unsafe_shell=True) - self.assertEqual(rc, 0) - self.assertEqual(out, 'foo bar') - - # test run_command with an array of args (with both use_unsafe_shell=True|False) - def test_run_command_args(self): - (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"]) - self.assertEqual(rc, 0) - self.assertEqual(out, 'foo bar') - (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"], use_unsafe_shell=True) - self.assertEqual(rc, 0) - self.assertEqual(out, 'foo bar') - - # test run_command with leading environment variables - #@raises(SystemExit) - def test_run_command_string_with_env_variables(self): - self.assertRaises(SystemExit, self.module.run_command, 'FOO=bar /bin/echo -n "foo bar"') - - #@raises(SystemExit) - def test_run_command_args_with_env_variables(self): - self.assertRaises(SystemExit, self.module.run_command, ['FOO=bar', '/bin/echo', '-n', 'foo bar']) - - def test_run_command_string_unsafe_with_env_variables(self): - (rc, out, err) = self.module.run_command('FOO=bar /bin/echo -n "foo bar"', use_unsafe_shell=True) - self.assertEqual(rc, 0) - self.assertEqual(out, 'foo bar') - - # test run_command with a command pipe (with both use_unsafe_shell=True|False) - def test_run_command_string_unsafe_with_pipe(self): - (rc, out, err) = self.module.run_command('echo "foo bar" | cat', use_unsafe_shell=True) - self.assertEqual(rc, 0) - 
self.assertEqual(out, 'foo bar\n') - - # test run_command with a shell redirect in (with both use_unsafe_shell=True|False) - def test_run_command_string_unsafe_with_redirect_in(self): - (rc, out, err) = self.module.run_command('cat << EOF\nfoo bar\nEOF', use_unsafe_shell=True) - self.assertEqual(rc, 0) - self.assertEqual(out, 'foo bar\n') - - # test run_command with a shell redirect out (with both use_unsafe_shell=True|False) - def test_run_command_string_unsafe_with_redirect_out(self): - tmp_fd, tmp_path = tempfile.mkstemp() - try: - (rc, out, err) = self.module.run_command('echo "foo bar" > %s' % tmp_path, use_unsafe_shell=True) - self.assertEqual(rc, 0) - self.assertTrue(os.path.exists(tmp_path)) - checksum = utils_checksum(tmp_path) - self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec') - except: - raise - finally: - self.cleanup_temp_file(tmp_fd, tmp_path) - - # test run_command with a double shell redirect out (append) (with both use_unsafe_shell=True|False) - def test_run_command_string_unsafe_with_double_redirect_out(self): - tmp_fd, tmp_path = tempfile.mkstemp() - try: - (rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True) - self.assertEqual(rc, 0) - self.assertTrue(os.path.exists(tmp_path)) - checksum = utils_checksum(tmp_path) - self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec') - except: - raise - finally: - self.cleanup_temp_file(tmp_fd, tmp_path) - - # test run_command with data - def test_run_command_string_with_data(self): - (rc, out, err) = self.module.run_command('cat', data='foo bar') - self.assertEqual(rc, 0) - self.assertEqual(out, 'foo bar\n') - - # test run_command with binary data - def test_run_command_string_with_binary_data(self): - (rc, out, err) = self.module.run_command('cat', data='\x41\x42\x43\x44', binary_data=True) - self.assertEqual(rc, 0) - self.assertEqual(out, 'ABCD') - - # test run_command with a cwd set - def 
test_run_command_string_with_cwd(self): - tmp_path = tempfile.mkdtemp() - try: - (rc, out, err) = self.module.run_command('pwd', cwd=tmp_path) - self.assertEqual(rc, 0) - self.assertTrue(os.path.exists(tmp_path)) - self.assertEqual(out.strip(), os.path.realpath(tmp_path)) - except: - raise - finally: - self.cleanup_temp_dir(tmp_path) - - -class TestModuleUtilsBasicHelpers(unittest.TestCase): - ''' Test some implementation details of AnsibleModule - - Some pieces of AnsibleModule are implementation details but they have - potential cornercases that we need to check. Go ahead and test at - this level that the functions are behaving even though their API may - change and we'd have to rewrite these tests so that we know that we - need to check for those problems in any rewrite. - - In the future we might want to restructure higher level code to be - friendlier to unittests so that we can test at the level that the public - is interacting with the APIs. - ''' - - MANY_RECORDS = 7000 - URL_SECRET = 'http://username:pas:word@foo.com/data' - SSH_SECRET = 'username:pas:word@foo.com/data' - - def cleanup_temp_file(self, fd, path): - try: - os.close(fd) - os.remove(path) - except: + pass + + def test_module_utils_basic_imports(self): + realimport = __builtin__.__import__ + + def _mock_import(name, *args, **kwargs): + if name == 'json': + raise ImportError() + realimport(name, *args, **kwargs) + + with patch.object(__builtin__, '__import__', _mock_import, create=True) as m: + m('ansible.module_utils.basic') + __builtin__.__import__('ansible.module_utils.basic') + + def test_module_utils_basic_get_platform(self): + with patch('platform.system', return_value='foo'): + from ansible.module_utils.basic import get_platform + self.assertEqual(get_platform(), 'foo') + + def test_module_utils_basic_get_distribution(self): + from ansible.module_utils.basic import get_distribution + + with patch('platform.system', return_value='Foo'): + self.assertEqual(get_distribution(), None) + + with 
patch('platform.system', return_value='Linux'): + with patch('platform.linux_distribution', return_value=("foo", "1", "One")): + self.assertEqual(get_distribution(), "Foo") + + with patch('os.path.isfile', return_value=True): + def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1): + if supported_dists != (): + return ("AmazonFooBar", "", "") + else: + return ("", "", "") + + with patch('platform.linux_distribution', side_effect=_dist): + self.assertEqual(get_distribution(), "Amazon") + + def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1): + if supported_dists != (): + return ("Bar", "2", "Two") + else: + return ("", "", "") + + with patch('platform.linux_distribution', side_effect=_dist): + self.assertEqual(get_distribution(), "OtherLinux") + + with patch('platform.linux_distribution', side_effect=Exception("boo")): + with patch('platform.dist', return_value=("bar", "2", "Two")): + self.assertEqual(get_distribution(), "Bar") + + def test_module_utils_basic_get_distribution_version(self): + from ansible.module_utils.basic import get_distribution_version + + with patch('platform.system', return_value='Foo'): + self.assertEqual(get_distribution_version(), None) + + with patch('platform.system', return_value='Linux'): + with patch('platform.linux_distribution', return_value=("foo", "1", "One")): + self.assertEqual(get_distribution_version(), "1") + + with patch('os.path.isfile', return_value=True): + def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1): + if supported_dists != (): + return ("AmazonFooBar", "2", "") + else: + return ("", "", "") + + with patch('platform.linux_distribution', side_effect=_dist): + self.assertEqual(get_distribution_version(), "2") + + with patch('platform.linux_distribution', side_effect=Exception("boo")): + with patch('platform.dist', return_value=("bar", "3", "Three")): + self.assertEqual(get_distribution_version(), "3") + + def 
test_module_utils_basic_load_platform_subclass(self): + class LinuxTest: pass - def cleanup_temp_dir(self, path): - try: - os.rmdir(path) - except: - pass - - def _gen_data(self, records, per_rec, top_level, secret_text): - hostvars = {'hostvars': {}} - for i in range(1, records, 1): - host_facts = {'host%s' % i: - {'pstack': - {'running': '875.1', - 'symlinked': '880.0', - 'tars': [], - 'versions': ['885.0']}, - }} - - if per_rec: - host_facts['host%s' % i]['secret'] = secret_text - hostvars['hostvars'].update(host_facts) - if top_level: - hostvars['secret'] = secret_text - return hostvars - - def setUp(self): - self.many_url = repr(self._gen_data(self.MANY_RECORDS, True, True, - self.URL_SECRET)) - self.many_ssh = repr(self._gen_data(self.MANY_RECORDS, True, True, - self.SSH_SECRET)) - self.one_url = repr(self._gen_data(self.MANY_RECORDS, False, True, - self.URL_SECRET)) - self.one_ssh = repr(self._gen_data(self.MANY_RECORDS, False, True, - self.SSH_SECRET)) - self.zero_secrets = repr(self._gen_data(self.MANY_RECORDS, False, - False, '')) - self.few_url = repr(self._gen_data(2, True, True, self.URL_SECRET)) - self.few_ssh = repr(self._gen_data(2, True, True, self.SSH_SECRET)) - - # create a temporary file for the test module - # we're about to generate - self.tmp_fd, self.tmp_path = tempfile.mkstemp() - os.write(self.tmp_fd, TEST_MODULE_DATA) - - # template the module code and eval it - module_data, module_style, shebang = modify_module(self.tmp_path, {}) - - d = {} - exec(module_data, d, d) - self.module = d['get_module']() - - # module_utils/basic.py screws with CWD, let's save it and reset - self.cwd = os.getcwd() - - def tearDown(self): - self.cleanup_temp_file(self.tmp_fd, self.tmp_path) - # Reset CWD back to what it was before basic.py changed it - os.chdir(self.cwd) - - - ################################################################################# - - # - # Speed tests - # - - # Previously, we used regexes which had some pathologically slow cases for 
- # parameters with large amounts of data with many ':' but no '@'. The - # present function gets slower when there are many replacements so we may - # want to explore regexes in the future (for the speed when substituting - # or flexibility). These speed tests will hopefully tell us if we're - # introducing code that has cases that are simply too slow. - # - # Some regex notes: - # * re.sub() is faster than re.match() + str.join(). - # * We may be able to detect a large number of '@' symbols and then use - # a regex else use the present function. - - #@timed(5) - #def test_log_sanitize_speed_many_url(self): - # heuristic_log_sanitize(self.many_url) - - #@timed(5) - #def test_log_sanitize_speed_many_ssh(self): - # heuristic_log_sanitize(self.many_ssh) - - #@timed(5) - #def test_log_sanitize_speed_one_url(self): - # heuristic_log_sanitize(self.one_url) - - #@timed(5) - #def test_log_sanitize_speed_one_ssh(self): - # heuristic_log_sanitize(self.one_ssh) - - #@timed(5) - #def test_log_sanitize_speed_zero_secrets(self): - # heuristic_log_sanitize(self.zero_secrets) - - # - # Test that the password obfuscation sanitizes somewhat cleanly. 
- # - - def test_log_sanitize_correctness(self): - url_data = repr(self._gen_data(3, True, True, self.URL_SECRET)) - ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET)) + class Foo(LinuxTest): + platform = "Linux" + distribution = None + + class Bar(LinuxTest): + platform = "Linux" + distribution = "Bar" + + from ansible.module_utils.basic import load_platform_subclass + + # match just the platform class, not a specific distribution + with patch('ansible.module_utils.basic.get_platform', return_value="Linux"): + with patch('ansible.module_utils.basic.get_distribution', return_value=None): + self.assertIs(type(load_platform_subclass(LinuxTest)), Foo) + + # match both the distribution and platform class + with patch('ansible.module_utils.basic.get_platform', return_value="Linux"): + with patch('ansible.module_utils.basic.get_distribution', return_value="Bar"): + self.assertIs(type(load_platform_subclass(LinuxTest)), Bar) + + # if neither match, the fallback should be the top-level class + with patch('ansible.module_utils.basic.get_platform', return_value="Foo"): + with patch('ansible.module_utils.basic.get_distribution', return_value=None): + self.assertIs(type(load_platform_subclass(LinuxTest)), LinuxTest) + + def test_module_utils_basic_json_dict_converters(self): + from ansible.module_utils.basic import json_dict_unicode_to_bytes, json_dict_bytes_to_unicode + + test_data = dict( + item1 = u"Fóo", + item2 = [u"Bár", u"Bam"], + item3 = dict(sub1=u"Súb"), + item4 = (u"föo", u"bär", u"©"), + item5 = 42, + ) + res = json_dict_unicode_to_bytes(test_data) + res2 = json_dict_bytes_to_unicode(res) + + self.assertEqual(test_data, res2) + + def test_module_utils_basic_heuristic_log_sanitize(self): + from ansible.module_utils.basic import heuristic_log_sanitize + + URL_SECRET = 'http://username:pas:word@foo.com/data' + SSH_SECRET = 'username:pas:word@foo.com/data' + + def _gen_data(records, per_rec, top_level, secret_text): + hostvars = {'hostvars': {}} + for i in 
range(1, records, 1): + host_facts = {'host%s' % i: + {'pstack': + {'running': '875.1', + 'symlinked': '880.0', + 'tars': [], + 'versions': ['885.0']}, + }} + if per_rec: + host_facts['host%s' % i]['secret'] = secret_text + hostvars['hostvars'].update(host_facts) + if top_level: + hostvars['secret'] = secret_text + return hostvars + + url_data = repr(_gen_data(3, True, True, URL_SECRET)) + ssh_data = repr(_gen_data(3, True, True, SSH_SECRET)) url_output = heuristic_log_sanitize(url_data) ssh_output = heuristic_log_sanitize(ssh_data) @@ -349,7 +216,80 @@ def test_log_sanitize_correctness(self): # python2.6 or less's unittest self.assertTrue(":********@foo.com/data'" in ssh_output, '%s is not present in %s' % (":********@foo.com/data'", ssh_output)) - # The overzealous-ness here may lead to us changing the algorithm in - # the future. We could make it consume less of the data (with the - # possibility of leaving partial passwords exposed) and encourage - # people to use no_log instead of relying on this obfuscation. 
+ + def test_module_utils_basic_ansible_module_creation(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec=dict(), + ) + + arg_spec = dict( + foo = dict(required=True), + bar = dict(), + bam = dict(), + baz = dict(), + ) + mut_ex = (('bar', 'bam'),) + req_to = (('bam', 'baz'),) + + # should test ok + basic.MODULE_COMPLEX_ARGS = '{"foo":"hello"}' + am = basic.AnsibleModule( + argument_spec = arg_spec, + mutually_exclusive = mut_ex, + required_together = req_to, + no_log=True, + check_invalid_arguments=False, + add_file_common_args=True, + supports_check_mode=True, + ) + + # fail, because a required param was not specified + basic.MODULE_COMPLEX_ARGS = '{}' + self.assertRaises( + SystemExit, + basic.AnsibleModule, + argument_spec = arg_spec, + mutually_exclusive = mut_ex, + required_together = req_to, + no_log=True, + check_invalid_arguments=False, + add_file_common_args=True, + supports_check_mode=True, + ) + + # fail because of mutually exclusive parameters + basic.MODULE_COMPLEX_ARGS = '{"foo":"hello", "bar": "bad", "bam": "bad"}' + self.assertRaises( + SystemExit, + basic.AnsibleModule, + argument_spec = arg_spec, + mutually_exclusive = mut_ex, + required_together = req_to, + no_log=True, + check_invalid_arguments=False, + add_file_common_args=True, + supports_check_mode=True, + ) + + # fail because a param required due to another param was not specified + basic.MODULE_COMPLEX_ARGS = '{"bam":"bad"}' + self.assertRaises( + SystemExit, + basic.AnsibleModule, + argument_spec = arg_spec, + mutually_exclusive = mut_ex, + required_together = req_to, + no_log=True, + check_invalid_arguments=False, + add_file_common_args=True, + supports_check_mode=True, + ) + + def test_module_utils_basic_get_module_path(self): + from ansible.module_utils.basic import get_module_path + with patch('os.path.realpath', return_value='/path/to/foo/'): + self.assertEqual(get_module_path(), '/path/to/foo') + From 
7508709045c68738990b28e030cb80928d19a3e6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 27 May 2015 07:27:31 -0400 Subject: [PATCH 0685/3617] updated as per feedback --- docsite/rst/developing_modules.rst | 48 ++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index ddd4e90c82a3a3..0763814a1aab9c 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -490,6 +490,54 @@ Module checklist * If you are asking 'how can i have a module execute other modules' ... you want to write a role +Windows modules checklist +````````````````````````` +* Favour native powershell and .net ways of doing things over calls to COM libraries or calls to native executables which may or may not be present in all versions of windows +* modules are in powershell (.ps1 files) but the docs reside in same name python file (.py) +* look at ansible/lib/ansible/module_utils/powershell.ps1 for commmon code, avoid duplication +* start with:: + + #!powershell + +then:: + +then:: + # WANT_JSON + # POWERSHELL_COMMON + +* Arguments: + * Try and use state present and state absent like other modules + * You need to check that all your mandatory args are present:: + + If ($params.state) { + $state = $params.state.ToString().ToLower() + If (($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted')) { + Fail-Json $result "state is '$state'; must be 'started', 'stopped', or 'restarted'" + } + } + + * Look at existing modules for more examples of argument checking. + +* Results + * The result object should allways contain an attribute called changed set to either $true or $false + * Create your result object like this:: + + $result = New-Object psobject @{ + changed = $false + other_result_attribute = $some_value + }; + + If all is well, exit with a + Exit-Json $result + + * Ensure anything you return, including errors can be converted to json. 
+ * Be aware that because exception messages could contain almost anything. + * ConvertTo-Json will fail if it encounters a trailing \ in a string. + * If all is not well use Fail-Json to exit. + +* Have you tested for powershell 3.0 and 4.0 compliance? + + Deprecating and making module aliases `````````````````````````````````````` From 83074f4d93f628f1d4563687000a5cb51fd3f979 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Wed, 20 May 2015 16:31:17 +0200 Subject: [PATCH 0686/3617] doc: we need GPLv3 license headers GPLv2 only headers are incompatible with GPLv3 --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index ddd4e90c82a3a3..46cb36f634c57d 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -464,7 +464,7 @@ Module checklist * Requirements should be documented, using the `requirements=[]` field * Author should be set, name and github id at least * Made use of U() for urls, C() for files and options, I() for params, M() for modules? - * GPL License header + * GPL 3 License header * Does module use check_mode? Could it be modified to use it? 
Document it * Examples: make sure they are reproducible * Return: document the return structure of the module From b72a912562a0174cf0228d4fd8bd217e2161e417 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 27 May 2015 06:50:46 -0700 Subject: [PATCH 0687/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 476af93e96f5d2..44ef8b3bc66365 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 476af93e96f5d2518470b5c27ece59cbda66ec1d +Subproject commit 44ef8b3bc66365a0ca89411041eb0d51c541d6db diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index a07fc88ba0d254..b2e4f31bebfec4 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit a07fc88ba0d2546b92fbe93b2bede699fdf2bc48 +Subproject commit b2e4f31bebfec49380659b9d65b5828f1c1ed8d9 From c2968d6d84fb5aa66ae50d9df0bed963f22abbd8 Mon Sep 17 00:00:00 2001 From: Yannig Perre Date: Wed, 27 May 2015 21:51:20 +0200 Subject: [PATCH 0688/3617] New lookup plugin : ini. Can handle ini file and java properties file. Can also read a list of value in a section using regexp. 
--- lib/ansible/runner/lookup_plugins/ini.py | 92 +++++++++++++++++++++ test/integration/lookup.ini | 24 ++++++ test/integration/lookup.properties | 5 ++ test/integration/test_lookup_properties.yml | 29 +++++++ 4 files changed, 150 insertions(+) create mode 100644 lib/ansible/runner/lookup_plugins/ini.py create mode 100644 test/integration/lookup.ini create mode 100644 test/integration/lookup.properties create mode 100644 test/integration/test_lookup_properties.yml diff --git a/lib/ansible/runner/lookup_plugins/ini.py b/lib/ansible/runner/lookup_plugins/ini.py new file mode 100644 index 00000000000000..002dda09089876 --- /dev/null +++ b/lib/ansible/runner/lookup_plugins/ini.py @@ -0,0 +1,92 @@ +# (c) 2015, Yannig Perre +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible import utils, errors +import StringIO +import os +import codecs +import ConfigParser +import re + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + self.cp = ConfigParser.ConfigParser() + + def read_properties(self, filename, key, dflt, is_regexp): + config = StringIO.StringIO() + config.write('[java_properties]\n' + open(filename).read()) + config.seek(0, os.SEEK_SET) + self.cp.readfp(config) + return self.get_value(key, 'java_properties', dflt, is_regexp) + + def read_ini(self, filename, key, section, dflt, is_regexp): + self.cp.readfp(open(filename)) + return self.get_value(key, section, dflt, is_regexp) + + def get_value(self, key, section, dflt, is_regexp): + # Retrieve all values from a section using a regexp + if is_regexp: + return [v for k, v in self.cp.items(section) if re.match(key, k)] + # Retrieve a single value + value = self.cp.get(section, key) + if value == None: + return dflt + return value + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + + if isinstance(terms, basestring): + terms = [ terms ] + + ret = [] + for term in terms: + params = term.split() + key = params[0] + + paramvals = { + 'file' : 'ansible.ini', + 're' : False, + 'default' : None, + 'section' : "global", + 'type' : "ini", + } + + # parameters specified? 
+ try: + for param in params[1:]: + name, value = param.split('=') + assert(name in paramvals) + paramvals[name] = value + except (ValueError, AssertionError), e: + raise errors.AnsibleError(e) + + path = utils.path_dwim(self.basedir, paramvals['file']) + if paramvals['type'] == "properties": + var = self.read_properties(path, key, paramvals['default'], paramvals['re']) + else: + var = self.read_ini(path, key, paramvals['section'], paramvals['default'], paramvals['re']) + if var is not None: + if type(var) is list: + for v in var: + ret.append(v) + else: + ret.append(var) + return ret diff --git a/test/integration/lookup.ini b/test/integration/lookup.ini new file mode 100644 index 00000000000000..ce0dbf848604b6 --- /dev/null +++ b/test/integration/lookup.ini @@ -0,0 +1,24 @@ +[global] +# A comment +value1=Text associated with value1 and global section +value2=Same for value2 and global section +value.dot=Properties with dot +field.with.space = another space + +[section1] +value1=Another value for section1 +# No value2 in this section + +[value_section] +value1=1 +value2=2 +value3=3 +other1=4 +other2=5 + +[other_section] +value1=1 +value2=2 +value3=3 +other1=4 +other2=5 diff --git a/test/integration/lookup.properties b/test/integration/lookup.properties new file mode 100644 index 00000000000000..f388d8cfbf5ad5 --- /dev/null +++ b/test/integration/lookup.properties @@ -0,0 +1,5 @@ +# A comment +value1=Text associated with value1 +value2=Same for value2 +value.dot=Properties with dot +field.with.space = another space diff --git a/test/integration/test_lookup_properties.yml b/test/integration/test_lookup_properties.yml new file mode 100644 index 00000000000000..dcd5eb698b2fd6 --- /dev/null +++ b/test/integration/test_lookup_properties.yml @@ -0,0 +1,29 @@ +--- +- name: "Lookup test" + hosts: "localhost" +# connection: local + tasks: + - name: "read properties value" + set_fact: + test1: "{{lookup('ini', 'value1 type=properties file=lookup.properties')}}" + test2: 
"{{lookup('ini', 'value2 type=properties file=lookup.properties')}}" + test_dot: "{{lookup('ini', 'value.dot type=properties file=lookup.properties')}}" + field_with_space: "{{lookup('ini', 'field.with.space type=properties file=lookup.properties')}}" + - debug: var={{item}} + with_items: [ 'test1', 'test2', 'test_dot', 'field_with_space' ] + - name: "read ini value" + set_fact: + value1_global: "{{lookup('ini', 'value1 section=global file=lookup.ini')}}" + value2_global: "{{lookup('ini', 'value2 section=global file=lookup.ini')}}" + value1_section1: "{{lookup('ini', 'value1 section=section1 file=lookup.ini')}}" + - debug: var={{item}} + with_items: [ 'value1_global', 'value2_global', 'value1_section1' ] + - name: "read ini value with section and regexp" + set_fact: + value_section: "{{lookup('ini', 'value[1-2] section=value_section file=lookup.ini re=true')}}" + other_section: "{{lookup('ini', 'other[1-2] section=other_section file=lookup.ini re=true')}}" + - debug: var={{item}} + with_items: [ 'value_section', 'other_section' ] + - name: "Reading unknown value" + set_fact: + value2_section2: "{{lookup('ini', 'value2 section=section1 file=lookup.ini')}}" From b91532aff358826dd9d3c04588b0cd8dcebe5a69 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 27 May 2015 13:39:09 -0700 Subject: [PATCH 0689/3617] Drop the mysql test db first so that we test with a clean slate. --- test/integration/roles/test_mysql_db/tasks/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/integration/roles/test_mysql_db/tasks/main.yml b/test/integration/roles/test_mysql_db/tasks/main.yml index 60a573bd0b821b..a059cd212a83ff 100644 --- a/test/integration/roles/test_mysql_db/tasks/main.yml +++ b/test/integration/roles/test_mysql_db/tasks/main.yml @@ -17,6 +17,11 @@ # along with Ansible. If not, see . 
# ============================================================ + +- name: make sure the test database is not there + command: mysql "-e drop database '{{db_name}}';" + ignore_errors: True + - name: test state=present for a database name (expect changed=true) mysql_db: name={{ db_name }} state=present register: result From 388827a636337df9f255aeec882b6440658abf9a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 27 May 2015 20:28:29 -0700 Subject: [PATCH 0690/3617] Update submodule ref --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 44ef8b3bc66365..2b5e932cfb4df4 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 44ef8b3bc66365a0ca89411041eb0d51c541d6db +Subproject commit 2b5e932cfb4df42f46812aee2476fdf5aabab172 From e59d4f3b51665b5e24132bb9303c682a56b63604 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 28 May 2015 01:26:04 -0500 Subject: [PATCH 0691/3617] More module_utils/basic.py unit tests for v2 --- lib/ansible/module_utils/basic.py | 2 +- test/units/module_utils/test_basic.py | 451 +++++++++++++++++++++++++- 2 files changed, 447 insertions(+), 6 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index c222bb4d16810e..793223b1652b19 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -579,7 +579,7 @@ def set_context_if_different(self, path, context, changed): if len(context) > i: if context[i] is not None and context[i] != cur_context[i]: new_context[i] = context[i] - if context[i] is None: + elif context[i] is None: new_context[i] = cur_context[i] if cur_context != new_context: diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index c3db5138bf272e..cd2bf0536e5fd9 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -21,11 
+21,12 @@ __metaclass__ = type import __builtin__ +import errno from nose.tools import timed from ansible.compat.tests import unittest -from ansible.compat.tests.mock import patch, MagicMock +from ansible.compat.tests.mock import patch, MagicMock, mock_open class TestModuleUtilsBasic(unittest.TestCase): @@ -216,6 +217,10 @@ def _gen_data(records, per_rec, top_level, secret_text): # python2.6 or less's unittest self.assertTrue(":********@foo.com/data'" in ssh_output, '%s is not present in %s' % (":********@foo.com/data'", ssh_output)) + def test_module_utils_basic_get_module_path(self): + from ansible.module_utils.basic import get_module_path + with patch('os.path.realpath', return_value='/path/to/foo/'): + self.assertEqual(get_module_path(), '/path/to/foo') def test_module_utils_basic_ansible_module_creation(self): from ansible.module_utils import basic @@ -246,6 +251,8 @@ def test_module_utils_basic_ansible_module_creation(self): supports_check_mode=True, ) + # FIXME: add asserts here to verify the basic config + # fail, because a required param was not specified basic.MODULE_COMPLEX_ARGS = '{}' self.assertRaises( @@ -288,8 +295,442 @@ def test_module_utils_basic_ansible_module_creation(self): supports_check_mode=True, ) - def test_module_utils_basic_get_module_path(self): - from ansible.module_utils.basic import get_module_path - with patch('os.path.realpath', return_value='/path/to/foo/'): - self.assertEqual(get_module_path(), '/path/to/foo') + def test_module_utils_basic_ansible_module_load_file_common_arguments(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + am.selinux_mls_enabled = MagicMock() + am.selinux_mls_enabled.return_value = True + am.selinux_default_context = MagicMock() + am.selinux_default_context.return_value = 'unconfined_u:object_r:default_t:s0'.split(':', 3) + + # with no params, the result should be an empty dict + res = 
am.load_file_common_arguments(params=dict()) + self.assertEqual(res, dict()) + + base_params = dict( + path = '/path/to/file', + mode = 0600, + owner = 'root', + group = 'root', + seuser = '_default', + serole = '_default', + setype = '_default', + selevel = '_default', + ) + + extended_params = base_params.copy() + extended_params.update(dict( + follow = True, + foo = 'bar', + )) + + final_params = base_params.copy() + final_params.update(dict( + path = '/path/to/real_file', + secontext=['unconfined_u', 'object_r', 'default_t', 's0'], + )) + + # with the proper params specified, the returned dictionary should represent + # only those params which have something to do with the file arguments, excluding + # other params and updated as required with proper values which may have been + # massaged by the method + with patch('os.path.islink', return_value=True): + with patch('os.path.realpath', return_value='/path/to/real_file'): + res = am.load_file_common_arguments(params=extended_params) + self.assertEqual(res, final_params) + + def test_module_utils_basic_ansible_module_selinux_mls_enabled(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + basic.HAVE_SELINUX = False + self.assertEqual(am.selinux_mls_enabled(), False) + + basic.HAVE_SELINUX = True + with patch('selinux.is_selinux_mls_enabled', return_value=0): + self.assertEqual(am.selinux_mls_enabled(), False) + with patch('selinux.is_selinux_mls_enabled', return_value=1): + self.assertEqual(am.selinux_mls_enabled(), True) + + def test_module_utils_basic_ansible_module_selinux_initial_context(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + am.selinux_mls_enabled = MagicMock() + am.selinux_mls_enabled.return_value = False + self.assertEqual(am.selinux_initial_context(), [None, None, None]) + 
am.selinux_mls_enabled.return_value = True + self.assertEqual(am.selinux_initial_context(), [None, None, None, None]) + + def test_module_utils_basic_ansible_module_selinux_enabled(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + # we first test the cases where the python selinux lib is + # not installed, which has two paths: one in which the system + # does have selinux installed (and the selinuxenabled command + # is present and returns 0 when run), or selinux is not installed + basic.HAVE_SELINUX = False + am.get_bin_path = MagicMock() + am.get_bin_path.return_value = '/path/to/selinuxenabled' + am.run_command = MagicMock() + am.run_command.return_value=(0, '', '') + self.assertRaises(SystemExit, am.selinux_enabled) + am.get_bin_path.return_value = None + self.assertEqual(am.selinux_enabled(), False) + + # finally we test the case where the python selinux lib is installed, + # and both possibilities there (enabled vs. 
disabled) + basic.HAVE_SELINUX = True + with patch('selinux.is_selinux_enabled', return_value=0): + self.assertEqual(am.selinux_enabled(), False) + with patch('selinux.is_selinux_enabled', return_value=1): + self.assertEqual(am.selinux_enabled(), True) + + def test_module_utils_basic_ansible_module_selinux_default_context(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + am.selinux_initial_context = MagicMock(return_value=[None, None, None, None]) + am.selinux_enabled = MagicMock(return_value=True) + + # we first test the cases where the python selinux lib is not installed + basic.HAVE_SELINUX = False + self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) + + # all following tests assume the python selinux bindings are installed + basic.HAVE_SELINUX = True + + # next, we test with a mocked implementation of selinux.matchpathcon to simulate + # an actual context being found + with patch('selinux.matchpathcon', return_value=[0, 'unconfined_u:object_r:default_t:s0']): + self.assertEqual(am.selinux_default_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0']) + + # we also test the case where matchpathcon returned a failure + with patch('selinux.matchpathcon', return_value=[-1, '']): + self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) + + # finally, we test where an OSError occurred during matchpathcon's call + with patch('selinux.matchpathcon', side_effect=OSError): + self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) + + def test_module_utils_basic_ansible_module_selinux_context(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + am.selinux_initial_context = MagicMock(return_value=[None, None, None, None]) + am.selinux_enabled = 
MagicMock(return_value=True) + + # we first test the cases where the python selinux lib is not installed + basic.HAVE_SELINUX = False + self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None]) + + # all following tests assume the python selinux bindings are installed + basic.HAVE_SELINUX = True + + # next, we test with a mocked implementation of selinux.lgetfilecon_raw to simulate + # an actual context being found + with patch('selinux.lgetfilecon_raw', return_value=[0, 'unconfined_u:object_r:default_t:s0']): + self.assertEqual(am.selinux_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0']) + + # we also test the case where matchpathcon returned a failure + with patch('selinux.lgetfilecon_raw', return_value=[-1, '']): + self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None]) + + # finally, we test where an OSError occurred during matchpathcon's call + e = OSError() + e.errno = errno.ENOENT + with patch('selinux.lgetfilecon_raw', side_effect=e): + self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar') + + e = OSError() + with patch('selinux.lgetfilecon_raw', side_effect=e): + self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar') + + def test_module_utils_basic_ansible_module_is_special_selinux_path(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + basic.SELINUX_SPECIAL_FS = 'nfs,nfsd,foos' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + def _mock_find_mount_point(path): + if path.startswith('/some/path'): + return '/some/path' + elif path.startswith('/weird/random/fstype'): + return '/weird/random/fstype' + return '/' + + am.find_mount_point = MagicMock(side_effect=_mock_find_mount_point) + am.selinux_context = MagicMock(return_value=['foo_u', 'foo_r', 'foo_t', 's0']) + + m = mock_open() + m.side_effect = OSError + + with patch('__builtin__.open', m, create=True): + 
self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (False, None)) + + mount_data = [ + '/dev/disk1 / ext4 rw,seclabel,relatime,data=ordered 0 0\n', + '1.1.1.1:/path/to/nfs /some/path nfs ro 0 0\n', + 'whatever /weird/random/fstype foos rw 0 0\n', + ] + + # mock_open has a broken readlines() implementation apparently... + # this should work by default but doesn't, so we fix it + m = mock_open(read_data=''.join(mount_data)) + m.return_value.readlines.return_value = mount_data + + with patch('__builtin__.open', m, create=True): + self.assertEqual(am.is_special_selinux_path('/some/random/path'), (False, None)) + self.assertEqual(am.is_special_selinux_path('/some/path/that/should/be/nfs'), (True, ['foo_u', 'foo_r', 'foo_t', 's0'])) + self.assertEqual(am.is_special_selinux_path('/weird/random/fstype/path'), (True, ['foo_u', 'foo_r', 'foo_t', 's0'])) + + def test_module_utils_basic_ansible_module_to_filesystem_str(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + self.assertEqual(am._to_filesystem_str(u'foo'), 'foo') + self.assertEqual(am._to_filesystem_str(u'föö'), 'f\xc3\xb6\xc3\xb6') + + def test_module_utils_basic_ansible_module_user_and_group(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + mock_stat = MagicMock() + mock_stat.st_uid = 0 + mock_stat.st_gid = 0 + + with patch('os.lstat', return_value=mock_stat): + self.assertEqual(am.user_and_group('/path/to/file'), (0, 0)) + + def test_module_utils_basic_ansible_module_find_mount_point(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + def _mock_ismount(path): + if path == '/': + return True + return False + + with patch('os.path.ismount', side_effect=_mock_ismount): + 
self.assertEqual(am.find_mount_point('/root/fs/../mounted/path/to/whatever'), '/') + + def _mock_ismount(path): + if path == '/subdir/mount': + return True + return False + + with patch('os.path.ismount', side_effect=_mock_ismount): + self.assertEqual(am.find_mount_point('/subdir/mount/path/to/whatever'), '/subdir/mount') + + def test_module_utils_basic_ansible_module_set_context_if_different(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + basic.HAS_SELINUX = False + + am.selinux_enabled = MagicMock(return_value=False) + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True), True) + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), False) + + basic.HAS_SELINUX = True + + am.selinux_enabled = MagicMock(return_value=True) + am.selinux_context = MagicMock(return_value=['bar_u', 'bar_r', None, None]) + am.is_special_selinux_path = MagicMock(return_value=(False, None)) + + with patch('selinux.lsetfilecon', return_value=0) as m: + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) + m.assert_called_with('/path/to/file', 'foo_u:foo_r:foo_t:s0') + m.reset_mock() + am.check_mode = True + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) + self.assertEqual(m.called, False) + am.check_mode = False + + with patch('selinux.lsetfilecon', return_value=1) as m: + self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True) + + with patch('selinux.lsetfilecon', side_effect=OSError) as m: + self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True) + + am.is_special_selinux_path = MagicMock(return_value=(True, ['sp_u', 'sp_r', 'sp_t', 's0'])) + + with 
patch('selinux.lsetfilecon', return_value=0) as m: + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) + m.assert_called_with('/path/to/file', 'sp_u:sp_r:sp_t:s0') + + def test_module_utils_basic_ansible_module_set_owner_if_different(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + self.assertEqual(am.set_owner_if_different('/path/to/file', None, True), True) + self.assertEqual(am.set_owner_if_different('/path/to/file', None, False), False) + + am.user_and_group = MagicMock(return_value=(500, 500)) + + with patch('os.lchown', return_value=None) as m: + self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True) + m.assert_called_with('/path/to/file', 0, -1) + + def _mock_getpwnam(*args, **kwargs): + mock_pw = MagicMock() + mock_pw.pw_uid = 0 + return mock_pw + + m.reset_mock() + with patch('pwd.getpwnam', side_effect=_mock_getpwnam): + self.assertEqual(am.set_owner_if_different('/path/to/file', 'root', False), True) + m.assert_called_with('/path/to/file', 0, -1) + + with patch('pwd.getpwnam', side_effect=KeyError): + self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False) + + m.reset_mock() + am.check_mode = True + self.assertEqual(am.set_owner_if_different('/path/to/file', 0, False), True) + self.assertEqual(m.called, False) + am.check_mode = False + + with patch('os.lchown', side_effect=OSError) as m: + self.assertRaises(SystemExit, am.set_owner_if_different, '/path/to/file', 'root', False) + + def test_module_utils_basic_ansible_module_set_group_if_different(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + self.assertEqual(am.set_group_if_different('/path/to/file', None, True), True) + self.assertEqual(am.set_group_if_different('/path/to/file', None, False), 
False) + + am.user_and_group = MagicMock(return_value=(500, 500)) + + with patch('os.lchown', return_value=None) as m: + self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True) + m.assert_called_with('/path/to/file', -1, 0) + + def _mock_getgrnam(*args, **kwargs): + mock_gr = MagicMock() + mock_gr.gr_gid = 0 + return mock_gr + + m.reset_mock() + with patch('grp.getgrnam', side_effect=_mock_getgrnam): + self.assertEqual(am.set_group_if_different('/path/to/file', 'root', False), True) + m.assert_called_with('/path/to/file', -1, 0) + + with patch('grp.getgrnam', side_effect=KeyError): + self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False) + + m.reset_mock() + am.check_mode = True + self.assertEqual(am.set_group_if_different('/path/to/file', 0, False), True) + self.assertEqual(m.called, False) + am.check_mode = False + + with patch('os.lchown', side_effect=OSError) as m: + self.assertRaises(SystemExit, am.set_group_if_different, '/path/to/file', 'root', False) + + def test_module_utils_basic_ansible_module_set_mode_if_different(self): + from ansible.module_utils import basic + + basic.MODULE_COMPLEX_ARGS = '{}' + am = basic.AnsibleModule( + argument_spec = dict(), + ) + + mock_stat1 = MagicMock() + mock_stat1.st_mode = 0444 + mock_stat2 = MagicMock() + mock_stat2.st_mode = 0660 + + with patch('os.lstat', side_effect=[mock_stat1]): + self.assertEqual(am.set_mode_if_different('/path/to/file', None, True), True) + with patch('os.lstat', side_effect=[mock_stat1]): + self.assertEqual(am.set_mode_if_different('/path/to/file', None, False), False) + + with patch('os.lstat') as m: + with patch('os.lchmod', return_value=None, create=True) as m_os: + m.side_effect = [mock_stat1, mock_stat2, mock_stat2] + self.assertEqual(am.set_mode_if_different('/path/to/file', 0660, False), True) + m_os.assert_called_with('/path/to/file', 0660) + + m.side_effect = [mock_stat1, mock_stat2, mock_stat2] + am._symbolic_mode_to_octal = 
MagicMock(return_value=0660) + self.assertEqual(am.set_mode_if_different('/path/to/file', 'o+w,g+w,a-r', False), True) + m_os.assert_called_with('/path/to/file', 0660) + + m.side_effect = [mock_stat1, mock_stat2, mock_stat2] + am._symbolic_mode_to_octal = MagicMock(side_effect=Exception) + self.assertRaises(SystemExit, am.set_mode_if_different, '/path/to/file', 'o+w,g+w,a-r', False) + + m.side_effect = [mock_stat1, mock_stat2, mock_stat2] + am.check_mode = True + self.assertEqual(am.set_mode_if_different('/path/to/file', 0660, False), True) + am.check_mode = False + + # FIXME: this isn't working yet + #with patch('os.lstat', side_effect=[mock_stat1, mock_stat2]): + # with patch('os.lchmod', return_value=None, create=True) as m_os: + # del m_os.lchmod + # with patch('os.path.islink', return_value=False): + # with patch('os.chmod', return_value=None) as m_chmod: + # self.assertEqual(am.set_mode_if_different('/path/to/file/no_lchmod', 0660, False), True) + # m_chmod.assert_called_with('/path/to/file', 0660) + # with patch('os.path.islink', return_value=True): + # with patch('os.chmod', return_value=None) as m_chmod: + # with patch('os.stat', return_value=mock_stat2): + # self.assertEqual(am.set_mode_if_different('/path/to/file', 0660, False), True) + # m_chmod.assert_called_with('/path/to/file', 0660) From 37ae5aab31ad10bf4e194b54e09050d5dbd807ef Mon Sep 17 00:00:00 2001 From: alberto Date: Thu, 28 May 2015 12:19:32 +0200 Subject: [PATCH 0692/3617] Capture only IOError when reading shebang from inventory file, to avoid ignoring other possible exceptions like timeouts from a task --- lib/ansible/inventory/__init__.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index f012246e227016..e4080e39d825a1 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -105,19 +105,18 @@ def __init__(self, host_list=C.DEFAULT_HOST_LIST, 
vault_password=None): # class we can show a more apropos error shebang_present = False try: - inv_file = open(host_list) - first_line = inv_file.readlines()[0] - inv_file.close() - if first_line.startswith('#!'): - shebang_present = True - except: + with open(host_list, "r") as inv_file: + first_line = inv_file.readline() + if first_line.startswith("#!"): + shebang_present = True + except IOError: pass if utils.is_executable(host_list): try: self.parser = InventoryScript(filename=host_list) self.groups = self.parser.groups.values() - except: + except errors.AnsibleError: if not shebang_present: raise errors.AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \ "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list) @@ -127,7 +126,7 @@ def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): try: self.parser = InventoryParser(filename=host_list) self.groups = self.parser.groups.values() - except: + except errors.AnsibleError: if shebang_present: raise errors.AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \ "Perhaps you want to correct this with `chmod +x %s`?" % host_list) From aef76cc701d8f647444c624da664bb65e84e6bce Mon Sep 17 00:00:00 2001 From: Edwin Chiu Date: Thu, 28 May 2015 14:43:25 -0400 Subject: [PATCH 0693/3617] More complex example of using test-module --- hacking/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/hacking/README.md b/hacking/README.md index ae8db7e3a9b952..be19249519204b 100644 --- a/hacking/README.md +++ b/hacking/README.md @@ -33,6 +33,22 @@ Example: This is a good way to insert a breakpoint into a module, for instance. 
+For more complex arguments such as the following yaml: + +```yaml +parent: + child: + - item: first + val: foo + - item: second + val: boo +``` + +Use: + + $ ./hacking/test-module -m module \ + -a '{"parent": {"child": [{"item": "first", "val": "foo"}, {"item": "second", "val": "boo"}]}}' + +Module-formatter ---------------- From 1ccf2a4685d136a81d266ed5728c7f2c9b7351e4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 12:35:37 -0700 Subject: [PATCH 0694/3617] Make fetch_url check the server's certificate on https connections --- lib/ansible/module_utils/urls.py | 51 ++++++++++++------- .../roles/test_get_url/tasks/main.yml | 20 ++++++++ 2 files changed, 53 insertions(+), 18 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index d56cc89395e338..18317e86aeb8e3 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -50,6 +50,15 @@ except: HAS_SSL=False +HAS_MATCH_HOSTNAME = True +try: + from ssl import match_hostname, CertificateError +except ImportError: + try: + from backports.ssl_match_hostname import match_hostname, CertificateError + except ImportError: + HAS_MATCH_HOSTNAME = False + import httplib import os import re @@ -293,11 +302,13 @@ def http_request(self, req): connect_result = s.recv(4096) self.validate_proxy_response(connect_result) ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) + match_hostname(ssl_s.getpeercert(), self.hostname) else: self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.'
% proxy_parts.get('scheme')) else: s.connect((self.hostname, self.port)) ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) + match_hostname(ssl_s.getpeercert(), self.hostname) # close the ssl connection #ssl_s.unwrap() s.close() @@ -311,6 +322,9 @@ def http_request(self, req): 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \ 'Paths checked for this platform: %s' % ", ".join(paths_checked) ) + except CertificateError: + self.module.fail_json(msg="SSL Certificate does not belong to %s. Make sure the url has a certificate that belongs to it or use validate_certs=no (insecure)" % self.hostname) + try: # cleanup the temp file created, don't worry # if it fails for some reason @@ -363,28 +377,29 @@ def fetch_url(module, url, data=None, headers=None, method=None, # FIXME: change the following to use the generic_urlparse function # to remove the indexed references for 'parsed' parsed = urlparse.urlparse(url) - if parsed[0] == 'https': - if not HAS_SSL and validate_certs: + if parsed[0] == 'https' and validate_certs: + if not HAS_SSL: if distribution == 'Redhat': module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. You can also install python-ssl from EPEL') else: module.fail_json(msg='SSL validation is not available in your version of python. 
You can use validate_certs=no, however this is unsafe and not recommended') - - elif validate_certs: - # do the cert validation - netloc = parsed[1] - if '@' in netloc: - netloc = netloc.split('@', 1)[1] - if ':' in netloc: - hostname, port = netloc.split(':', 1) - port = int(port) - else: - hostname = netloc - port = 443 - # create the SSL validation handler and - # add it to the list of handlers - ssl_handler = SSLValidationHandler(module, hostname, port) - handlers.append(ssl_handler) + if not HAS_MATCH_HOSTNAME: + module.fail_json(msg='Available SSL validation does not check that the certificate matches the hostname. You can install backports.ssl_match_hostname or update your managed machine to python-2.7.9 or newer. You could also use validate_certs=no, however this is unsafe and not recommended') + + # do the cert validation + netloc = parsed[1] + if '@' in netloc: + netloc = netloc.split('@', 1)[1] + if ':' in netloc: + hostname, port = netloc.split(':', 1) + port = int(port) + else: + hostname = netloc + port = 443 + # create the SSL validation handler and + # add it to the list of handlers + ssl_handler = SSLValidationHandler(module, hostname, port) + handlers.append(ssl_handler) if parsed[0] != 'ftp': username = module.params.get('url_username', '') diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 1aa4b287ea7a2d..6d016fe6be3844 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -25,3 +25,23 @@ that: - result.changed - '"OK" in result.msg' + +- name: test https fetch to a site with invalid domain + get_url: + url: "https://kennethreitz.org/" + dest: "{{ output_dir }}/shouldnotexist.html" + ignore_errors: True + register: result + +- stat: + path: "{{ output_dir }}/shouldnotexist.html" + register: stat_result + +- debug: var=result + +- name: Assert that the file was not downloaded + assert: + that: + - 
"result.failed == true" + - "'Certificate does not belong to ' in result.msg" + - "stat_result.stat.exists == false" From afc19894e1006780d2f248e325f7ecae84bb4f14 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 12:35:37 -0700 Subject: [PATCH 0695/3617] Make fetch_url check the server's certificate on https connections --- lib/ansible/module_utils/urls.py | 51 ++++++++++++------- .../roles/test_get_url/tasks/main.yml | 20 ++++++++ 2 files changed, 53 insertions(+), 18 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index d56cc89395e338..18317e86aeb8e3 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -50,6 +50,15 @@ except: HAS_SSL=False +HAS_MATCH_HOSTNAME = True +try: + from ssl import match_hostname, CertificateError +except ImportError: + try: + from backports.ssl_match_hostname import match_hostname, CertificateError + except ImportError: + HAS_MATCH_HOSTNAME = False + import httplib import os import re @@ -293,11 +302,13 @@ def http_request(self, req): connect_result = s.recv(4096) self.validate_proxy_response(connect_result) ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) + match_hostname(ssl_s.getpeercert(), self.hostname) else: self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme')) else: s.connect((self.hostname, self.port)) ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) + match_hostname(ssl_s.getpeercert(), self.hostname) # close the ssl connection #ssl_s.unwrap() s.close() @@ -311,6 +322,9 @@ def http_request(self, req): 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \ 'Paths checked for this platform: %s' % ", ".join(paths_checked) ) + except CertificateError: + self.module.fail_json(msg="SSL Certificate does not belong to %s. 
Make sure the url has a certificate that belongs to it or use validate_certs=no (insecure)" % self.hostname) + try: # cleanup the temp file created, don't worry # if it fails for some reason @@ -363,28 +377,29 @@ def fetch_url(module, url, data=None, headers=None, method=None, # FIXME: change the following to use the generic_urlparse function # to remove the indexed references for 'parsed' parsed = urlparse.urlparse(url) - if parsed[0] == 'https': - if not HAS_SSL and validate_certs: + if parsed[0] == 'https' and validate_certs: + if not HAS_SSL: if distribution == 'Redhat': module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. You can also install python-ssl from EPEL') else: module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended') - - elif validate_certs: - # do the cert validation - netloc = parsed[1] - if '@' in netloc: - netloc = netloc.split('@', 1)[1] - if ':' in netloc: - hostname, port = netloc.split(':', 1) - port = int(port) - else: - hostname = netloc - port = 443 - # create the SSL validation handler and - # add it to the list of handlers - ssl_handler = SSLValidationHandler(module, hostname, port) - handlers.append(ssl_handler) + if not HAS_MATCH_HOSTNAME: + module.fail_json(msg='Available SSL validation does not check that the certificate matches the hostname. You can install backports.ssl_match_hostname or update your managed machine to python-2.7.9 or newer. 
You could also use validate_certs=no, however this is unsafe and not recommended') + + # do the cert validation + netloc = parsed[1] + if '@' in netloc: + netloc = netloc.split('@', 1)[1] + if ':' in netloc: + hostname, port = netloc.split(':', 1) + port = int(port) + else: + hostname = netloc + port = 443 + # create the SSL validation handler and + # add it to the list of handlers + ssl_handler = SSLValidationHandler(module, hostname, port) + handlers.append(ssl_handler) if parsed[0] != 'ftp': username = module.params.get('url_username', '') diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 1aa4b287ea7a2d..6d016fe6be3844 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -25,3 +25,23 @@ that: - result.changed - '"OK" in result.msg' + +- name: test https fetch to a site with invalid domain + get_url: + url: "https://kennethreitz.org/" + dest: "{{ output_dir }}/shouldnotexist.html" + ignore_errors: True + register: result + +- stat: + path: "{{ output_dir }}/shouldnotexist.html" + register: stat_result + +- debug: var=result + +- name: Assert that the file was not downloaded + assert: + that: + - "result.failed == true" + - "'Certificate does not belong to ' in result.msg" + - "stat_result.stat.exists == false" From 4d8427538dbf3b15e65622b56ff20a6fc67429fd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 15:10:06 -0700 Subject: [PATCH 0696/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 2b5e932cfb4df4..7fea93835c172d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 2b5e932cfb4df42f46812aee2476fdf5aabab172 +Subproject commit 7fea93835c172d23638959cbe2d00a3be8d14557 diff --git 
a/lib/ansible/modules/extras b/lib/ansible/modules/extras index b2e4f31bebfec4..c223716bc7ccf2 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit b2e4f31bebfec49380659b9d65b5828f1c1ed8d9 +Subproject commit c223716bc7ccf2d0ac7995b36f76cca8ccd5bfda From 0f4a3409d851c658a765c95442d985ea7b9a13ec Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 15:35:25 -0700 Subject: [PATCH 0697/3617] Add test that validate_certs=no works --- .../roles/test_get_url/tasks/main.yml | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 6d016fe6be3844..3a6bc509c0e541 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -26,7 +26,7 @@ - result.changed - '"OK" in result.msg' -- name: test https fetch to a site with invalid domain +- name: test https fetch to a site with mismatched hostname and certificate get_url: url: "https://kennethreitz.org/" dest: "{{ output_dir }}/shouldnotexist.html" @@ -37,11 +37,26 @@ path: "{{ output_dir }}/shouldnotexist.html" register: stat_result -- debug: var=result - - name: Assert that the file was not downloaded assert: that: - "result.failed == true" - "'Certificate does not belong to ' in result.msg" - "stat_result.stat.exists == false" + +- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no + get_url: + url: "https://kennethreitz.org/" + dest: "{{ output_dir }}/kreitz.html" + validate_certs: no + register: result + +- stat: + path: "{{ output_dir }}/kreitz.html" + register: stat_result + +- name: Assert that the file was not downloaded + assert: + that: + - "result.failed == false" + - "stat_result.stat.exists == true" From 1bda7cc200d5bd1054d1bcb3b1986afe80b30dbd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 
2015 15:35:45 -0700 Subject: [PATCH 0698/3617] Test that uri module validates certs --- .../integration/roles/test_uri/tasks/main.yml | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 66e01ae8e53124..da4bf655749fc9 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -91,3 +91,38 @@ with_together: - fail_checksum.results - fail.results + +- name: test https fetch to a site with mismatched hostname and certificate + uri: + url: "https://kennethreitz.org/" + dest: "{{ output_dir }}/shouldnotexist.html" + ignore_errors: True + register: result + +- stat: + path: "{{ output_dir }}/shouldnotexist.html" + register: stat_result + +- name: Assert that the file was not downloaded + assert: + that: + - "result.failed == true" + - "'certificate does not match ' in result.msg" + - "stat_result.stat.exists == false" + +- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no + get_url: + url: "https://kennethreitz.org/" + dest: "{{ output_dir }}/kreitz.html" + validate_certs: no + register: result + +- stat: + path: "{{ output_dir }}/kreitz.html" + register: stat_result + +- name: Assert that the file was not downloaded + assert: + that: + - "result.failed == false" + - "stat_result.stat.exists == true" From 2f4ad2714f773b0a34dfc5ba4be4e3e62719df53 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 15:36:35 -0700 Subject: [PATCH 0699/3617] Update core module ref --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 7fea93835c172d..a7a3ef54d7e917 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 7fea93835c172d23638959cbe2d00a3be8d14557 +Subproject commit 
a7a3ef54d7e917fb81d44cda4266ff2b4e8870c9 From 5ffc1183dd18397048d9a82d720cb79882c88bfd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 15:37:12 -0700 Subject: [PATCH 0700/3617] WHoops, that was the core module stable branch --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index a7a3ef54d7e917..5983d64d7728ea 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit a7a3ef54d7e917fb81d44cda4266ff2b4e8870c9 +Subproject commit 5983d64d7728ea88ef27606e95e4aa34cde5ff46 From 5d213cab23ced2664fdd0d77a9c1e1b11a3d489b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 16:00:58 -0700 Subject: [PATCH 0701/3617] Update extras submodule ref for doc fix --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index c223716bc7ccf2..1276420a3a3934 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit c223716bc7ccf2d0ac7995b36f76cca8ccd5bfda +Subproject commit 1276420a3a39340fcd9e053a1e621cdd89f480fa From e5190327f2131997cae02e57e0c012e69c1a1828 Mon Sep 17 00:00:00 2001 From: Stefan Midjich Date: Wed, 6 May 2015 22:47:53 +0200 Subject: [PATCH 0702/3617] this fixes ansible on openbsd and freebsd systems. only tested on openbsd. --- lib/ansible/module_utils/facts.py | 37 +++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 6ddae5df855d65..7209f699c34bc1 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2535,6 +2535,43 @@ def get_virtual_facts(self): self.facts['virtualization_role'] = 'NA' return +class FreeBSDVirtual(Virtual): + """ + This is a FreeBSD-specific subclass of Virtual. 
It defines + - virtualization_type + - virtualization_role + """ + platform = 'FreeBSD' + + def __init__(self): + Virtual.__init__(self) + + def populate(self): + self.get_virtual_facts() + return self.facts + + def get_virtual_facts(self): + self.facts['virtualization_type'] = '' + self.facts['virtualization_role'] = '' + +class OpenBSDVirtual(Virtual): + """ + This is a OpenBSD-specific subclass of Virtual. It defines + - virtualization_type + - virtualization_role + """ + platform = 'OpenBSD' + + def __init__(self): + Virtual.__init__(self) + + def populate(self): + self.get_virtual_facts() + return self.facts + + def get_virtual_facts(self): + self.facts['virtualization_type'] = '' + self.facts['virtualization_role'] = '' class HPUXVirtual(Virtual): """ From 73b7d96501420fcce7bc002bd839ec9cafde6a0a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 17:01:18 -0700 Subject: [PATCH 0703/3617] Test on fields that exist --- test/integration/roles/test_get_url/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 3a6bc509c0e541..88ff3b2e21c648 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -55,8 +55,8 @@ path: "{{ output_dir }}/kreitz.html" register: stat_result -- name: Assert that the file was not downloaded +- name: Assert that the file was downloaded assert: that: - - "result.failed == false" + - "result.changed == true" - "stat_result.stat.exists == true" From e7a096c4c53084572adf3c67ccd245919c47e0a8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 28 May 2015 20:01:39 -0400 Subject: [PATCH 0704/3617] cowsay is back! 
--- lib/ansible/utils/display.py | 50 ++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index d5b6ad71a93fd1..6c5e850a700cba 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -20,6 +20,9 @@ __metaclass__ = type import textwrap +import os +import random +import subprocess import sys from ansible import constants as C @@ -37,6 +40,31 @@ def __init__(self, verbosity=0): self._warns = {} self._errors = {} + self.cowsay = None + self.noncow = os.getenv("ANSIBLE_COW_SELECTION",None) + self.set_cowsay_info() + + def set_cowsay_info(self): + + if not C.ANSIBLE_NOCOWS: + if os.path.exists("/usr/bin/cowsay"): + self.cowsay = "/usr/bin/cowsay" + elif os.path.exists("/usr/games/cowsay"): + self.cowsay = "/usr/games/cowsay" + elif os.path.exists("/usr/local/bin/cowsay"): + # BSD path for cowsay + self.cowsay = "/usr/local/bin/cowsay" + elif os.path.exists("/opt/local/bin/cowsay"): + # MacPorts path for cowsay + self.cowsay = "/opt/local/bin/cowsay" + + if self.cowsay and self.noncow == 'random': + cmd = subprocess.Popen([self.cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + cows = out.split() + cows.append(False) + self.noncow = random.choice(cows) + def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False): msg2 = msg if color: @@ -125,6 +153,14 @@ def banner(self, msg, color=None): Prints a header-looking line with stars taking up to 80 columns of width (3 columns, minimum) ''' + if self.cowsay: + try: + self.banner_cowsay(msg) + return + except OSError: + # somebody cleverly deleted cowsay or something during the PB run. heh. 
+ pass + msg = msg.strip() star_len = (80 - len(msg)) if star_len < 0: @@ -132,6 +168,20 @@ def banner(self, msg, color=None): stars = "*" * star_len self.display("\n%s %s" % (msg, stars), color=color) + def banner_cowsay(self, msg, color=None): + if ": [" in msg: + msg = msg.replace("[","") + if msg.endswith("]"): + msg = msg[:-1] + runcmd = [self.cowsay,"-W", "60"] + if self.noncow: + runcmd.append('-f') + runcmd.append(self.noncow) + runcmd.append(msg) + cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + self.display("%s\n" % out, color=color) + def error(self, msg): new_msg = "\n[ERROR]: %s" % msg wrapped = textwrap.wrap(new_msg, 79) From ac14ad1419aff12aa9b7186dae129fe9aa770106 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 17:02:48 -0700 Subject: [PATCH 0705/3617] Test on fields that are actually set --- test/integration/roles/test_uri/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index da4bf655749fc9..99c6048a59e181 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -121,8 +121,8 @@ path: "{{ output_dir }}/kreitz.html" register: stat_result -- name: Assert that the file was not downloaded +- name: Assert that the file was downloaded assert: that: - - "result.failed == false" + - "result.changed == true" - "stat_result.stat.exists == true" From fe014148d9ed97c11951f9c6d34c72c1c303c64a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 28 May 2015 20:29:16 -0500 Subject: [PATCH 0706/3617] Removing errant debug print --- lib/ansible/plugins/strategies/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index e933ca73d4c580..e37610a9dba2d2 100644 --- 
a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -96,7 +96,6 @@ def run(self, iterator, connection_info, result=True): return 0 def get_hosts_remaining(self, play): - print("inventory get hosts: %s" % self._inventory.get_hosts(play.hosts)) return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts] def get_failed_hosts(self, play): From 7985d2a8be1804c53390e14618d141b1ad33fb0a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 28 May 2015 23:58:38 -0500 Subject: [PATCH 0707/3617] Moving included file stuff to a proper dedicated class and file (v2) --- lib/ansible/playbook/included_file.py | 79 ++++++++++++++++++++++ lib/ansible/plugins/strategies/__init__.py | 17 +++-- lib/ansible/plugins/strategies/linear.py | 62 ++--------------- 3 files changed, 98 insertions(+), 60 deletions(-) create mode 100644 lib/ansible/playbook/included_file.py diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py new file mode 100644 index 00000000000000..74fdfbc9034382 --- /dev/null +++ b/lib/ansible/playbook/included_file.py @@ -0,0 +1,79 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +class IncludedFile: + + def __init__(self, filename, args, task): + self._filename = filename + self._args = args + self._task = task + self._hosts = [] + + def add_host(self, host): + if host not in self._hosts: + self._hosts.append(host) + + def __eq__(self, other): + return other._filename == self._filename and other._args == self._args + + def __repr__(self): + return "%s (%s): %s" % (self._filename, self._args, self._hosts) + + @staticmethod + def process_include_results(results, tqm, iterator, loader): + included_files = [] + + for res in results: + if res._host in tqm._failed_hosts: + raise AnsibleError("host is failed, not including files") + + if res._task.action == 'include': + if res._task.loop: + include_results = res._result['results'] + else: + include_results = [ res._result ] + + for include_result in include_results: + # if the task result was skipped or failed, continue + if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result: + continue + + original_task = iterator.get_original_task(res._host, res._task) + if original_task and original_task._role: + include_file = loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include']) + else: + include_file = loader.path_dwim(res._task.args.get('_raw_params')) + + include_variables = include_result.get('include_variables', dict()) + if 'item' in include_result: + include_variables['item'] = include_result['item'] + + inc_file = IncludedFile(include_file, include_variables, original_task) + + try: + pos = included_files.index(inc_file) + inc_file = included_files[pos] + except ValueError: + included_files.append(inc_file) + + inc_file.add_host(res._host) + + return included_files diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index e37610a9dba2d2..03ad57ed4ac5d5 
100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -23,10 +23,9 @@ import time from ansible.errors import * - +from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group - from ansible.playbook.handler import Handler from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.role import ROLE_CACHE, hash_params @@ -307,12 +306,22 @@ def _add_group(self, host, group_name): # and add the host to the group new_group.add_host(actual_host) - def _load_included_file(self, included_file): + def _load_included_file(self, included_file, iterator): ''' Loads an included YAML file of tasks, applying the optional set of variables. ''' - data = self._loader.load_from_file(included_file._filename) + try: + data = self._loader.load_from_file(included_file._filename) + except AnsibleError, e: + for host in included_file._hosts: + tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e))) + iterator.mark_host_failed(host) + self._tqm._failed_hosts[host.name] = True + self._tqm._stats.increment('failures', host.name) + self._tqm.send_callback('v2_runner_on_failed', tr) + return [] + if not isinstance(data, list): raise AnsibleParserError("included task files must contain a list of tasks", obj=included_file._task._ds) diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index ec829c8996a38f..af12587b926ebf 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -22,6 +22,7 @@ from ansible.errors import AnsibleError from ansible.executor.play_iterator import PlayIterator from ansible.playbook.block import Block +from ansible.playbook.included_file import IncludedFile from ansible.playbook.task import Task from ansible.plugins import action_loader from ansible.plugins.strategies import StrategyBase @@ -114,7 
+115,6 @@ def _advance_selected_hosts(hosts, cur_block, cur_state): # return None for all hosts in the list return [(host, None) for host in hosts] - def run(self, iterator, connection_info): ''' The linear strategy is simple - get the next task and queue @@ -208,61 +208,11 @@ def run(self, iterator, connection_info): results = self._wait_on_pending_results(iterator) host_results.extend(results) - # FIXME: this needs to be somewhere else - class IncludedFile: - def __init__(self, filename, args, task): - self._filename = filename - self._args = args - self._task = task - self._hosts = [] - def add_host(self, host): - if host not in self._hosts: - self._hosts.append(host) - def __eq__(self, other): - return other._filename == self._filename and other._args == self._args - def __repr__(self): - return "%s (%s): %s" % (self._filename, self._args, self._hosts) - - # FIXME: this should also be moved to the base class in a method - included_files = [] - for res in host_results: - if res._host in self._tqm._failed_hosts: - return 1 - - if res._task.action == 'include': - if res._task.loop: - include_results = res._result['results'] - else: - include_results = [ res._result ] - - for include_result in include_results: - # if the task result was skipped or failed, continue - if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result: - continue - - original_task = iterator.get_original_task(res._host, res._task) - if original_task and original_task._role: - include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include']) - else: - include_file = self._loader.path_dwim(res._task.args.get('_raw_params')) - - include_variables = include_result.get('include_variables', dict()) - if 'item' in include_result: - include_variables['item'] = include_result['item'] - - inc_file = IncludedFile(include_file, include_variables, original_task) - - try: - pos = included_files.index(inc_file) - inc_file = 
included_files[pos] - except ValueError: - included_files.append(inc_file) - - inc_file.add_host(res._host) + try: + included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader) + except AnsibleError, e: + return 1 - # FIXME: should this be moved into the iterator class? Main downside would be - # that accessing the TQM's callback member would be more difficult, if - # we do want to send callbacks from here if len(included_files) > 0: noop_task = Task() noop_task.action = 'meta' @@ -274,7 +224,7 @@ def __repr__(self): # included hosts get the task list while those excluded get an equal-length # list of noop tasks, to make sure that they continue running in lock-step try: - new_blocks = self._load_included_file(included_file) + new_blocks = self._load_included_file(included_file, iterator=iterator) except AnsibleError, e: for host in included_file._hosts: iterator.mark_host_failed(host) From 0828028c71bb5273a6796c0c47f93cf23b818471 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 29 May 2015 00:15:14 -0500 Subject: [PATCH 0708/3617] Fixing unit test for included file changes --- test/units/plugins/strategies/test_strategy_base.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 7d8cb42ee6e9d1..4c177f73434fb5 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -299,14 +299,17 @@ def test_strategy_base_load_included_file(self): mock_task._block = mock_block mock_task._role = None + mock_iterator = MagicMock() + mock_iterator.mark_host_failed.return_value = None + mock_inc_file = MagicMock() mock_inc_file._task = mock_task mock_inc_file._filename = "test.yml" - res = strategy_base._load_included_file(included_file=mock_inc_file) + res = 
strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator) mock_inc_file._filename = "bad.yml" - self.assertRaises(AnsibleParserError, strategy_base._load_included_file, included_file=mock_inc_file) + self.assertRaises(AnsibleParserError, strategy_base._load_included_file, included_file=mock_inc_file, iterator=mock_iterator) def test_strategy_base_run_handlers(self): workers = [] From 9371c38af928f750114525e5f447ebad73446caa Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Fri, 29 May 2015 14:50:08 +0100 Subject: [PATCH 0709/3617] Add -Compress to ConvertTo-Json calls in common powershell code --- lib/ansible/module_utils/powershell.ps1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1 index ee7d3ddeca4ba8..9606f47783b66c 100644 --- a/lib/ansible/module_utils/powershell.ps1 +++ b/lib/ansible/module_utils/powershell.ps1 @@ -65,7 +65,7 @@ Function Exit-Json($obj) $obj = New-Object psobject } - echo $obj | ConvertTo-Json -Depth 99 + echo $obj | ConvertTo-Json -Compress -Depth 99 Exit } @@ -89,7 +89,7 @@ Function Fail-Json($obj, $message = $null) Set-Attr $obj "msg" $message Set-Attr $obj "failed" $true - echo $obj | ConvertTo-Json -Depth 99 + echo $obj | ConvertTo-Json -Compress -Depth 99 Exit 1 } From 12691ce109dcf1625c6c41357ce26f95da0862f0 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Fri, 29 May 2015 14:50:08 +0100 Subject: [PATCH 0710/3617] Add -Compress to ConvertTo-Json calls in common powershell code --- lib/ansible/module_utils/powershell.ps1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1 index 57d2c1b101caa7..c58ac4b9b7521d 100644 --- a/lib/ansible/module_utils/powershell.ps1 +++ b/lib/ansible/module_utils/powershell.ps1 @@ -65,7 +65,7 @@ Function Exit-Json($obj) $obj = New-Object psobject } - echo $obj | 
ConvertTo-Json -Depth 99 + echo $obj | ConvertTo-Json -Compress -Depth 99 Exit } @@ -89,7 +89,7 @@ Function Fail-Json($obj, $message = $null) Set-Attr $obj "msg" $message Set-Attr $obj "failed" $true - echo $obj | ConvertTo-Json -Depth 99 + echo $obj | ConvertTo-Json -Compress -Depth 99 Exit 1 } From dee2d53b3e68e85d96d821167183803ad7e27f99 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 29 May 2015 08:51:50 -0700 Subject: [PATCH 0711/3617] Update v2 submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9cc23c749a8cd5..191a672891359f 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9cc23c749a8cd5039db7aa1998d310bbb04d1e13 +Subproject commit 191a672891359f3b6faff83cb0613f1b38e3fc0e diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index a07fc88ba0d254..1276420a3a3934 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit a07fc88ba0d2546b92fbe93b2bede699fdf2bc48 +Subproject commit 1276420a3a39340fcd9e053a1e621cdd89f480fa From 1e418fe56a67bfa18468783f47c75781f02b11e4 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 29 May 2015 13:57:11 -0400 Subject: [PATCH 0712/3617] Only run win_feature tests when the host has the ServerManager module. --- .../roles/test_win_feature/tasks/main.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/integration/roles/test_win_feature/tasks/main.yml b/test/integration/roles/test_win_feature/tasks/main.yml index a49622c232d347..4b31f8b3581db4 100644 --- a/test/integration/roles/test_win_feature/tasks/main.yml +++ b/test/integration/roles/test_win_feature/tasks/main.yml @@ -17,10 +17,16 @@ # along with Ansible. If not, see . 
+- name: check whether servermanager module is available (windows 2008 r2 or later) + raw: PowerShell -Command Import-Module ServerManager + register: win_feature_has_servermanager + ignore_errors: true + - name: start with feature absent win_feature: name: "{{ test_win_feature_name }}" state: absent + when: win_feature_has_servermanager|success - name: install feature win_feature: @@ -30,6 +36,7 @@ include_sub_features: yes include_management_tools: yes register: win_feature_install_result + when: win_feature_has_servermanager|success - name: check result of installing feature assert: @@ -45,6 +52,7 @@ - "win_feature_install_result.feature_result[0].restart_needed is defined" - "win_feature_install_result.feature_result[0].skip_reason" - "win_feature_install_result.feature_result[0].success is defined" + when: win_feature_has_servermanager|success - name: install feature again win_feature: @@ -54,6 +62,7 @@ include_sub_features: yes include_management_tools: yes register: win_feature_install_again_result + when: win_feature_has_servermanager|success - name: check result of installing feature again assert: @@ -63,12 +72,14 @@ - "win_feature_install_again_result.exitcode == 'NoChangeNeeded'" - "not win_feature_install_again_result.restart_needed" - "win_feature_install_again_result.feature_result == []" + when: win_feature_has_servermanager|success - name: remove feature win_feature: name: "{{ test_win_feature_name }}" state: absent register: win_feature_remove_result + when: win_feature_has_servermanager|success - name: check result of removing feature assert: @@ -84,12 +95,14 @@ - "win_feature_remove_result.feature_result[0].restart_needed is defined" - "win_feature_remove_result.feature_result[0].skip_reason" - "win_feature_remove_result.feature_result[0].success is defined" + when: win_feature_has_servermanager|success - name: remove feature again win_feature: name: "{{ test_win_feature_name }}" state: absent register: win_feature_remove_again_result + when: 
win_feature_has_servermanager|success - name: check result of removing feature again assert: @@ -99,6 +112,7 @@ - "win_feature_remove_again_result.exitcode == 'NoChangeNeeded'" - "not win_feature_remove_again_result.restart_needed" - "win_feature_remove_again_result.feature_result == []" + when: win_feature_has_servermanager|success - name: try to install an invalid feature name win_feature: @@ -106,6 +120,7 @@ state: present register: win_feature_install_invalid_result ignore_errors: true + when: win_feature_has_servermanager|success - name: check result of installing invalid feature name assert: @@ -114,6 +129,7 @@ - "not win_feature_install_invalid_result|changed" - "win_feature_install_invalid_result.msg" - "win_feature_install_invalid_result.exitcode == 'InvalidArgs'" + when: win_feature_has_servermanager|success - name: try to remove an invalid feature name win_feature: @@ -121,6 +137,7 @@ state: absent register: win_feature_remove_invalid_result ignore_errors: true + when: win_feature_has_servermanager|success - name: check result of removing invalid feature name assert: @@ -129,3 +146,4 @@ - "not win_feature_remove_invalid_result|changed" - "win_feature_remove_invalid_result.msg" - "win_feature_remove_invalid_result.exitcode == 'InvalidArgs'" + when: win_feature_has_servermanager|success From b659621575168b57d06b44de2d507aba202f2607 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 11 May 2015 08:06:21 -0400 Subject: [PATCH 0713/3617] Remove unneeded required_one_of for openstack We're being too strict - there is a third possibility, which is that a user will have defined the OS_* environment variables and expect them to pass through. 
--- lib/ansible/module_utils/openstack.py | 6 +----- lib/ansible/utils/module_docs_fragments/openstack.py | 7 +++++-- v1/ansible/module_utils/openstack.py | 6 +----- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/lib/ansible/module_utils/openstack.py b/lib/ansible/module_utils/openstack.py index b58cc534287050..4069449144346d 100644 --- a/lib/ansible/module_utils/openstack.py +++ b/lib/ansible/module_utils/openstack.py @@ -93,11 +93,7 @@ def openstack_full_argument_spec(**kwargs): def openstack_module_kwargs(**kwargs): - ret = dict( - required_one_of=[ - ['cloud', 'auth'], - ], - ) + ret = {} for key in ('mutually_exclusive', 'required_together', 'required_one_of'): if key in kwargs: if key in ret: diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index f989b3dcb80f8e..c295ed430683fc 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -23,7 +23,9 @@ class ModuleDocFragment(object): options: cloud: description: - - Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin) + - Named cloud to operate against. Provides default values for I(auth) and + I(auth_type). This parameter is not needed if I(auth) is provided or if + OpenStack OS_* environment variables are present. required: false auth: description: @@ -32,7 +34,8 @@ class ModuleDocFragment(object): I(auth_url), I(username), I(password), I(project_name) and any information about domains if the cloud supports them. For other plugins, this param will need to contain whatever parameters that auth plugin - requires. This parameter is not needed if a named cloud is provided. + requires. This parameter is not needed if a named cloud is provided or + OpenStack OS_* environment variables are present. 
required: false auth_type: description: diff --git a/v1/ansible/module_utils/openstack.py b/v1/ansible/module_utils/openstack.py index b58cc534287050..4069449144346d 100644 --- a/v1/ansible/module_utils/openstack.py +++ b/v1/ansible/module_utils/openstack.py @@ -93,11 +93,7 @@ def openstack_full_argument_spec(**kwargs): def openstack_module_kwargs(**kwargs): - ret = dict( - required_one_of=[ - ['cloud', 'auth'], - ], - ) + ret = {} for key in ('mutually_exclusive', 'required_together', 'required_one_of'): if key in kwargs: if key in ret: From 2046d763109d8d62a39e6e215ae8cd2a2465d422 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 11 May 2015 08:10:37 -0400 Subject: [PATCH 0714/3617] Add defaults and a link to os-client-config docs --- lib/ansible/utils/module_docs_fragments/openstack.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index c295ed430683fc..94d5b9834c3302 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -80,14 +80,17 @@ class ModuleDocFragment(object): - A path to a CA Cert bundle that can be used as part of verifying SSL API requests. required: false + default: None cert: description: - A path to a client certificate to use as part of the SSL transaction required: false + default: None key: description: - A path to a client key to use as part of the SSL transaction required: false + default: None endpoint_type: description: - Endpoint URL type to fetch from the service catalog. @@ -102,5 +105,6 @@ class ModuleDocFragment(object): can come from a yaml config file in /etc/ansible/openstack.yaml, /etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from standard environment variables, then finally by explicit parameters in - plays. + plays. 
More information can be found at + U(http://docs.openstack.org/developer/os-client-config) ''' From a8c290cc3bb4b2549a0e5b64beb985ff78bf8d23 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 29 May 2015 16:13:30 -0400 Subject: [PATCH 0715/3617] fixed ubuntu facts for all versions made sure NA is option of last resort --- lib/ansible/module_utils/facts.py | 9 ++++++--- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 3485690b83f13e..6f5f35f8310b7d 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -99,8 +99,9 @@ class Facts(object): ('/etc/os-release', 'SuSE'), ('/etc/gentoo-release', 'Gentoo'), ('/etc/os-release', 'Debian'), + ('/etc/lsb-release', 'Mandriva'), ('/etc/os-release', 'NA'), - ('/etc/lsb-release', 'Mandriva')) + ) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. If there is a platform with more than one @@ -416,11 +417,13 @@ def get_distribution_facts(self): self.facts['distribution_version'] = self.facts['distribution_version'] + '.' 
+ release.group(1) elif name == 'Debian': data = get_file_content(path) - if 'Debian' in data or 'Raspbian' in data: + if 'Ubuntu' in data: + break # Ubuntu gets correct info from python functions + elif 'Debian' in data or 'Raspbian' in data: release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) if release: self.facts['distribution_release'] = release.groups()[0] - break + break elif name == 'Mandriva': data = get_file_content(path) if 'Mandriva' in data: diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 5983d64d7728ea..9cc23c749a8cd5 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 5983d64d7728ea88ef27606e95e4aa34cde5ff46 +Subproject commit 9cc23c749a8cd5039db7aa1998d310bbb04d1e13 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 1276420a3a3934..a07fc88ba0d254 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 1276420a3a39340fcd9e053a1e621cdd89f480fa +Subproject commit a07fc88ba0d2546b92fbe93b2bede699fdf2bc48 From 7e020d21deeb3425784e3bf13e07eed1cf036b22 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 29 May 2015 16:19:09 -0400 Subject: [PATCH 0716/3617] correctly identify ubuntu now in all cases made NA the last resort --- lib/ansible/module_utils/facts.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 7209f699c34bc1..39546cc8bba209 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -99,8 +99,9 @@ class Facts(object): ('/etc/os-release', 'SuSE'), ('/etc/gentoo-release', 'Gentoo'), ('/etc/os-release', 'Debian'), + ('/etc/lsb-release', 'Mandriva'), ('/etc/os-release', 'NA'), - ('/etc/lsb-release', 'Mandriva')) + ) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. 
If there is a platform with more than one @@ -416,7 +417,9 @@ def get_distribution_facts(self): self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1) elif name == 'Debian': data = get_file_content(path) - if 'Debian' in data or 'Raspbian' in data: + if 'Ubuntu' in data: + break # Ubuntu gets correct info from python functions + elif 'Debian' in data or 'Raspbian' in data: release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) if release: self.facts['distribution_release'] = release.groups()[0] From 529726d0baa5a34cff8dcd5ffaf81b904f842b4f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 29 May 2015 16:22:55 -0400 Subject: [PATCH 0717/3617] fixed mistaken module update in prev commit --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9cc23c749a8cd5..5983d64d7728ea 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9cc23c749a8cd5039db7aa1998d310bbb04d1e13 +Subproject commit 5983d64d7728ea88ef27606e95e4aa34cde5ff46 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index a07fc88ba0d254..1276420a3a3934 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit a07fc88ba0d2546b92fbe93b2bede699fdf2bc48 +Subproject commit 1276420a3a39340fcd9e053a1e621cdd89f480fa From d8bfb4c6290e1da3f281c728c5ad8a77598830f1 Mon Sep 17 00:00:00 2001 From: Rob Szarka Date: Fri, 29 May 2015 21:49:52 -0400 Subject: [PATCH 0718/3617] Update guide_aws.rst Fixed typos. --- docsite/rst/guide_aws.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/guide_aws.rst b/docsite/rst/guide_aws.rst index c4e12eab4970a8..e0d0c12630df3c 100644 --- a/docsite/rst/guide_aws.rst +++ b/docsite/rst/guide_aws.rst @@ -13,7 +13,7 @@ Requirements for the AWS modules are minimal. 
All of the modules require and are tested against recent versions of boto. You'll need this Python module installed on your control machine. Boto can be installed from your OS distribution or python's "pip install boto". -Whereas classically ansible will execute tasks in it's host loop against multiple remote machines, most cloud-control steps occur on your local machine with reference to the regions to control. +Whereas classically ansible will execute tasks in its host loop against multiple remote machines, most cloud-control steps occur on your local machine with reference to the regions to control. In your playbook steps we'll typically be using the following pattern for provisioning steps:: @@ -214,7 +214,7 @@ AWS Image Building With Ansible ``````````````````````````````` Many users may want to have images boot to a more complete configuration rather than configuring them entirely after instantiation. To do this, -one of many programs can be used with Ansible playbooks to define and upload a base image, which will then get it's own AMI ID for usage with +one of many programs can be used with Ansible playbooks to define and upload a base image, which will then get its own AMI ID for usage with the ec2 module or other Ansible AWS modules such as ec2_asg or the cloudformation module. Possible tools include Packer, aminator, and Ansible's ec2_ami module. 
From 5954892457a89cbd61133cc2e95377c04c83bca1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 29 May 2015 19:00:16 -0700 Subject: [PATCH 0719/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 5983d64d7728ea..f8d8af17cdc725 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 5983d64d7728ea88ef27606e95e4aa34cde5ff46 +Subproject commit f8d8af17cdc72500af8319c96004b86ac702a0a4 From 908d6c0ef25384d126a488d3be4196803eb5f06e Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Sun, 31 May 2015 20:05:02 -0400 Subject: [PATCH 0720/3617] Fixes #11046 --- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 6f5f35f8310b7d..1162e05b9cfef7 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2163,7 +2163,7 @@ def parse_media_line(self, words, current_if, ips): current_if['media'] = 'Unknown' # Mac does not give us this current_if['media_select'] = words[1] if len(words) > 2: - current_if['media_type'] = words[2][1:] + current_if['media_type'] = words[2][1:-1] if len(words) > 3: current_if['media_options'] = self.get_options(words[3]) From 8d742df1deba75d0e7ebfbb73db3f030827b0283 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Sun, 31 May 2015 23:15:28 -0400 Subject: [PATCH 0721/3617] Allow prepare_win_tests role to run multiple times, before each role that depends on it. 
--- test/integration/roles/prepare_win_tests/meta/main.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 test/integration/roles/prepare_win_tests/meta/main.yml diff --git a/test/integration/roles/prepare_win_tests/meta/main.yml b/test/integration/roles/prepare_win_tests/meta/main.yml new file mode 100644 index 00000000000000..cf5427b6084683 --- /dev/null +++ b/test/integration/roles/prepare_win_tests/meta/main.yml @@ -0,0 +1,3 @@ +--- + +allow_duplicates: yes From d2ba0de6aab12a136d71959d45b4158bfbf45ce9 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Sun, 31 May 2015 23:16:45 -0400 Subject: [PATCH 0722/3617] When running winrm tests against multiple hosts, fail the play when any host has a failure. --- test/integration/test_winrm.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/test_winrm.yml b/test/integration/test_winrm.yml index 69d3b652a6f727..b249224cb8ab8d 100644 --- a/test/integration/test_winrm.yml +++ b/test/integration/test_winrm.yml @@ -18,6 +18,7 @@ - hosts: windows gather_facts: false + max_fail_percentage: 1 roles: - { role: test_win_raw, tags: test_win_raw } - { role: test_win_script, tags: test_win_script } From 46a72d108acbe6e194aa44592203dd7206fdfdbb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 1 Jun 2015 10:17:18 -0400 Subject: [PATCH 0723/3617] added cs_project new module --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 98006503692fcb..f806cbfb1f81ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ New Modules: * cloudstack: cs_instance * cloudstack: cs_instancegroup * cloudstack: cs_portforward + * cloudstack: cs_project * cloudstack: cs_sshkeypair * cloudstack: cs_securitygroup * cloudstack: cs_securitygroup_rule From 816b20af0beb5a96957cd51412aa116f14374b04 Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Sun, 31 May 2015 20:05:02 -0400 Subject: [PATCH 0724/3617] Fixes #11046 --- lib/ansible/module_utils/facts.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 39546cc8bba209..8575f457fb872b 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2153,7 +2153,7 @@ def parse_media_line(self, words, current_if, ips): current_if['media'] = 'Unknown' # Mac does not give us this current_if['media_select'] = words[1] if len(words) > 2: - current_if['media_type'] = words[2][1:] + current_if['media_type'] = words[2][1:-1] if len(words) > 3: current_if['media_options'] = self.get_options(words[3]) From d2db7bad1bed5c00cee4f05852bff1c177040bb5 Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Mon, 1 Jun 2015 13:23:28 -0400 Subject: [PATCH 0725/3617] Fixes OSX fact gathering for the bridge interface. Issue #11104 --- lib/ansible/module_utils/facts.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 1162e05b9cfef7..f65f776a242722 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2163,7 +2163,13 @@ def parse_media_line(self, words, current_if, ips): current_if['media'] = 'Unknown' # Mac does not give us this current_if['media_select'] = words[1] if len(words) > 2: - current_if['media_type'] = words[2][1:-1] + # MacOSX sets the media to '' for bridge interface + # and parsing splits this into two words; this if/else helps + if words[1] == '': + current_if['media_select'] = 'Unknown' + current_if['media_type'] = 'unknown type' + else: + current_if['media_type'] = words[2][1:-1] if len(words) > 3: current_if['media_options'] = self.get_options(words[3]) From 30b92a6f4cd92b69ae562d970efaf831858891e2 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Mon, 1 Jun 2015 21:53:49 +0100 Subject: [PATCH 0726/3617] Get-FileChecksum allways returns a string now, and the test_win_copy integration tests that depend on the checksum have been updated in 
this change too. --- lib/ansible/module_utils/powershell.ps1 | 2 +- test/integration/roles/test_win_copy/tasks/main.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1 index 9606f47783b66c..a11e316989c6f0 100644 --- a/lib/ansible/module_utils/powershell.ps1 +++ b/lib/ansible/module_utils/powershell.ps1 @@ -151,7 +151,7 @@ Function Get-FileChecksum($path) { $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); - [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); + $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); $fp.Dispose(); } ElseIf (Test-Path -PathType Container $path) diff --git a/test/integration/roles/test_win_copy/tasks/main.yml b/test/integration/roles/test_win_copy/tasks/main.yml index d898219a85c1b9..48df4273807b7e 100644 --- a/test/integration/roles/test_win_copy/tasks/main.yml +++ b/test/integration/roles/test_win_copy/tasks/main.yml @@ -62,7 +62,7 @@ - name: verify that the file checksum is correct assert: that: - - "copy_result.checksum[0] == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" + - "copy_result.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" - name: check the stat results of the file win_stat: path={{output_file}} @@ -78,7 +78,7 @@ # - "stat_results.stat.isfifo == false" # - "stat_results.stat.isreg == true" # - "stat_results.stat.issock == false" - - "stat_results.stat.checksum[0] == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" + - "stat_results.stat.checksum == 'c79a6506c1c948be0d456ab5104d5e753ab2f3e6'" - name: overwrite the file via same means win_copy: src=foo.txt dest={{output_file}} From 4bc7703db310c6178b45969b941dea9cddcee046 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 1 Jun 2015 16:41:52 -0500 Subject: [PATCH 
0727/3617] Fixing some small bugs related to integration tests (v2) --- lib/ansible/executor/play_iterator.py | 2 +- lib/ansible/inventory/group.py | 2 - lib/ansible/module_utils/basic.py | 4 +- lib/ansible/parsing/yaml/dumper.py | 37 +++++++++++++++++++ lib/ansible/plugins/filter/core.py | 11 ++++-- lib/ansible/plugins/strategies/__init__.py | 28 ++++++++------ lib/ansible/plugins/strategies/linear.py | 4 +- lib/ansible/template/__init__.py | 8 ---- test/integration/Makefile | 13 ++++--- .../roles/test_lineinfile/tasks/main.yml | 2 +- test/integration/test_filters.yml | 5 +++ test/units/module_utils/test_basic.py | 2 +- 12 files changed, 80 insertions(+), 38 deletions(-) create mode 100644 lib/ansible/parsing/yaml/dumper.py create mode 100644 test/integration/test_filters.yml diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index dc4d4c7d5d2522..d7c966148916d2 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -239,7 +239,7 @@ def mark_host_failed(self, host): self._host_states[host.name] = s def get_failed_hosts(self): - return dict((host, True) for (host, state) in self._host_states.iteritems() if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE) + return dict((host, True) for (host, state) in self._host_states.iteritems() if state.fail_state != self.FAILED_NONE) def get_original_task(self, host, task): ''' diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py index 6525e69b466bd1..17f3ff744faee1 100644 --- a/lib/ansible/inventory/group.py +++ b/lib/ansible/inventory/group.py @@ -59,11 +59,9 @@ def serialize(self): depth=self.depth, ) - debug("serializing group, result is: %s" % result) return result def deserialize(self, data): - debug("deserializing group, data is: %s" % data) self.__init__() self.name = data.get('name') self.vars = data.get('vars', dict()) diff --git a/lib/ansible/module_utils/basic.py 
b/lib/ansible/module_utils/basic.py index 793223b1652b19..69e4036c834b2d 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -588,8 +588,8 @@ def set_context_if_different(self, path, context, changed): return True rc = selinux.lsetfilecon(self._to_filesystem_str(path), str(':'.join(new_context))) - except OSError: - self.fail_json(path=path, msg='invalid selinux context', new_context=new_context, cur_context=cur_context, input_was=context) + except OSError, e: + self.fail_json(path=path, msg='invalid selinux context: %s' % str(e), new_context=new_context, cur_context=cur_context, input_was=context) if rc != 0: self.fail_json(path=path, msg='set selinux context failed') changed = True diff --git a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py new file mode 100644 index 00000000000000..dc498acd066f82 --- /dev/null +++ b/lib/ansible/parsing/yaml/dumper.py @@ -0,0 +1,37 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import yaml + +from ansible.parsing.yaml.objects import AnsibleUnicode + +class AnsibleDumper(yaml.SafeDumper): + ''' + A simple stub class that allows us to add representers + for our overridden object types. 
+ ''' + pass + +AnsibleDumper.add_representer( + AnsibleUnicode, + yaml.representer.SafeRepresenter.represent_unicode +) + diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py index bdf45509c3a610..977d0947c38c61 100644 --- a/lib/ansible/plugins/filter/core.py +++ b/lib/ansible/plugins/filter/core.py @@ -38,16 +38,21 @@ from distutils.version import LooseVersion, StrictVersion from ansible import errors +from ansible.parsing.yaml.dumper import AnsibleDumper from ansible.utils.hashing import md5s, checksum_s from ansible.utils.unicode import unicode_wrap, to_unicode UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E') +def to_yaml(a, *args, **kw): + '''Make verbose, human readable yaml''' + transformed = yaml.dump(a, Dumper=AnsibleDumper, allow_unicode=True, **kw) + return to_unicode(transformed) -def to_nice_yaml(*a, **kw): +def to_nice_yaml(a, *args, **kw): '''Make verbose, human readable yaml''' - transformed = yaml.safe_dump(*a, indent=4, allow_unicode=True, default_flow_style=False, **kw) + transformed = yaml.dump(a, Dumper=AnsibleDumper, indent=4, allow_unicode=True, default_flow_style=False, **kw) return to_unicode(transformed) def to_json(a, *args, **kw): @@ -288,7 +293,7 @@ def filters(self): 'from_json': json.loads, # yaml - 'to_yaml': yaml.safe_dump, + 'to_yaml': to_yaml, 'to_nice_yaml': to_nice_yaml, 'from_yaml': yaml.safe_load, diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 03ad57ed4ac5d5..bb839f20f4cdc0 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -73,24 +73,28 @@ def __init__(self, tqm): self._blocked_hosts = dict() def run(self, iterator, connection_info, result=True): - # save the counts on failed/unreachable hosts, as the cleanup/handler - # methods will clear that information during their runs - num_failed = len(self._tqm._failed_hosts) - num_unreachable = 
len(self._tqm._unreachable_hosts) + # save the failed/unreachable hosts, as the run_handlers() + # method will clear that information during its execution + failed_hosts = self._tqm._failed_hosts.keys() + unreachable_hosts = self._tqm._unreachable_hosts.keys() debug("running handlers") result &= self.run_handlers(iterator, connection_info) + # now update with the hosts (if any) that failed or were + # unreachable during the handler execution phase + failed_hosts = set(failed_hosts).union(self._tqm._failed_hosts.keys()) + unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys()) + # send the stats callback self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) - if not result: - if num_unreachable > 0: - return 3 - elif num_failed > 0: - return 2 - else: - return 1 + if len(unreachable_hosts) > 0: + return 3 + elif len(failed_hosts) > 0: + return 2 + elif not result: + return 1 else: return 0 @@ -145,7 +149,7 @@ def _process_pending_results(self, iterator): task_result = result[1] host = task_result._host task = task_result._task - if result[0] == 'host_task_failed': + if result[0] == 'host_task_failed' or 'failed' in task_result._result: if not task.ignore_errors: debug("marking %s as failed" % host.name) iterator.mark_host_failed(host) diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index af12587b926ebf..e92f10eb374e0b 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -211,7 +211,7 @@ def run(self, iterator, connection_info): try: included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader) except AnsibleError, e: - return 1 + return False if len(included_files) > 0: noop_task = Task() @@ -252,7 +252,7 @@ def run(self, iterator, connection_info): except (IOError, EOFError), e: debug("got IOError/EOFError in task loop: %s" % e) # most likely an abort, return failed - 
return 1 + return False # run the base class run() method, which executes the cleanup function # and runs any outstanding handlers which have been triggered diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 8ad9917d6020e0..00bc386f268513 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -238,14 +238,6 @@ def _do_template(self, data, preserve_trailing_newlines=False): environment.filters.update(self._get_filters()) environment.template_class = AnsibleJ2Template - # FIXME: may not be required anymore, as the basedir stuff will - # be handled by the loader? - #if '_original_file' in vars: - # basedir = os.path.dirname(vars['_original_file']) - # filesdir = os.path.abspath(os.path.join(basedir, '..', 'files')) - # if os.path.exists(filesdir): - # basedir = filesdir - try: t = environment.from_string(data) except TemplateSyntaxError, e: diff --git a/test/integration/Makefile b/test/integration/Makefile index 3ee38b0ab79d76..69fe804c65e4a0 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -24,12 +24,13 @@ CONSUL_RUNNING := $(shell python consul_running.py) all: parsing test_var_precedence unicode test_templating_settings non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault test_tags parsing: - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? -eq 4 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario2; [ $$? -eq 4 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario3; [ $$? -eq 4 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario4; [ $$? 
-eq 4 ] - ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5; [ $$? -eq 4 ] - ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? -eq 4 ] + #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario2; [ $$? -eq 4 ] + #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario3; [ $$? -eq 4 ] + #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario4; [ $$? -eq 4 ] + #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5; [ $$? -eq 4 ] + #ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + echo "skipping for now..." 
includes: ansible-playbook test_includes.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) diff --git a/test/integration/roles/test_lineinfile/tasks/main.yml b/test/integration/roles/test_lineinfile/tasks/main.yml index 0c018ccaa59419..8cfb3430f64919 100644 --- a/test/integration/roles/test_lineinfile/tasks/main.yml +++ b/test/integration/roles/test_lineinfile/tasks/main.yml @@ -225,7 +225,7 @@ - "result.msg == 'line added'" - name: insert a multiple lines at the end of the file - lineinfile: dest={{output_dir}}/test.txt state=present line="This is a line\nwith \\\n character" insertafter="EOF" + lineinfile: dest={{output_dir}}/test.txt state=present line="This is a line\nwith \\n character" insertafter="EOF" register: result - name: assert that the multiple lines was inserted diff --git a/test/integration/test_filters.yml b/test/integration/test_filters.yml new file mode 100644 index 00000000000000..050a303f604684 --- /dev/null +++ b/test/integration/test_filters.yml @@ -0,0 +1,5 @@ +- hosts: testhost + connection: local + gather_facts: yes + roles: + - { role: test_filters } diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index cd2bf0536e5fd9..757a5f87d74a48 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -722,7 +722,7 @@ def test_module_utils_basic_ansible_module_set_mode_if_different(self): # FIXME: this isn't working yet #with patch('os.lstat', side_effect=[mock_stat1, mock_stat2]): - # with patch('os.lchmod', return_value=None, create=True) as m_os: + # with patch('os.lchmod', return_value=None) as m_os: # del m_os.lchmod # with patch('os.path.islink', return_value=False): # with patch('os.chmod', return_value=None) as m_chmod: From c7d1dd4b687098598c3abe7b7b29635f23b83422 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 1 Jun 2015 16:50:18 -0500 Subject: [PATCH 0728/3617] Updating v1/ansible/modules/core/ to use the v1_modules branch --- 
v1/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v1/ansible/modules/core b/v1/ansible/modules/core index 9028e9d4be8a3d..f8d8af17cdc725 160000 --- a/v1/ansible/modules/core +++ b/v1/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9028e9d4be8a3dbb96c81a799e18f3adf63d9fd0 +Subproject commit f8d8af17cdc72500af8319c96004b86ac702a0a4 From 7f1b64d934b137185e05a7276c653bbe84458dd5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 1 Jun 2015 19:46:29 -0500 Subject: [PATCH 0729/3617] Submodule pointer update for core to the merged v2 branch --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 191a672891359f..b138411671194e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 191a672891359f3b6faff83cb0613f1b38e3fc0e +Subproject commit b138411671194e3ec236d8ec3d27bcf32447350d From 620fad9f8d750ac3ddb976782df4d5347e3c2704 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 1 Jun 2015 20:02:15 -0500 Subject: [PATCH 0730/3617] Fixing an oops in inventory/__init__.py where the slots are incorrect --- lib/ansible/inventory/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 9870648ceeb524..43a6084cbd06eb 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -43,9 +43,9 @@ class Inventory(object): Host inventory for ansible. 
""" - __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', - 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', - '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] + #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', + # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', + # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): From 8868f4b4819d162e2031a6f9781f0ed0cc3fd518 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:21:48 +0200 Subject: [PATCH 0731/3617] cloudstack: sync module_utils/cloudstack.py to v1 Commits from 31520cdd178246f94921ba9d9866abf23b28e252 to 62ccc1b9b643196b8de36980a597c2d5d644b957 related to cloudstack.py --- v1/ansible/module_utils/cloudstack.py | 243 ++++++++++++++++++++++---- 1 file changed, 211 insertions(+), 32 deletions(-) diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py index 2c891434bdebea..e887367c2fd69b 100644 --- a/v1/ansible/module_utils/cloudstack.py +++ b/v1/ansible/module_utils/cloudstack.py @@ -41,15 +41,22 @@ def __init__(self, module): if not has_lib_cs: module.fail_json(msg="python library cs required: pip install cs") + self.result = { + 'changed': False, + } + self.module = module self._connect() - self.project_id = None - self.ip_address_id = None - self.zone_id = None - self.vm_id = None - self.os_type_id = None + self.domain = None + self.account = None + self.project = None + self.ip_address = None + self.zone = None + self.vm = None + self.os_type = None self.hypervisor = None + self.capabilities = None def _connect(self): @@ -68,27 +75,73 @@ def _connect(self): else: self.cs = CloudStack(**read_config()) + # TODO: rename to has_changed() + def _has_changed(self, want_dict, current_dict, only_keys=None): + for key, 
value in want_dict.iteritems(): + + # Optionally limit by a list of keys + if only_keys and key not in only_keys: + continue; + + # Skip None values + if value is None: + continue; + + if key in current_dict: + + # API returns string for int in some cases, just to make sure + if isinstance(value, int): + current_dict[key] = int(current_dict[key]) + elif isinstance(value, str): + current_dict[key] = str(current_dict[key]) + + # Only need to detect a singe change, not every item + if value != current_dict[key]: + return True + return False + + + def _get_by_key(self, key=None, my_dict={}): + if key: + if key in my_dict: + return my_dict[key] + self.module.fail_json(msg="Something went wrong: %s not found" % key) + return my_dict + + # TODO: for backward compatibility only, remove if not used anymore def get_project_id(self): - if self.project_id: - return self.project_id + return self.get_project(key='id') + + + def get_project(self, key=None): + if self.project: + return self._get_by_key(key, self.project) project = self.module.params.get('project') if not project: return None - - projects = self.cs.listProjects() + args = {} + args['listall'] = True + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + projects = self.cs.listProjects(**args) if projects: for p in projects['project']: if project in [ p['name'], p['displaytext'], p['id'] ]: - self.project_id = p['id'] - return self.project_id + self.project = p + return self._get_by_key(key, self.project) self.module.fail_json(msg="project '%s' not found" % project) + # TODO: for backward compatibility only, remove if not used anymore def get_ip_address_id(self): - if self.ip_address_id: - return self.ip_address_id + return self.get_ip_address(key='id') + + + def get_ip_address(self, key=None): + if self.ip_address: + return self._get_by_key(key, self.ip_address) ip_address = self.module.params.get('ip_address') if not ip_address: @@ -96,58 +149,78 @@ def 
get_ip_address_id(self): args = {} args['ipaddress'] = ip_address - args['projectid'] = self.get_project_id() + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') ip_addresses = self.cs.listPublicIpAddresses(**args) if not ip_addresses: self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress']) - self.ip_address_id = ip_addresses['publicipaddress'][0]['id'] - return self.ip_address_id + self.ip_address = ip_addresses['publicipaddress'][0] + return self._get_by_key(key, self.ip_address) + # TODO: for backward compatibility only, remove if not used anymore def get_vm_id(self): - if self.vm_id: - return self.vm_id + return self.get_vm(key='id') + + + def get_vm(self, key=None): + if self.vm: + return self._get_by_key(key, self.vm) vm = self.module.params.get('vm') if not vm: self.module.fail_json(msg="Virtual machine param 'vm' is required") args = {} - args['projectid'] = self.get_project_id() + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['zoneid'] = self.get_zone(key='id') vms = self.cs.listVirtualMachines(**args) if vms: for v in vms['virtualmachine']: - if vm in [ v['displayname'], v['name'], v['id'] ]: - self.vm_id = v['id'] - return self.vm_id + if vm in [ v['name'], v['displayname'], v['id'] ]: + self.vm = v + return self._get_by_key(key, self.vm) self.module.fail_json(msg="Virtual machine '%s' not found" % vm) + # TODO: for backward compatibility only, remove if not used anymore def get_zone_id(self): - if self.zone_id: - return self.zone_id + return self.get_zone(key='id') + + + def get_zone(self, key=None): + if self.zone: + return self._get_by_key(key, self.zone) zone = self.module.params.get('zone') zones = self.cs.listZones() # use the first zone if no zone param given if not zone: - self.zone_id = zones['zone'][0]['id'] - return self.zone_id + 
self.zone = zones['zone'][0] + return self._get_by_key(key, self.zone) if zones: for z in zones['zone']: if zone in [ z['name'], z['id'] ]: - self.zone_id = z['id'] - return self.zone_id + self.zone = z + return self._get_by_key(key, self.zone) self.module.fail_json(msg="zone '%s' not found" % zone) + # TODO: for backward compatibility only, remove if not used anymore def get_os_type_id(self): - if self.os_type_id: - return self.os_type_id + return self.get_os_type(key='id') + + + def get_os_type(self, key=None): + if self.os_type: + return self._get_by_key(key, self.zone) os_type = self.module.params.get('os_type') if not os_type: @@ -157,8 +230,8 @@ def get_os_type_id(self): if os_types: for o in os_types['ostype']: if os_type in [ o['description'], o['id'] ]: - self.os_type_id = o['id'] - return self.os_type_id + self.os_type = o + return self._get_by_key(key, self.os_type) self.module.fail_json(msg="OS type '%s' not found" % os_type) @@ -181,6 +254,112 @@ def get_hypervisor(self): self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor) + def get_account(self, key=None): + if self.account: + return self._get_by_key(key, self.account) + + account = self.module.params.get('account') + if not account: + return None + + domain = self.module.params.get('domain') + if not domain: + self.module.fail_json(msg="Account must be specified with Domain") + + args = {} + args['name'] = account + args['domainid'] = self.get_domain(key='id') + args['listall'] = True + accounts = self.cs.listAccounts(**args) + if accounts: + self.account = accounts['account'][0] + return self._get_by_key(key, self.account) + self.module.fail_json(msg="Account '%s' not found" % account) + + + def get_domain(self, key=None): + if self.domain: + return self._get_by_key(key, self.domain) + + domain = self.module.params.get('domain') + if not domain: + return None + + args = {} + args['name'] = domain + args['listall'] = True + domains = self.cs.listDomains(**args) + if domains: + 
self.domain = domains['domain'][0] + return self._get_by_key(key, self.domain) + self.module.fail_json(msg="Domain '%s' not found" % domain) + + + def get_tags(self, resource=None): + existing_tags = self.cs.listTags(resourceid=resource['id']) + if existing_tags: + return existing_tags['tag'] + return [] + + + def _delete_tags(self, resource, resource_type, tags): + existing_tags = resource['tags'] + tags_to_delete = [] + for existing_tag in existing_tags: + if existing_tag['key'] in tags: + if existing_tag['value'] != tags[key]: + tags_to_delete.append(existing_tag) + else: + tags_to_delete.append(existing_tag) + if tags_to_delete: + self.result['changed'] = True + if not self.module.check_mode: + args = {} + args['resourceids'] = resource['id'] + args['resourcetype'] = resource_type + args['tags'] = tags_to_delete + self.cs.deleteTags(**args) + + + def _create_tags(self, resource, resource_type, tags): + tags_to_create = [] + for i, tag_entry in enumerate(tags): + tag = { + 'key': tag_entry['key'], + 'value': tag_entry['value'], + } + tags_to_create.append(tag) + if tags_to_create: + self.result['changed'] = True + if not self.module.check_mode: + args = {} + args['resourceids'] = resource['id'] + args['resourcetype'] = resource_type + args['tags'] = tags_to_create + self.cs.createTags(**args) + + + def ensure_tags(self, resource, resource_type=None): + if not resource_type or not resource: + self.module.fail_json(msg="Error: Missing resource or resource_type for tags.") + + if 'tags' in resource: + tags = self.module.params.get('tags') + if tags is not None: + self._delete_tags(resource, resource_type, tags) + self._create_tags(resource, resource_type, tags) + resource['tags'] = self.get_tags(resource) + return resource + + + def get_capabilities(self, key=None): + if self.capabilities: + return self._get_by_key(key, self.capabilities) + capabilities = self.cs.listCapabilities() + self.capabilities = capabilities['capability'] + return self._get_by_key(key, 
self.capabilities) + + # TODO: rename to poll_job() def _poll_job(self, job=None, key=None): if 'jobid' in job: while True: From 7bb9cd3766fcffa90dbd775c4530a6227679e357 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:34:20 +0200 Subject: [PATCH 0732/3617] cloudstack: minor cleanup in doc fragments --- lib/ansible/utils/module_docs_fragments/cloudstack.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py index 5a7411b199dfff..ebb6fdab2c42ff 100644 --- a/lib/ansible/utils/module_docs_fragments/cloudstack.py +++ b/lib/ansible/utils/module_docs_fragments/cloudstack.py @@ -27,32 +27,29 @@ class ModuleDocFragment(object): - API key of the CloudStack API. required: false default: null - aliases: [] api_secret: description: - Secret key of the CloudStack API. required: false default: null - aliases: [] api_url: description: - URL of the CloudStack API e.g. https://cloud.example.com/client/api. required: false default: null - aliases: [] api_http_method: description: - HTTP method used. required: false default: 'get' - aliases: [] + choices: [ 'get', 'post' ] requirements: - "python >= 2.6" - cs notes: - Ansible uses the C(cs) library's configuration method if credentials are not provided by the options C(api_url), C(api_key), C(api_secret). - Configuration is read from several locations, in the following order":" + Configuration is read from several locations, in the following order. - The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and C(CLOUDSTACK_METHOD) environment variables. 
- A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file, From fc807e29c8b67d560505363b3dadb56e1590bf20 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:35:55 +0200 Subject: [PATCH 0733/3617] cloudstack: add api_timeout to doc fragments --- lib/ansible/utils/module_docs_fragments/cloudstack.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py index ebb6fdab2c42ff..bafb7b4c15a497 100644 --- a/lib/ansible/utils/module_docs_fragments/cloudstack.py +++ b/lib/ansible/utils/module_docs_fragments/cloudstack.py @@ -43,6 +43,11 @@ class ModuleDocFragment(object): required: false default: 'get' choices: [ 'get', 'post' ] + api_timeout: + description: + - HTTP timeout. + required: false + default: 10 requirements: - "python >= 2.6" - cs @@ -51,7 +56,7 @@ class ModuleDocFragment(object): provided by the options C(api_url), C(api_key), C(api_secret). Configuration is read from several locations, in the following order. - The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and - C(CLOUDSTACK_METHOD) environment variables. + C(CLOUDSTACK_METHOD). C(CLOUDSTACK_TIMEOUT) environment variables. - A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file, - A C(cloudstack.ini) file in the current working directory. - A C(.cloudstack.ini) file in the users home directory. 
From caf3cf69302858d62c206027629ab30124ff9c08 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Sat, 30 May 2015 11:42:45 +0200 Subject: [PATCH 0734/3617] cloudstack: add timeout to utils --- lib/ansible/module_utils/cloudstack.py | 2 ++ v1/ansible/module_utils/cloudstack.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index e887367c2fd69b..82306b9a0be87d 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -64,12 +64,14 @@ def _connect(self): api_secret = self.module.params.get('secret_key') api_url = self.module.params.get('api_url') api_http_method = self.module.params.get('api_http_method') + api_timeout = self.module.params.get('api_timeout') if api_key and api_secret and api_url: self.cs = CloudStack( endpoint=api_url, key=api_key, secret=api_secret, + timeout=api_timeout, method=api_http_method ) else: diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py index e887367c2fd69b..82306b9a0be87d 100644 --- a/v1/ansible/module_utils/cloudstack.py +++ b/v1/ansible/module_utils/cloudstack.py @@ -64,12 +64,14 @@ def _connect(self): api_secret = self.module.params.get('secret_key') api_url = self.module.params.get('api_url') api_http_method = self.module.params.get('api_http_method') + api_timeout = self.module.params.get('api_timeout') if api_key and api_secret and api_url: self.cs = CloudStack( endpoint=api_url, key=api_key, secret=api_secret, + timeout=api_timeout, method=api_http_method ) else: From e251e701783ff053dc1d59a917bfaa9d788a2c6a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 08:54:37 -0400 Subject: [PATCH 0735/3617] added raw to 'raw' modules --- lib/ansible/parsing/mod_args.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index 87b3813d8f0410..c24b581fa89090 100644 --- 
a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -274,6 +274,7 @@ def parse(self): 'add_host', 'group_by', 'set_fact', + 'raw', 'meta', ) # if we didn't see any module in the task at all, it's not a task really From bc041ffea07ce812587ee23ec1b6511a08bef999 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 2 Jun 2015 08:41:58 -0500 Subject: [PATCH 0736/3617] Adding raw module to list of modules allowing raw params Fixes #11119 --- lib/ansible/parsing/mod_args.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index c24b581fa89090..a154d405770f90 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -266,6 +266,7 @@ def parse(self): # FIXME: this should probably be somewhere else RAW_PARAM_MODULES = ( + 'raw', 'command', 'shell', 'script', From d1b43712870f5331a58abe115911725619264ca5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 2 Jun 2015 09:41:46 -0500 Subject: [PATCH 0737/3617] Correctly evaluate changed/failed for tasks using loops --- lib/ansible/executor/task_executor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 9bc875b02a4395..7c769cc4604fe4 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -83,9 +83,9 @@ def run(self): changed = False failed = False for item in item_results: - if 'changed' in item: + if 'changed' in item and item['changed']: changed = True - if 'failed' in item: + if 'failed' in item and item['failed']: failed = True # create the overall result item, and set the changed/failed From 47be5b416658ef1474aee89873fbd72622f83777 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 11:02:40 -0400 Subject: [PATCH 0738/3617] added missing ansibleoptionserror import and moved args check in playbook to after parser exists to allow for 
creating usage info --- lib/ansible/cli/__init__.py | 2 +- lib/ansible/cli/playbook.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 1e997f58d37a0e..d63203b2e56b01 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -31,7 +31,7 @@ from ansible import __version__ from ansible import constants as C -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.utils.unicode import to_bytes class SortedOptParser(optparse.OptionParser): diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index 97d4f0de3f92a3..1c59d5dde6ff64 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -24,7 +24,7 @@ from ansible import constants as C from ansible.cli import CLI -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.executor.playbook_executor import PlaybookExecutor from ansible.inventory import Inventory from ansible.parsing import DataLoader @@ -69,11 +69,12 @@ def parse(self): self.options, self.args = parser.parse_args() - if len(self.args) == 0: - raise AnsibleOptionsError("You must specify a playbook file to run") self.parser = parser + if len(self.args) == 0: + raise AnsibleOptionsError("You must specify a playbook file to run") + self.display.verbosity = self.options.verbosity self.validate_conflicts() From 2590df6df1e3e4317f3247185be2940d95bd2c7b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 11:41:30 -0400 Subject: [PATCH 0739/3617] created makedirs_safe function for use in cases of multiprocess should fix #11126 and most race conditions --- lib/ansible/plugins/action/fetch.py | 4 ++-- lib/ansible/plugins/connections/paramiko_ssh.py | 7 +++---- lib/ansible/plugins/connections/winrm.py | 7 +++---- lib/ansible/plugins/lookup/password.py | 10 +++++----- lib/ansible/utils/path.py | 10 
++++++++++ 5 files changed, 23 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py index c242c8739d014e..6a903ae5a27c3d 100644 --- a/lib/ansible/plugins/action/fetch.py +++ b/lib/ansible/plugins/action/fetch.py @@ -29,6 +29,7 @@ from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash +from ansible.utils.path import makedirs_safe class ActionModule(ActionBase): @@ -125,8 +126,7 @@ def run(self, tmp=None, task_vars=dict()): if remote_checksum != local_checksum: # create the containing directories, if needed - if not os.path.isdir(os.path.dirname(dest)): - os.makedirs(os.path.dirname(dest)) + makedirs_safe(os.path.dirname(dest)) # fetch the file and check for changes if remote_data is None: diff --git a/lib/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py index 797eeea9e021ef..0d7a82c34b55c8 100644 --- a/lib/ansible/plugins/connections/paramiko_ssh.py +++ b/lib/ansible/plugins/connections/paramiko_ssh.py @@ -42,6 +42,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connections import ConnectionBase +from ansible.utils.path import makedirs_safe AUTHENTICITY_MSG=""" paramiko: The authenticity of host '%s' can't be established. 
@@ -309,8 +310,7 @@ def _save_ssh_host_keys(self, filename): return False path = os.path.expanduser("~/.ssh") - if not os.path.exists(path): - os.makedirs(path) + makedirs_safe(path) f = open(filename, 'w') @@ -347,8 +347,7 @@ def close(self): # add any new SSH host keys -- warning -- this could be slow lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock") dirname = os.path.dirname(self.keyfile) - if not os.path.exists(dirname): - os.makedirs(dirname) + makedirs_safe(dirname) KEY_LOCK = open(lockfile, 'w') fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX) diff --git a/lib/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py index 8a42da2534b248..dbdf7cd6789922 100644 --- a/lib/ansible/plugins/connections/winrm.py +++ b/lib/ansible/plugins/connections/winrm.py @@ -44,6 +44,7 @@ from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connections import ConnectionBase from ansible.plugins import shell_loader +from ansible.utils import makedirs_safe class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' @@ -213,8 +214,7 @@ def fetch_file(self, in_path, out_path): out_path = out_path.replace('\\', '/') self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) buffer_size = 2**19 # 0.5MB chunks - if not os.path.exists(os.path.dirname(out_path)): - os.makedirs(os.path.dirname(out_path)) + makedirs_safe(os.path.dirname(out_path)) out_file = None try: offset = 0 @@ -251,8 +251,7 @@ def fetch_file(self, in_path, out_path): else: data = base64.b64decode(result.std_out.strip()) if data is None: - if not os.path.exists(out_path): - os.makedirs(out_path) + makedirs_safe(out_path) break else: if not out_file: diff --git a/lib/ansible/plugins/lookup/password.py b/lib/ansible/plugins/lookup/password.py index 2e7633a067ac84..9506274e5f8b46 100644 --- a/lib/ansible/plugins/lookup/password.py +++ b/lib/ansible/plugins/lookup/password.py @@ -30,6 
+30,7 @@ from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.utils.encrypt import do_encrypt +from ansible.utils import makedirs_safe DEFAULT_LENGTH = 20 @@ -98,11 +99,10 @@ def run(self, terms, variables, **kwargs): path = self._loader.path_dwim(relpath) if not os.path.exists(path): pathdir = os.path.dirname(path) - if not os.path.isdir(pathdir): - try: - os.makedirs(pathdir, mode=0o700) - except OSError as e: - raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e))) + try: + makedirs_safe(pathdir, mode=0o700) + except OSError as e: + raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e))) chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'') password = ''.join(random.choice(chars) for _ in range(length)) diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py index e49a2f7d5533d1..534226984be9d2 100644 --- a/lib/ansible/utils/path.py +++ b/lib/ansible/utils/path.py @@ -19,6 +19,7 @@ import os import stat +from time import sleep __all__ = ['is_executable', 'unfrackpath'] @@ -35,3 +36,12 @@ def unfrackpath(path): ''' return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path)))) +def makedirs_safe(path, mode=None): + '''Safe way to create dirs in muliprocess/thread environments''' + while not os.path.exists(path): + try: + os.makedirs(path, mode) + except OSError, e: + if e.errno != 17: + raise + sleep(1) From 8c6fa5fb773cbbb847c4be8932c5452b4abe76c8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 12:41:02 -0400 Subject: [PATCH 0740/3617] added promox_template module to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f806cbfb1f81ff..cfc062f577c157 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ New Modules: * openstack: os_subnet * openstack: os_volume * 
proxmox + * proxmox_template * pushover * pushbullet * rabbitmq_binding From ba02e5e3bf7d03a8c64713cebb5f851b2f5396ce Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 13:01:02 -0400 Subject: [PATCH 0741/3617] minor adjustments as per code review --- lib/ansible/utils/path.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py index 534226984be9d2..ac5160402b2116 100644 --- a/lib/ansible/utils/path.py +++ b/lib/ansible/utils/path.py @@ -20,6 +20,7 @@ import os import stat from time import sleep +from errno import EEXIST __all__ = ['is_executable', 'unfrackpath'] @@ -38,10 +39,9 @@ def unfrackpath(path): def makedirs_safe(path, mode=None): '''Safe way to create dirs in muliprocess/thread environments''' - while not os.path.exists(path): + if not os.path.exists(path): try: os.makedirs(path, mode) except OSError, e: - if e.errno != 17: + if e.errno != EEXIST: raise - sleep(1) From e0ef217f9714280e8ad3eddbf00c5742346446bf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 13:33:33 -0400 Subject: [PATCH 0742/3617] Revert "Adding raw module to list of modules allowing raw params" This reverts commit bc041ffea07ce812587ee23ec1b6511a08bef999. 
same fix x2 does not fix it 'more' --- lib/ansible/parsing/mod_args.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index a154d405770f90..c24b581fa89090 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -266,7 +266,6 @@ def parse(self): # FIXME: this should probably be somewhere else RAW_PARAM_MODULES = ( - 'raw', 'command', 'shell', 'script', From 71014ab01e54fc5f84f0ec256ea9822de8602ef6 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 2 Jun 2015 13:30:14 -0500 Subject: [PATCH 0743/3617] Fix command building for scp if ssh --- lib/ansible/plugins/connections/ssh.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 426dc6b49d06ae..b3ada343c0454d 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -407,12 +407,12 @@ def put_file(self, in_path, out_path): if C.DEFAULT_SCP_IF_SSH: cmd.append('scp') - cmd += self._common_args - cmd.append(in_path,host + ":" + pipes.quote(out_path)) + cmd.extend(self._common_args) + cmd.extend([in_path, '{0}:{1}'.format(host, pipes.quote(out_path))]) indata = None else: cmd.append('sftp') - cmd += self._common_args + cmd.extend(self._common_args) cmd.append(host) indata = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path)) @@ -440,12 +440,12 @@ def fetch_file(self, in_path, out_path): if C.DEFAULT_SCP_IF_SSH: cmd.append('scp') - cmd += self._common_args - cmd += ('{0}:{1}'.format(host, in_path), out_path) + cmd.extend(self._common_args) + cmd.extend(['{0}:{1}'.format(host, in_path), out_path]) indata = None else: cmd.append('sftp') - cmd += self._common_args + cmd.extend(self._common_args) cmd.append(host) indata = "get {0} {1}\n".format(in_path, out_path) From 300eb3a843dc773722ebd7bc1ceea9a3b8d91e86 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: 
Tue, 2 Jun 2015 11:43:35 -0700 Subject: [PATCH 0744/3617] Add six as a dependency for packaging --- packaging/debian/README.md | 2 +- packaging/debian/control | 2 +- packaging/rpm/ansible.spec | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/packaging/debian/README.md b/packaging/debian/README.md index 62c6af084c02d3..a8150ff30f188e 100644 --- a/packaging/debian/README.md +++ b/packaging/debian/README.md @@ -3,7 +3,7 @@ Ansible Debian Package To create an Ansible DEB package: - sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools sshpass + sudo apt-get install python-paramiko python-yaml python-jinja2 python-httplib2 python-setuptools python-six sshpass sudo apt-get install cdbs debhelper dpkg-dev git-core reprepro python-support fakeroot asciidoc devscripts git clone git://github.com/ansible/ansible.git cd ansible diff --git a/packaging/debian/control b/packaging/debian/control index 14d737444e7c34..73e1cc92021dd9 100644 --- a/packaging/debian/control +++ b/packaging/debian/control @@ -8,7 +8,7 @@ Homepage: http://ansible.github.com/ Package: ansible Architecture: all -Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko, python-httplib2, python-crypto (>= 2.6), sshpass, ${misc:Depends} +Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko, python-httplib2, python-six, python-crypto (>= 2.6), sshpass, ${misc:Depends} Description: A radically simple IT automation platform A radically simple IT automation platform that makes your applications and systems easier to deploy. 
Avoid writing scripts or custom code to deploy and diff --git a/packaging/rpm/ansible.spec b/packaging/rpm/ansible.spec index 394017dc0fbdae..ddda6eeb798fd3 100644 --- a/packaging/rpm/ansible.spec +++ b/packaging/rpm/ansible.spec @@ -28,6 +28,7 @@ Requires: python26-jinja2 Requires: python26-keyczar Requires: python26-httplib2 Requires: python26-setuptools +Requires: python26-six %endif # RHEL == 6 @@ -45,6 +46,7 @@ Requires: python-jinja2 Requires: python-keyczar Requires: python-httplib2 Requires: python-setuptools +Requires: python-six %endif # FEDORA > 17 @@ -57,6 +59,7 @@ Requires: python-jinja2 Requires: python-keyczar Requires: python-httplib2 Requires: python-setuptools +Requires: python-six %endif # SuSE/openSuSE @@ -69,6 +72,7 @@ Requires: python-keyczar Requires: python-yaml Requires: python-httplib2 Requires: python-setuptools +Requires: python-six %endif Requires: sshpass From 697a1a406122fa7d932146b0d32159ad363cf245 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 2 Jun 2015 14:01:11 -0500 Subject: [PATCH 0745/3617] Don't override ansible_ssh_host with inventory_hostname --- lib/ansible/executor/task_executor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 9bc875b02a4395..5c6fc862a03c4d 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -371,7 +371,6 @@ def _get_connection(self, variables): # FIXME: delegate_to calculation should be done here # FIXME: calculation of connection params/auth stuff should be done here - self._connection_info.remote_addr = self._host.ipv4_address if self._task.delegate_to is not None: self._compute_delegate(variables) From 65191181069f8d67de81fea1943786fbbf6466d5 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 2 Jun 2015 14:11:16 -0500 Subject: [PATCH 0746/3617] Add missing import in ansible.cli --- lib/ansible/cli/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index d63203b2e56b01..daf14aab1f7aaa 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -33,6 +33,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.utils.unicode import to_bytes +from ansible.utils.display import Display class SortedOptParser(optparse.OptionParser): '''Optparser which sorts the options by opt before outputting --help''' From 1b48111b12f507dcce509c24917e27f9c29653b7 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 2 Jun 2015 14:56:32 -0500 Subject: [PATCH 0747/3617] If remote_addr isn't set, set to ipv4_address --- lib/ansible/executor/task_executor.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 23cc880bceb696..9ba2b6bca51dd1 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -371,6 +371,9 @@ def _get_connection(self, variables): # FIXME: delegate_to calculation should be done here # FIXME: calculation of connection params/auth stuff should be done here + if not self._connection_info.remote_addr: + self._connection_info.remote_addr = self._host.ipv4_address + if self._task.delegate_to is not None: self._compute_delegate(variables) From 48c0d6388ff0cfaa760e77617170ebffe60298ba Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 15:37:06 -0400 Subject: [PATCH 0748/3617] moved RAW var to class and as a frozenset --- lib/ansible/parsing/mod_args.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index c24b581fa89090..19a51212f72df4 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -25,6 +25,20 @@ from ansible.plugins import module_loader from ansible.parsing.splitter import parse_kv +# For filtering out 
modules correctly below +RAW_PARAM_MODULES = frozenset( + 'command', + 'shell', + 'script', + 'include', + 'include_vars', + 'add_host', + 'group_by', + 'set_fact', + 'raw', + 'meta', +) + class ModuleArgsParser: """ @@ -264,19 +278,6 @@ def parse(self): thing = value action, args = self._normalize_parameters(value, action=action, additional_args=additional_args) - # FIXME: this should probably be somewhere else - RAW_PARAM_MODULES = ( - 'command', - 'shell', - 'script', - 'include', - 'include_vars', - 'add_host', - 'group_by', - 'set_fact', - 'raw', - 'meta', - ) # if we didn't see any module in the task at all, it's not a task really if action is None: raise AnsibleParserError("no action detected in task", obj=self._task_ds) From 5622fc23bc51eebde538b582b5e020c885511f31 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 23:34:57 -0400 Subject: [PATCH 0749/3617] fixed frozen set, missing iterable --- lib/ansible/parsing/mod_args.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index 19a51212f72df4..d7cc83a90557d8 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -26,7 +26,7 @@ from ansible.parsing.splitter import parse_kv # For filtering out modules correctly below -RAW_PARAM_MODULES = frozenset( +RAW_PARAM_MODULES = ([ 'command', 'shell', 'script', @@ -37,7 +37,7 @@ 'set_fact', 'raw', 'meta', -) +]) class ModuleArgsParser: From 65b82f69e4456c8f6521fbec9af769092fe0b2e0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 23:39:57 -0400 Subject: [PATCH 0750/3617] avoid failing when mode is none --- lib/ansible/utils/path.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py index ac5160402b2116..b271e7ed4bc937 100644 --- a/lib/ansible/utils/path.py +++ b/lib/ansible/utils/path.py @@ -41,7 +41,10 @@ def makedirs_safe(path, mode=None): '''Safe way 
to create dirs in muliprocess/thread environments''' if not os.path.exists(path): try: - os.makedirs(path, mode) + if mode: + os.makedirs(path, mode) + else: + os.makedirs(path) except OSError, e: if e.errno != EEXIST: raise From 3e2e81d896067170e72ca2999fe84c1ba81b9604 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 2 Jun 2015 23:42:00 -0400 Subject: [PATCH 0751/3617] missing path in import path for making paths --- lib/ansible/plugins/connections/winrm.py | 2 +- lib/ansible/plugins/lookup/password.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py index dbdf7cd6789922..f16da0f6e63a01 100644 --- a/lib/ansible/plugins/connections/winrm.py +++ b/lib/ansible/plugins/connections/winrm.py @@ -44,7 +44,7 @@ from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connections import ConnectionBase from ansible.plugins import shell_loader -from ansible.utils import makedirs_safe +from ansible.utils.path import makedirs_safe class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' diff --git a/lib/ansible/plugins/lookup/password.py b/lib/ansible/plugins/lookup/password.py index 9506274e5f8b46..47ec786429e836 100644 --- a/lib/ansible/plugins/lookup/password.py +++ b/lib/ansible/plugins/lookup/password.py @@ -30,7 +30,7 @@ from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase from ansible.utils.encrypt import do_encrypt -from ansible.utils import makedirs_safe +from ansible.utils.path import makedirs_safe DEFAULT_LENGTH = 20 From a899f8f01655bdaca349c19e73d4e9bc0d04e095 Mon Sep 17 00:00:00 2001 From: Patrick McConnell Date: Wed, 3 Jun 2015 07:26:18 +0200 Subject: [PATCH 0752/3617] Fix for task_executor on OS X I get this exception during the setup task: AttributeError: 'ConnectionInformation' object has no attribute 'remote_pass' I believe it is supposed to be looking 
at the password attribute. Either that or we should create a remote_pass attribute in ConnectionInformation. --- lib/ansible/executor/task_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 9ba2b6bca51dd1..69cbb63f47cbe4 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -380,7 +380,7 @@ def _get_connection(self, variables): conn_type = self._connection_info.connection if conn_type == 'smart': conn_type = 'ssh' - if sys.platform.startswith('darwin') and self._connection_info.remote_pass: + if sys.platform.startswith('darwin') and self._connection_info.password: # due to a current bug in sshpass on OSX, which can trigger # a kernel panic even for non-privileged users, we revert to # paramiko on that OS when a SSH password is specified From 5204d7ca889e0f723c6b66eee13f3e479465fde0 Mon Sep 17 00:00:00 2001 From: Etienne CARRIERE Date: Wed, 3 Jun 2015 08:20:26 +0200 Subject: [PATCH 0753/3617] Add common fonctions for F5 modules (FQ Name functions) --- lib/ansible/module_utils/f5.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/f5.py b/lib/ansible/module_utils/f5.py index 2d97662a0b6576..d072c759e2a64a 100644 --- a/lib/ansible/module_utils/f5.py +++ b/lib/ansible/module_utils/f5.py @@ -50,7 +50,7 @@ def f5_parse_arguments(module): module.fail_json(msg="the python bigsuds module is required") if not module.params['validate_certs']: disable_ssl_cert_validation() - return (module.params['server'],module.params['user'],module.params['password'],module.params['state'],module.params['partition']) + return (module.params['server'],module.params['user'],module.params['password'],module.params['state'],module.params['partition'],module.params['validate_certs']) def bigip_api(bigip, user, password): api = bigsuds.BIGIP(hostname=bigip, username=user, 
password=password) @@ -62,3 +62,19 @@ def disable_ssl_cert_validation(): import ssl ssl._create_default_https_context = ssl._create_unverified_context +# Fully Qualified name (with the partition) +def fq_name(partition,name): + if name is None: + return None + if name[0] is '/': + return name + else: + return '/%s/%s' % (partition,name) + +# Fully Qualified name (with partition) for a list +def fq_list_names(partition,list_names): + if list_names is None: + return None + return map(lambda x: fq_name(partition,x),list_names) + + From f983557e7e0c23540bb4625635b84726d572227b Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 3 Jun 2015 09:51:00 -0500 Subject: [PATCH 0754/3617] Don't set a default on the _become FieldAttribute. Fixes #11136 --- lib/ansible/playbook/become.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py index daa8c80ba943ac..fca28538585917 100644 --- a/lib/ansible/playbook/become.py +++ b/lib/ansible/playbook/become.py @@ -27,7 +27,7 @@ class Become: # Privlege escalation - _become = FieldAttribute(isa='bool', default=False) + _become = FieldAttribute(isa='bool') _become_method = FieldAttribute(isa='string') _become_user = FieldAttribute(isa='string') _become_pass = FieldAttribute(isa='string') From 89dceb503a171a595a68960961ac3cb098336da6 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 3 Jun 2015 10:02:27 -0500 Subject: [PATCH 0755/3617] Import missing MutableMapping class --- lib/ansible/utils/module_docs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index 1565bb3be87384..9a7ee0ae33bd78 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -23,6 +23,7 @@ import yaml import traceback +from collections import MutableMapping from ansible.plugins import fragment_loader # modules that are ok that they do not have documentation strings From 
2e39661a26d881f1ff5991ae46e5cbf45b91cfe9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 3 Jun 2015 11:15:13 -0400 Subject: [PATCH 0756/3617] made with_ examples have explicit templating --- docsite/rst/playbooks_loops.rst | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst index 5456791f61472d..a76254a966cf97 100644 --- a/docsite/rst/playbooks_loops.rst +++ b/docsite/rst/playbooks_loops.rst @@ -23,7 +23,7 @@ To save some typing, repeated tasks can be written in short-hand like so:: If you have defined a YAML list in a variables file, or the 'vars' section, you can also do:: - with_items: somelist + with_items: "{{somelist}}" The above would be the equivalent of:: @@ -58,12 +58,12 @@ Loops can be nested as well:: - [ 'alice', 'bob' ] - [ 'clientdb', 'employeedb', 'providerdb' ] -As with the case of 'with_items' above, you can use previously defined variables. Just specify the variable's name without templating it with '{{ }}':: +As with the case of 'with_items' above, you can use previously defined variables.:: - name: here, 'users' contains the above list of employees mysql_user: name={{ item[0] }} priv={{ item[1] }}.*:ALL append_privs=yes password=foo with_nested: - - users + - "{{users}}" - [ 'clientdb', 'employeedb', 'providerdb' ] .. _looping_over_hashes: @@ -89,7 +89,7 @@ And you want to print every user's name and phone number. You can loop through tasks: - name: Print phone records debug: msg="User {{ item.key }} is {{ item.value.name }} ({{ item.value.telephone }})" - with_dict: users + with_dict: "{{users}}" .. _looping_over_fileglobs: @@ -111,7 +111,7 @@ be used like this:: - copy: src={{ item }} dest=/etc/fooapp/ owner=root mode=600 with_fileglob: - /playbooks/files/fooapp/* - + .. note:: When using a relative path with ``with_fileglob`` in a role, Ansible resolves the path relative to the `roles//files` directory. 
Looping over Parallel Sets of Data @@ -130,21 +130,21 @@ And you want the set of '(a, 1)' and '(b, 2)' and so on. Use 'with_together' t tasks: - debug: msg="{{ item.0 }} and {{ item.1 }}" with_together: - - alpha - - numbers + - "{{alpha}}" + - "{{numbers}}" Looping over Subelements ```````````````````````` Suppose you want to do something like loop over a list of users, creating them, and allowing them to login by a certain set of -SSH keys. +SSH keys. How might that be accomplished? Let's assume you had the following defined and loaded in via "vars_files" or maybe a "group_vars/all" file:: --- users: - name: alice - authorized: + authorized: - /tmp/alice/onekey.pub - /tmp/alice/twokey.pub mysql: @@ -171,7 +171,7 @@ How might that be accomplished? Let's assume you had the following defined and It might happen like so:: - user: name={{ item.name }} state=present generate_ssh_key=yes - with_items: users + with_items: "{{users}}" - authorized_key: "user={{ item.0.name }} key='{{ lookup('file', item.1) }}'" with_subelements: @@ -329,7 +329,7 @@ Should you ever need to execute a command remotely, you would not use the above - name: Do something with each result shell: /usr/bin/something_else --param {{ item }} - with_items: command_result.stdout_lines + with_items: "{{command_result.stdout_lines}}" .. _indexed_lists: @@ -345,7 +345,7 @@ It's uncommonly used:: - name: indexed loop demo debug: msg="at array position {{ item.0 }} there is a value {{ item.1 }}" - with_indexed_items: some_list + with_indexed_items: "{{some_list}}" .. _flattening_a_list: @@ -370,8 +370,8 @@ As you can see the formatting of packages in these lists is all over the place. - name: flattened loop demo yum: name={{ item }} state=installed with_flattened: - - packages_base - - packages_apps + - "{{packages_base}}" + - "{{packages_apps}}" That's how! 
@@ -435,7 +435,7 @@ Subsequent loops over the registered variable to inspect the results may look li fail: msg: "The command ({{ item.cmd }}) did not have a 0 return code" when: item.rc != 0 - with_items: echo.results + with_items: "{{echo.results}}" .. _writing_your_own_iterators: From d8c8ca11cfa0787bc14655439b080a9b7c4962e5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Jun 2015 08:45:10 -0700 Subject: [PATCH 0757/3617] Add compatibility for old version of six (present on rhel7) --- lib/ansible/parsing/vault/__init__.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 40d02d3d59c7c4..6c2b7c9c62d227 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -36,7 +36,19 @@ from hashlib import md5 from binascii import hexlify from binascii import unhexlify -from six import binary_type, byte2int, PY2, text_type +from six import binary_type, PY2, text_type + +try: + from six import byte2int +except ImportError: + # bytes2int added in six-1.4.0 + if PY2: + def byte2int(bs): + return ord(bs[0]) + else: + import operator + byte2int = operator.itemgetter(0) + from ansible import constants as C from ansible.utils.unicode import to_unicode, to_bytes From c3caff5eebac3a9ccdbc242367d22d9372e77c5f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Jun 2015 10:24:35 -0700 Subject: [PATCH 0758/3617] Fix for six version 1.1.0 (rhel6). 
--- lib/ansible/parsing/vault/__init__.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 6c2b7c9c62d227..4cd7d2e80bbb6b 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -36,18 +36,18 @@ from hashlib import md5 from binascii import hexlify from binascii import unhexlify -from six import binary_type, PY2, text_type +from six import binary_type, PY3, text_type try: from six import byte2int except ImportError: # bytes2int added in six-1.4.0 - if PY2: - def byte2int(bs): - return ord(bs[0]) - else: + if PY3: import operator byte2int = operator.itemgetter(0) + else: + def byte2int(bs): + return ord(bs[0]) from ansible import constants as C from ansible.utils.unicode import to_unicode, to_bytes @@ -463,10 +463,10 @@ def decrypt(self, data, password, key_length=32): while not finished: chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs)) if len(next_chunk) == 0: - if PY2: - padding_length = ord(chunk[-1]) - else: + if PY3: padding_length = chunk[-1] + else: + padding_length = ord(chunk[-1]) chunk = chunk[:-padding_length] finished = True @@ -608,8 +608,8 @@ def is_equal(self, a, b): result = 0 for x, y in zip(a, b): - if PY2: - result |= ord(x) ^ ord(y) - else: + if PY3: result |= x ^ y + else: + result |= ord(x) ^ ord(y) return result == 0 From 1c8527044bd1fff05c2a716ede98b7a49ec93d93 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Jun 2015 11:26:53 -0700 Subject: [PATCH 0759/3617] Fix error handling when pasing output from dynamic inventory --- lib/ansible/inventory/script.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index 9675d70f690910..be97f5454c2fc1 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -23,6 +23,8 @@ import 
subprocess import sys +from collections import Mapping + from ansible import constants as C from ansible.errors import * from ansible.inventory.host import Host @@ -62,7 +64,16 @@ def _parse(self, err): all_hosts = {} # not passing from_remote because data from CMDB is trusted - self.raw = self._loader.load(self.data) + try: + self.raw = self._loader.load(self.data) + except Exception as e: + sys.stderr.write(err + "\n") + raise AnsibleError("failed to parse executable inventory script results: %s" % str(e)) + + if not isinstance(self.raw, Mapping): + sys.stderr.write(err + "\n") + raise AnsibleError("failed to parse executable inventory script results: data needs to be formatted as a json dict" ) + self.raw = json_dict_bytes_to_unicode(self.raw) all = Group('all') @@ -70,10 +81,6 @@ def _parse(self, err): group = None - if 'failed' in self.raw: - sys.stderr.write(err + "\n") - raise AnsibleError("failed to parse executable inventory script results: %s" % self.raw) - for (group_name, data) in self.raw.items(): # in Ansible 1.3 and later, a "_meta" subelement may contain From 96836412aa2257a45730e6e133bc479040eb7d71 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Jun 2015 11:51:05 -0700 Subject: [PATCH 0760/3617] Make error messages tell which inventory script the error came from --- lib/ansible/inventory/script.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py index be97f5454c2fc1..91549d78fb2665 100644 --- a/lib/ansible/inventory/script.py +++ b/lib/ansible/inventory/script.py @@ -68,11 +68,11 @@ def _parse(self, err): self.raw = self._loader.load(self.data) except Exception as e: sys.stderr.write(err + "\n") - raise AnsibleError("failed to parse executable inventory script results: %s" % str(e)) + raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(self.filename, str(e))) if not isinstance(self.raw, Mapping): 
sys.stderr.write(err + "\n") - raise AnsibleError("failed to parse executable inventory script results: data needs to be formatted as a json dict" ) + raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(self.filename)) self.raw = json_dict_bytes_to_unicode(self.raw) From 9856a8f674a4590fd461eba938ff3cb8eb872994 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 3 Jun 2015 14:56:01 -0400 Subject: [PATCH 0761/3617] added missing imports to doc module --- lib/ansible/utils/module_docs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index 9a7ee0ae33bd78..e296c0c6986238 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -23,7 +23,7 @@ import yaml import traceback -from collections import MutableMapping +from collections import MutableMapping, MutableSet, MutableSequence from ansible.plugins import fragment_loader # modules that are ok that they do not have documentation strings From 94fa5e879484b988036a2e12c0a3bf1b3e7a351e Mon Sep 17 00:00:00 2001 From: Etienne CARRIERE Date: Wed, 3 Jun 2015 21:19:11 +0200 Subject: [PATCH 0762/3617] Simplify Fully Qualified function --- lib/ansible/module_utils/f5.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/f5.py b/lib/ansible/module_utils/f5.py index d072c759e2a64a..097a6370afe8ab 100644 --- a/lib/ansible/module_utils/f5.py +++ b/lib/ansible/module_utils/f5.py @@ -64,12 +64,9 @@ def disable_ssl_cert_validation(): # Fully Qualified name (with the partition) def fq_name(partition,name): - if name is None: - return None - if name[0] is '/': - return name - else: + if name is not None and not name.startswith('/'): return '/%s/%s' % (partition,name) + return name # Fully Qualified name (with partition) for a list def fq_list_names(partition,list_names): From 
c89f98168d0ba87c54bbc978928cb2d4f54afef2 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 3 Jun 2015 14:53:19 -0500 Subject: [PATCH 0763/3617] Add the hacking directory to v1 --- v1/hacking/README.md | 48 ++++ v1/hacking/authors.sh | 14 ++ v1/hacking/env-setup | 78 ++++++ v1/hacking/env-setup.fish | 67 +++++ v1/hacking/get_library.py | 29 +++ v1/hacking/module_formatter.py | 447 +++++++++++++++++++++++++++++++++ v1/hacking/templates/rst.j2 | 211 ++++++++++++++++ v1/hacking/test-module | 193 ++++++++++++++ v1/hacking/update.sh | 3 + 9 files changed, 1090 insertions(+) create mode 100644 v1/hacking/README.md create mode 100755 v1/hacking/authors.sh create mode 100644 v1/hacking/env-setup create mode 100644 v1/hacking/env-setup.fish create mode 100755 v1/hacking/get_library.py create mode 100755 v1/hacking/module_formatter.py create mode 100644 v1/hacking/templates/rst.j2 create mode 100755 v1/hacking/test-module create mode 100755 v1/hacking/update.sh diff --git a/v1/hacking/README.md b/v1/hacking/README.md new file mode 100644 index 00000000000000..ae8db7e3a9b952 --- /dev/null +++ b/v1/hacking/README.md @@ -0,0 +1,48 @@ +'Hacking' directory tools +========================= + +Env-setup +--------- + +The 'env-setup' script modifies your environment to allow you to run +ansible from a git checkout using python 2.6+. (You may not use +python 3 at this time). + +First, set up your environment to run from the checkout: + + $ source ./hacking/env-setup + +You will need some basic prerequisites installed. If you do not already have them +and do not wish to install them from your operating system package manager, you +can install them from pip + + $ easy_install pip # if pip is not already available + $ pip install pyyaml jinja2 nose passlib pycrypto + +From there, follow ansible instructions on docs.ansible.com as normal. 
+ +Test-module +----------- + +'test-module' is a simple program that allows module developers (or testers) to run +a module outside of the ansible program, locally, on the current machine. + +Example: + + $ ./hacking/test-module -m lib/ansible/modules/core/commands/shell -a "echo hi" + +This is a good way to insert a breakpoint into a module, for instance. + +Module-formatter +---------------- + +The module formatter is a script used to generate manpages and online +module documentation. This is used by the system makefiles and rarely +needs to be run directly. + +Authors +------- +'authors' is a simple script that generates a list of everyone who has +contributed code to the ansible repository. + + diff --git a/v1/hacking/authors.sh b/v1/hacking/authors.sh new file mode 100755 index 00000000000000..7c97840b2fbc83 --- /dev/null +++ b/v1/hacking/authors.sh @@ -0,0 +1,14 @@ +#!/bin/sh +# script from http://stackoverflow.com/questions/12133583 +set -e + +# Get a list of authors ordered by number of commits +# and remove the commit count column +AUTHORS=$(git --no-pager shortlog -nse | cut -f 2- | sort -f) +if [ -z "$AUTHORS" ] ; then + echo "Authors list was empty" + exit 1 +fi + +# Display the authors list and write it to the file +echo "$AUTHORS" | tee "$(git rev-parse --show-toplevel)/AUTHORS.TXT" diff --git a/v1/hacking/env-setup b/v1/hacking/env-setup new file mode 100644 index 00000000000000..29f4828410a736 --- /dev/null +++ b/v1/hacking/env-setup @@ -0,0 +1,78 @@ +# usage: source hacking/env-setup [-q] +# modifies environment for running Ansible from checkout + +# Default values for shell variables we use +PYTHONPATH=${PYTHONPATH-""} +PATH=${PATH-""} +MANPATH=${MANPATH-""} +verbosity=${1-info} # Defaults to `info' if unspecified + +if [ "$verbosity" = -q ]; then + verbosity=silent +fi + +# When run using source as directed, $0 gets set to bash, so we must use $BASH_SOURCE +if [ -n "$BASH_SOURCE" ] ; then + HACKING_DIR=$(dirname "$BASH_SOURCE") +elif [ 
$(basename -- "$0") = "env-setup" ]; then + HACKING_DIR=$(dirname "$0") +# Works with ksh93 but not pdksh +elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then + HACKING_DIR=$(dirname "${.sh.file}") +else + HACKING_DIR="$PWD/hacking" +fi +# The below is an alternative to readlink -fn which doesn't exist on OS X +# Source: http://stackoverflow.com/a/1678636 +FULL_PATH=$(python -c "import os; print(os.path.realpath('$HACKING_DIR'))") +ANSIBLE_HOME=$(dirname "$FULL_PATH") + +PREFIX_PYTHONPATH="$ANSIBLE_HOME" +PREFIX_PATH="$ANSIBLE_HOME/bin" +PREFIX_MANPATH="$ANSIBLE_HOME/docs/man" + +expr "$PYTHONPATH" : "${PREFIX_PYTHONPATH}.*" > /dev/null || export PYTHONPATH="$PREFIX_PYTHONPATH:$PYTHONPATH" +expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || export PATH="$PREFIX_PATH:$PATH" +expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_MANPATH:$MANPATH" + +# +# Generate egg_info so that pkg_resources works +# + +# Do the work in a function so we don't repeat ourselves later +gen_egg_info() +{ + if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then + rm -r "$PREFIX_PYTHONPATH/ansible.egg-info" + fi + python setup.py egg_info +} + +if [ "$ANSIBLE_HOME" != "$PWD" ] ; then + current_dir="$PWD" +else + current_dir="$ANSIBLE_HOME" +fi +cd "$ANSIBLE_HOME" +if [ "$verbosity" = silent ] ; then + gen_egg_info > /dev/null 2>&1 +else + gen_egg_info +fi +cd "$current_dir" + +if [ "$verbosity" != silent ] ; then + cat <<- EOF + + Setting up Ansible to run out of checkout... + + PATH=$PATH + PYTHONPATH=$PYTHONPATH + MANPATH=$MANPATH + + Remember, you may wish to specify your host file with -i + + Done! + + EOF +fi diff --git a/v1/hacking/env-setup.fish b/v1/hacking/env-setup.fish new file mode 100644 index 00000000000000..9deffb4e3d911a --- /dev/null +++ b/v1/hacking/env-setup.fish @@ -0,0 +1,67 @@ +#!/usr/bin/env fish +# usage: . 
./hacking/env-setup [-q] +# modifies environment for running Ansible from checkout +set HACKING_DIR (dirname (status -f)) +set FULL_PATH (python -c "import os; print(os.path.realpath('$HACKING_DIR'))") +set ANSIBLE_HOME (dirname $FULL_PATH) +set PREFIX_PYTHONPATH $ANSIBLE_HOME/ +set PREFIX_PATH $ANSIBLE_HOME/bin +set PREFIX_MANPATH $ANSIBLE_HOME/docs/man + +# Set PYTHONPATH +if not set -q PYTHONPATH + set -gx PYTHONPATH $PREFIX_PYTHONPATH +else + switch PYTHONPATH + case "$PREFIX_PYTHONPATH*" + case "*" + echo "Appending PYTHONPATH" + set -gx PYTHONPATH "$PREFIX_PYTHONPATH:$PYTHONPATH" + end +end + +# Set PATH +if not contains $PREFIX_PATH $PATH + set -gx PATH $PREFIX_PATH $PATH +end + +# Set MANPATH +if not contains $PREFIX_MANPATH $MANPATH + if not set -q MANPATH + set -gx MANPATH $PREFIX_MANPATH + else + set -gx MANPATH $PREFIX_MANPATH $MANPATH + end +end + +set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library + +# Generate egg_info so that pkg_resources works +pushd $ANSIBLE_HOME +python setup.py egg_info +if test -e $PREFIX_PYTHONPATH/ansible*.egg-info + rm -r $PREFIX_PYTHONPATH/ansible*.egg-info +end +mv ansible*egg-info $PREFIX_PYTHONPATH +popd + + +if set -q argv + switch $argv + case '-q' '--quiet' + case '*' + echo "" + echo "Setting up Ansible to run out of checkout..." + echo "" + echo "PATH=$PATH" + echo "PYTHONPATH=$PYTHONPATH" + echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY" + echo "MANPATH=$MANPATH" + echo "" + + echo "Remember, you may wish to specify your host file with -i" + echo "" + echo "Done!" 
+ echo "" + end +end diff --git a/v1/hacking/get_library.py b/v1/hacking/get_library.py new file mode 100755 index 00000000000000..571183b688c490 --- /dev/null +++ b/v1/hacking/get_library.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +# (c) 2014, Will Thames +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +import ansible.constants as C +import sys + +def main(): + print C.DEFAULT_MODULE_PATH + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/v1/hacking/module_formatter.py b/v1/hacking/module_formatter.py new file mode 100755 index 00000000000000..acddd700930098 --- /dev/null +++ b/v1/hacking/module_formatter.py @@ -0,0 +1,447 @@ +#!/usr/bin/env python +# (c) 2012, Jan-Piet Mens +# (c) 2012-2014, Michael DeHaan and others +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +import os +import glob +import sys +import yaml +import codecs +import json +import ast +import re +import optparse +import time +import datetime +import subprocess +import cgi +from jinja2 import Environment, FileSystemLoader + +from ansible.utils import module_docs +from ansible.utils.vars import merge_hash + +##################################################################################### +# constants and paths + +# if a module is added in a version of Ansible older than this, don't print the version added information +# in the module documentation because everyone is assumed to be running something newer than this already. +TO_OLD_TO_BE_NOTABLE = 1.0 + +# Get parent directory of the directory this script lives in +MODULEDIR=os.path.abspath(os.path.join( + os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules' +)) + +# The name of the DOCUMENTATION template +EXAMPLE_YAML=os.path.abspath(os.path.join( + os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml' +)) + +_ITALIC = re.compile(r"I\(([^)]+)\)") +_BOLD = re.compile(r"B\(([^)]+)\)") +_MODULE = re.compile(r"M\(([^)]+)\)") +_URL = re.compile(r"U\(([^)]+)\)") +_CONST = re.compile(r"C\(([^)]+)\)") + +DEPRECATED = " (D)" +NOTCORE = " (E)" +##################################################################################### + +def rst_ify(text): + ''' convert symbols like I(this is in italics) to valid restructured text ''' + + t = _ITALIC.sub(r'*' + r"\1" + r"*", text) + t = _BOLD.sub(r'**' + r"\1" + r"**", t) + t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t) + t = _URL.sub(r"\1", t) + t = _CONST.sub(r'``' + r"\1" + r"``", t) + + return t + +##################################################################################### + +def html_ify(text): + ''' convert symbols like I(this is in italics) to valid HTML ''' + + t = cgi.escape(text) + t = _ITALIC.sub("" + r"\1" + "", t) + t = _BOLD.sub("" + r"\1" + "", t) + t = _MODULE.sub("" + 
r"\1" + "", t) + t = _URL.sub("" + r"\1" + "", t) + t = _CONST.sub("" + r"\1" + "", t) + + return t + + +##################################################################################### + +def rst_fmt(text, fmt): + ''' helper for Jinja2 to do format strings ''' + + return fmt % (text) + +##################################################################################### + +def rst_xline(width, char="="): + ''' return a restructured text line of a given length ''' + + return char * width + +##################################################################################### + +def write_data(text, options, outputname, module): + ''' dumps module output to a file or the screen, as requested ''' + + if options.output_dir is not None: + fname = os.path.join(options.output_dir, outputname % module) + fname = fname.replace(".py","") + f = open(fname, 'w') + f.write(text.encode('utf-8')) + f.close() + else: + print text + +##################################################################################### + + +def list_modules(module_dir, depth=0): + ''' returns a hash of categories, each category being a hash of module names to file paths ''' + + categories = dict(all=dict(),_aliases=dict()) + if depth <= 3: # limit # of subdirs + + files = glob.glob("%s/*" % module_dir) + for d in files: + + category = os.path.splitext(os.path.basename(d))[0] + if os.path.isdir(d): + + res = list_modules(d, depth + 1) + for key in res.keys(): + if key in categories: + categories[key] = merge_hash(categories[key], res[key]) + res.pop(key, None) + + if depth < 2: + categories.update(res) + else: + category = module_dir.split("/")[-1] + if not category in categories: + categories[category] = res + else: + categories[category].update(res) + else: + module = category + category = os.path.basename(module_dir) + if not d.endswith(".py") or d.endswith('__init__.py'): + # windows powershell modules have documentation stubs in python docstring + # format (they are not executed) so skip 
the ps1 format files + continue + elif module.startswith("_") and os.path.islink(d): + source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0] + module = module.replace("_","",1) + if not d in categories['_aliases']: + categories['_aliases'][source] = [module] + else: + categories['_aliases'][source].update(module) + continue + + if not category in categories: + categories[category] = {} + categories[category][module] = d + categories['all'][module] = d + + return categories + +##################################################################################### + +def generate_parser(): + ''' generate an optparse parser ''' + + p = optparse.OptionParser( + version='%prog 1.0', + usage='usage: %prog [options] arg1 arg2', + description='Generate module documentation from metadata', + ) + + p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number") + p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path") + p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates") + p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type") + p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose") + p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files") + p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules") + p.add_option('-V', action='version', help='Show version number and exit') + return p + +##################################################################################### + +def jinja2_environment(template_dir, typ): + + env = Environment(loader=FileSystemLoader(template_dir), + 
variable_start_string="@{", + variable_end_string="}@", + trim_blocks=True, + ) + env.globals['xline'] = rst_xline + + if typ == 'rst': + env.filters['convert_symbols_to_format'] = rst_ify + env.filters['html_ify'] = html_ify + env.filters['fmt'] = rst_fmt + env.filters['xline'] = rst_xline + template = env.get_template('rst.j2') + outputname = "%s_module.rst" + else: + raise Exception("unknown module format type: %s" % typ) + + return env, template, outputname + +##################################################################################### + +def process_module(module, options, env, template, outputname, module_map, aliases): + + fname = module_map[module] + if isinstance(fname, dict): + return "SKIPPED" + + basename = os.path.basename(fname) + deprecated = False + + # ignore files with extensions + if not basename.endswith(".py"): + return + elif module.startswith("_"): + if os.path.islink(fname): + return # ignore, its an alias + deprecated = True + module = module.replace("_","",1) + + print "rendering: %s" % module + + # use ansible core library to parse out doc metadata YAML and plaintext examples + doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose) + + # crash if module is missing documentation and not explicitly hidden from docs index + if doc is None: + if module in module_docs.BLACKLIST_MODULES: + return "SKIPPED" + else: + sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module)) + sys.exit(1) + + if deprecated and 'deprecated' not in doc: + sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module)) + sys.exit(1) + + if "/core/" in fname: + doc['core'] = True + else: + doc['core'] = False + + if module in aliases: + doc['aliases'] = aliases[module] + + all_keys = [] + + if not 'version_added' in doc: + sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module) + sys.exit(1) + + added = 0 + if 
doc['version_added'] == 'historical': + del doc['version_added'] + else: + added = doc['version_added'] + + # don't show version added information if it's too old to be called out + if added: + added_tokens = str(added).split(".") + added = added_tokens[0] + "." + added_tokens[1] + added_float = float(added) + if added and added_float < TO_OLD_TO_BE_NOTABLE: + del doc['version_added'] + + if 'options' in doc: + for (k,v) in doc['options'].iteritems(): + all_keys.append(k) + + all_keys = sorted(all_keys) + + doc['option_keys'] = all_keys + doc['filename'] = fname + doc['docuri'] = doc['module'].replace('_', '-') + doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') + doc['ansible_version'] = options.ansible_version + doc['plainexamples'] = examples #plain text + if returndocs: + doc['returndocs'] = yaml.safe_load(returndocs) + else: + doc['returndocs'] = None + + # here is where we build the table of contents... + + text = template.render(doc) + write_data(text, options, outputname, module) + return doc['short_description'] + +##################################################################################### + +def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases): + modstring = module + modname = module + if module in deprecated: + modstring = modstring + DEPRECATED + modname = "_" + module + elif module not in core: + modstring = modstring + NOTCORE + + result = process_module(modname, options, env, template, outputname, module_map, aliases) + + if result != "SKIPPED": + category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module)) + +def process_category(category, categories, options, env, template, outputname): + + module_map = categories[category] + + aliases = {} + if '_aliases' in categories: + aliases = categories['_aliases'] + + category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category) + category_file = open(category_file_path, "w") + 
print "*** recording category %s in %s ***" % (category, category_file_path) + + # TODO: start a new category file + + category = category.replace("_"," ") + category = category.title() + + modules = [] + deprecated = [] + core = [] + for module in module_map.keys(): + + if isinstance(module_map[module], dict): + for mod in module_map[module].keys(): + if mod.startswith("_"): + mod = mod.replace("_","",1) + deprecated.append(mod) + elif '/core/' in module_map[module][mod]: + core.append(mod) + else: + if module.startswith("_"): + module = module.replace("_","",1) + deprecated.append(module) + elif '/core/' in module_map[module]: + core.append(module) + + modules.append(module) + + modules.sort() + + category_header = "%s Modules" % (category.title()) + underscores = "`" * len(category_header) + + category_file.write("""\ +%s +%s + +.. toctree:: :maxdepth: 1 + +""" % (category_header, underscores)) + sections = [] + for module in modules: + if module in module_map and isinstance(module_map[module], dict): + sections.append(module) + continue + else: + print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases) + + sections.sort() + for section in sections: + category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section))) + category_file.write(".. toctree:: :maxdepth: 1\n\n") + + section_modules = module_map[section].keys() + section_modules.sort() + #for module in module_map[section]: + for module in section_modules: + print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases) + + category_file.write("""\n\n +.. note:: + - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale. 
+ - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules. + - Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub `_, extras tickets to `ansible/ansible-modules-extras on GitHub `_ +""" % (DEPRECATED, NOTCORE)) + category_file.close() + + # TODO: end a new category file + +##################################################################################### + +def validate_options(options): + ''' validate option parser options ''' + + if not options.module_dir: + print >>sys.stderr, "--module-dir is required" + sys.exit(1) + if not os.path.exists(options.module_dir): + print >>sys.stderr, "--module-dir does not exist: %s" % options.module_dir + sys.exit(1) + if not options.template_dir: + print "--template-dir must be specified" + sys.exit(1) + +##################################################################################### + +def main(): + + p = generate_parser() + + (options, args) = p.parse_args() + validate_options(options) + + env, template, outputname = jinja2_environment(options.template_dir, options.type) + + categories = list_modules(options.module_dir) + last_category = None + category_names = categories.keys() + category_names.sort() + + category_list_path = os.path.join(options.output_dir, "modules_by_category.rst") + category_list_file = open(category_list_path, "w") + category_list_file.write("Module Index\n") + category_list_file.write("============\n") + category_list_file.write("\n\n") + category_list_file.write(".. 
toctree::\n") + category_list_file.write(" :maxdepth: 1\n\n") + + for category in category_names: + if category.startswith("_"): + continue + category_list_file.write(" list_of_%s_modules\n" % category) + process_category(category, categories, options, env, template, outputname) + + category_list_file.close() + +if __name__ == '__main__': + main() diff --git a/v1/hacking/templates/rst.j2 b/v1/hacking/templates/rst.j2 new file mode 100644 index 00000000000000..f6f38e59101168 --- /dev/null +++ b/v1/hacking/templates/rst.j2 @@ -0,0 +1,211 @@ +.. _@{ module }@: + +{% if short_description %} +{% set title = module + ' - ' + short_description|convert_symbols_to_format %} +{% else %} +{% set title = module %} +{% endif %} +{% set title_len = title|length %} + +@{ title }@ +@{ '+' * title_len }@ + +.. contents:: + :local: + :depth: 1 + +{# ------------------------------------------ + # + # Please note: this looks like a core dump + # but it isn't one. + # + --------------------------------------------#} + +{% if aliases is defined -%} +Aliases: @{ ','.join(aliases) }@ +{% endif %} + +{% if deprecated is defined -%} +DEPRECATED +---------- + +@{ deprecated }@ +{% endif %} + +Synopsis +-------- + +{% if version_added is defined -%} +.. versionadded:: @{ version_added }@ +{% endif %} + +{% for desc in description -%} +@{ desc | convert_symbols_to_format }@ +{% endfor %} + +{% if options -%} +Options +------- + +.. raw:: html + +
namedespcriptiondescription returned type sample
+ + + + + + + + {% for k in option_keys %} + {% set v = options[k] %} + + + + + {% if v.get('type', 'not_bool') == 'bool' %} + + {% else %} + + {% endif %} + + + {% endfor %} +
parameterrequireddefaultchoicescomments
@{ k }@{% if v.get('required', False) %}yes{% else %}no{% endif %}{% if v['default'] %}@{ v['default'] }@{% endif %}
  • yes
  • no
    {% for choice in v.get('choices',[]) -%}
  • @{ choice }@
  • {% endfor -%}
{% for desc in v.description -%}@{ desc | html_ify }@{% endfor -%}{% if v['version_added'] %} (added in Ansible @{v['version_added']}@){% endif %}
+{% endif %} + +{% if requirements %} +{% for req in requirements %} + +.. note:: Requires @{ req | convert_symbols_to_format }@ + +{% endfor %} +{% endif %} + +{% if examples or plainexamples %} +Examples +-------- + +.. raw:: html + +{% for example in examples %} + {% if example['description'] %}

@{ example['description'] | html_ify }@

{% endif %} +

+

+@{ example['code'] | escape | indent(4, True) }@
+    
+

+{% endfor %} +
+ +{% if plainexamples %} + +:: + +@{ plainexamples | indent(4, True) }@ +{% endif %} +{% endif %} + + +{% if returndocs %} +Return Values +------------- + +Common return values are documented here :doc:`common_return_values`, the following are the fields unique to this module: + +.. raw:: html + + + + + + + + + + + {% for entry in returndocs %} + + + + + + + + {% if returndocs[entry].type == 'dictionary' %} + + + + {% endif %} + {% endfor %} + +
namedescriptionreturnedtypesample
@{ entry }@ @{ returndocs[entry].description }@ @{ returndocs[entry].returned }@ @{ returndocs[entry].type }@ @{ returndocs[entry].sample}@
contains: + + + + + + + + + + {% for sub in returndocs[entry].contains %} + + + + + + + + {% endfor %} + +
namedescriptionreturnedtypesample
@{ sub }@ @{ returndocs[entry].contains[sub].description }@ @{ returndocs[entry].contains[sub].returned }@ @{ returndocs[entry].contains[sub].type }@ @{ returndocs[entry].contains[sub].sample}@
+
+

+{% endif %} + +{% if notes %} +{% for note in notes %} +.. note:: @{ note | convert_symbols_to_format }@ +{% endfor %} +{% endif %} + + +{% if not deprecated %} + {% if core %} + +This is a Core Module +--------------------- + +The source of this module is hosted on GitHub in the `ansible-modules-core `_ repo. + +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. + +Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. + +Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. + +This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those in the "extras" repos. + + {% else %} + +This is an Extras Module +------------------------ + +This source of this module is hosted on GitHub in the `ansible-modules-extras `_ repo. + +If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras `_ to see if a bug has already been filed. If not, we would be grateful if you would file one. + +Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group `_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group `_. 
+ +Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree. + +Note that this module is designated a "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests. +Popular "extras" modules may be promoted to core modules over time. + + {% endif %} +{% endif %} + +For help in developing on modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`. + + diff --git a/v1/hacking/test-module b/v1/hacking/test-module new file mode 100755 index 00000000000000..c226f32e889906 --- /dev/null +++ b/v1/hacking/test-module @@ -0,0 +1,193 @@ +#!/usr/bin/env python + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +# this script is for testing modules without running through the +# entire guts of ansible, and is very helpful for when developing +# modules +# +# example: +# test-module -m ../library/commands/command -a "/bin/sleep 3" +# test-module -m ../library/system/service -a "name=httpd ensure=restarted" +# test-module -m ../library/system/service -a "name=httpd ensure=restarted" --debugger /usr/bin/pdb +# test-modulr -m ../library/file/lineinfile -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check + +import sys +import base64 +import os +import subprocess +import traceback +import optparse +import ansible.utils as utils +import ansible.module_common as module_common +import ansible.constants as C + +try: + import json +except ImportError: + import simplejson as json + +def parse(): + """parse command line + + :return : (options, args)""" + parser = optparse.OptionParser() + + parser.usage = "%prog -[options] (-h for help)" + + parser.add_option('-m', '--module-path', dest='module_path', + help="REQUIRED: full path of module source to execute") + parser.add_option('-a', '--args', dest='module_args', default="", + help="module argument string") + parser.add_option('-D', '--debugger', dest='debugger', + help="path to python debugger (e.g. /usr/bin/pdb)") + parser.add_option('-I', '--interpreter', dest='interpreter', + help="path to interpreter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)", + metavar='INTERPRETER_TYPE=INTERPRETER_PATH') + parser.add_option('-c', '--check', dest='check', action='store_true', + help="run the module in check mode") + options, args = parser.parse_args() + if not options.module_path: + parser.print_help() + sys.exit(1) + else: + return options, args + +def write_argsfile(argstring, json=False): + """ Write args to a file for old-style module's use. 
""" + argspath = os.path.expanduser("~/.ansible_test_module_arguments") + argsfile = open(argspath, 'w') + if json: + args = utils.parse_kv(argstring) + argstring = utils.jsonify(args) + argsfile.write(argstring) + argsfile.close() + return argspath + +def boilerplate_module(modfile, args, interpreter, check): + """ simulate what ansible does with new style modules """ + + #module_fh = open(modfile) + #module_data = module_fh.read() + #module_fh.close() + + replacer = module_common.ModuleReplacer() + + #included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1 + + complex_args = {} + if args.startswith("@"): + # Argument is a YAML file (JSON is a subset of YAML) + complex_args = utils.combine_vars(complex_args, utils.parse_yaml_from_file(args[1:])) + args='' + elif args.startswith("{"): + # Argument is a YAML document (not a file) + complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args)) + args='' + + inject = {} + if interpreter: + if '=' not in interpreter: + print 'interpreter must by in the form of ansible_python_interpreter=/usr/bin/python' + sys.exit(1) + interpreter_type, interpreter_path = interpreter.split('=') + if not interpreter_type.startswith('ansible_'): + interpreter_type = 'ansible_%s' % interpreter_type + if not interpreter_type.endswith('_interpreter'): + interpreter_type = '%s_interpreter' % interpreter_type + inject[interpreter_type] = interpreter_path + + if check: + complex_args['CHECKMODE'] = True + + (module_data, module_style, shebang) = replacer.modify_module( + modfile, + complex_args, + args, + inject + ) + + modfile2_path = os.path.expanduser("~/.ansible_module_generated") + print "* including generated source, if any, saving to: %s" % modfile2_path + print "* this may offset any line numbers in tracebacks/debuggers!" 
+ modfile2 = open(modfile2_path, 'w') + modfile2.write(module_data) + modfile2.close() + modfile = modfile2_path + + return (modfile2_path, module_style) + +def runtest( modfile, argspath): + """Test run a module, piping it's output for reporting.""" + + os.system("chmod +x %s" % modfile) + + invoke = "%s" % (modfile) + if argspath is not None: + invoke = "%s %s" % (modfile, argspath) + + cmd = subprocess.Popen(invoke, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + + try: + print "***********************************" + print "RAW OUTPUT" + print out + print err + results = utils.parse_json(out) + except: + print "***********************************" + print "INVALID OUTPUT FORMAT" + print out + traceback.print_exc() + sys.exit(1) + + print "***********************************" + print "PARSED OUTPUT" + print utils.jsonify(results,format=True) + +def rundebug(debugger, modfile, argspath): + """Run interactively with console debugger.""" + + if argspath is not None: + subprocess.call("%s %s %s" % (debugger, modfile, argspath), shell=True) + else: + subprocess.call("%s %s" % (debugger, modfile), shell=True) + +def main(): + + options, args = parse() + (modfile, module_style) = boilerplate_module(options.module_path, options.module_args, options.interpreter, options.check) + + argspath=None + if module_style != 'new': + if module_style == 'non_native_want_json': + argspath = write_argsfile(options.module_args, json=True) + elif module_style == 'old': + argspath = write_argsfile(options.module_args, json=False) + else: + raise Exception("internal error, unexpected module style: %s" % module_style) + if options.debugger: + rundebug(options.debugger, modfile, argspath) + else: + runtest(modfile, argspath) + +if __name__ == "__main__": + main() + diff --git a/v1/hacking/update.sh b/v1/hacking/update.sh new file mode 100755 index 00000000000000..5979dd0ab2bf71 --- /dev/null +++ b/v1/hacking/update.sh @@ -0,0 +1,3 @@ +#!/bin/sh 
+git pull --rebase +git submodule update --init --recursive From 7dd3ef7b60b09fb5c4a9ada0e96be87c5edd59ae Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Jun 2015 13:27:31 -0700 Subject: [PATCH 0764/3617] Older python-six from early RHEL and ubuntu do not have add_metaclass but do have with_metaclass --- lib/ansible/plugins/cache/base.py | 5 ++--- lib/ansible/plugins/connections/__init__.py | 5 ++--- lib/ansible/plugins/inventory/__init__.py | 5 ++--- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/lib/ansible/plugins/cache/base.py b/lib/ansible/plugins/cache/base.py index 767964b281cbd4..e903c935e496c6 100644 --- a/lib/ansible/plugins/cache/base.py +++ b/lib/ansible/plugins/cache/base.py @@ -20,11 +20,10 @@ from abc import ABCMeta, abstractmethod -from six import add_metaclass +from six import with_metaclass -@add_metaclass(ABCMeta) -class BaseCacheModule: +class BaseCacheModule(with_metaclass(ABCMeta, object)): @abstractmethod def get(self, key): diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 70807b08f616b3..897bc58982bfef 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -22,7 +22,7 @@ from abc import ABCMeta, abstractmethod, abstractproperty -from six import add_metaclass +from six import with_metaclass from ansible import constants as C from ansible.errors import AnsibleError @@ -34,8 +34,7 @@ __all__ = ['ConnectionBase'] -@add_metaclass(ABCMeta) -class ConnectionBase: +class ConnectionBase(with_metaclass(ABCMeta, object)): ''' A base class for connections to contain common code. 
''' diff --git a/lib/ansible/plugins/inventory/__init__.py b/lib/ansible/plugins/inventory/__init__.py index 03fd89429b4dd4..74dbccc1bbc072 100644 --- a/lib/ansible/plugins/inventory/__init__.py +++ b/lib/ansible/plugins/inventory/__init__.py @@ -23,10 +23,9 @@ from abc import ABCMeta, abstractmethod -from six import add_metaclass +from six import with_metaclass -@add_metaclass(ABCMeta) -class InventoryParser: +class InventoryParser(with_metaclass(ABCMeta, object)): '''Abstract Base Class for retrieving inventory information Any InventoryParser functions by taking an inven_source. The caller then From 337b1dc45c3bc101e13357bf3a4e21dd62546b14 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 3 Jun 2015 20:55:55 -0400 Subject: [PATCH 0765/3617] minor doc fixes --- docsite/rst/intro_configuration.rst | 4 ++-- docsite/rst/playbooks_filters.rst | 1 + docsite/rst/playbooks_special_topics.rst | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 2ff53c22485d11..ca5d581779660c 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -309,7 +309,7 @@ The valid values are either 'replace' (the default) or 'merge'. hostfile ======== -This is a deprecated setting since 1.9, please look at :ref:`inventory` for the new setting. +This is a deprecated setting since 1.9, please look at :ref:`inventory_file` for the new setting. .. _host_key_checking: @@ -321,7 +321,7 @@ implications and wish to disable it, you may do so here by setting the value to host_key_checking=True -.. _inventory: +.. _inventory_file: inventory ========= diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst index ef6185f9514859..0cb42213b44665 100644 --- a/docsite/rst/playbooks_filters.rst +++ b/docsite/rst/playbooks_filters.rst @@ -3,6 +3,7 @@ Jinja2 filters .. 
contents:: Topics + Filters in Jinja2 are a way of transforming template expressions from one kind of data into another. Jinja2 ships with many of these. See `builtin filters`_ in the official Jinja2 template documentation. diff --git a/docsite/rst/playbooks_special_topics.rst b/docsite/rst/playbooks_special_topics.rst index c57f5796c9689c..74974cad108d50 100644 --- a/docsite/rst/playbooks_special_topics.rst +++ b/docsite/rst/playbooks_special_topics.rst @@ -7,6 +7,7 @@ and adopt these only if they seem relevant or useful to your environment. .. toctree:: :maxdepth: 1 + become playbooks_acceleration playbooks_async playbooks_checkmode From 0826106441d15820d086c1c9eaf6242aa80e4406 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 3 Jun 2015 22:19:26 -0400 Subject: [PATCH 0766/3617] minor docs reformat - clearer 'version added' for module options, now it sits under the option name - made notes a section, so it now appears in toc - moved requirements and made it a list, more prominent and more readable --- hacking/templates/rst.j2 | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index f6f38e59101168..a30e16e41f1527 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -43,6 +43,17 @@ Synopsis @{ desc | convert_symbols_to_format }@ {% endfor %} + +{% if requirements %} +Requirements +------------ + +{% for req in requirements %} + * @{ req | convert_symbols_to_format }@ +{% endfor %} +{% endif %} + + {% if options -%} Options ------- @@ -60,7 +71,7 @@ Options {% for k in option_keys %} {% set v = options[k] %} - @{ k }@ + @{ k }@
{% if v['version_added'] %} (added in @{v['version_added']}@){% endif %}
{% if v.get('required', False) %}yes{% else %}no{% endif %} {% if v['default'] %}@{ v['default'] }@{% endif %} {% if v.get('type', 'not_bool') == 'bool' %} @@ -68,21 +79,16 @@ Options {% else %}
    {% for choice in v.get('choices',[]) -%}
  • @{ choice }@
  • {% endfor -%}
{% endif %} - {% for desc in v.description -%}@{ desc | html_ify }@{% endfor -%}{% if v['version_added'] %} (added in Ansible @{v['version_added']}@){% endif %} + {% for desc in v.description -%}@{ desc | html_ify }@{% endfor -%} {% endfor %} + {% endif %} -{% if requirements %} -{% for req in requirements %} -.. note:: Requires @{ req | convert_symbols_to_format }@ -{% endfor %} -{% endif %} - -{% if examples or plainexamples %} +{% if examples or plainexamples -%} Examples -------- @@ -107,7 +113,7 @@ Examples {% endif %} -{% if returndocs %} +{% if returndocs -%} Return Values ------------- @@ -164,7 +170,10 @@ Common return values are documented here :doc:`common_return_values`, the follow

{% endif %} -{% if notes %} +{% if notes -%} +Notes +----- + {% for note in notes %} .. note:: @{ note | convert_symbols_to_format }@ {% endfor %} From efc3d2931edc583f44c1644ab3c1d3afb29c894a Mon Sep 17 00:00:00 2001 From: joshainglis Date: Thu, 4 Jun 2015 17:07:08 +1000 Subject: [PATCH 0767/3617] Fixed typo --- plugins/inventory/ovirt.ini | 34 +++++ plugins/inventory/ovirt.py | 287 ++++++++++++++++++++++++++++++++++++ 2 files changed, 321 insertions(+) create mode 100644 plugins/inventory/ovirt.ini create mode 100755 plugins/inventory/ovirt.py diff --git a/plugins/inventory/ovirt.ini b/plugins/inventory/ovirt.ini new file mode 100644 index 00000000000000..2ea05dc55e33cd --- /dev/null +++ b/plugins/inventory/ovirt.ini @@ -0,0 +1,34 @@ +#!/usr/bin/python +# Copyright 2013 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +# Author: Josha Inglis based on the gce.ini by Eric Johnson + +[ovirt] +# ovirt Service Account configuration information can be stored in the +# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already +# exist in your PYTHONPATH and be picked up automatically with an import +# statement in the inventory script. However, you can specify an absolute +# path to the secrets.py file with 'libcloud_secrets' parameter. 
+ovirt_api_secrets = + +# If you are not going to use a 'secrets.py' file, you can set the necessary +# authorization parameters here. +ovirt_url = +ovirt_username = +ovirt_password = diff --git a/plugins/inventory/ovirt.py b/plugins/inventory/ovirt.py new file mode 100755 index 00000000000000..6ce28bc2f32e4e --- /dev/null +++ b/plugins/inventory/ovirt.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python +# Copyright 2015 IIX Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +""" +ovirt external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests to +oVirt via the ovirt-engine-sdk-python library. Full install/configuration +instructions for the ovirt* modules can be found in the comments of +ansible/test/ovirt_tests.py. + +When run against a specific host, this script returns the following variables +based on the data obtained from the ovirt_sdk Node object: + - ovirt_uuid + - ovirt_id + - ovirt_image + - ovirt_machine_type + - ovirt_ips + - ovirt_name + - ovirt_description + - ovirt_status + - ovirt_zone + - ovirt_tags + - ovirt_stats + +When run in --list mode, instances are grouped by the following categories: + + - zone: + zone group name. + - instance tags: + An entry is created for each tag. 
For example, if you have two instances + with a common tag called 'foo', they will both be grouped together under + the 'tag_foo' name. + - network name: + the name of the network is appended to 'network_' (e.g. the 'default' + network will result in a group named 'network_default') + - running status: + group name prefixed with 'status_' (e.g. status_up, status_down,..) + +Examples: + Execute uname on all instances in the us-central1-a zone + $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a" + + Use the ovirt inventory script to print out instance specific information + $ plugins/inventory/ovirt.py --host my_instance + +Author: Josha Inglis based on the gce.py by Eric Johnson +Version: 0.0.1 +""" + +USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin" +USER_AGENT_VERSION = "v1" + +import sys +import os +import argparse +import ConfigParser +from collections import defaultdict + +try: + import json +except ImportError: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import simplejson as json + +try: + # noinspection PyUnresolvedReferences + from ovirtsdk.api import API + # noinspection PyUnresolvedReferences + from ovirtsdk.xml import params +except ImportError: + print("ovirt inventory script requires ovirt-engine-sdk-python") + sys.exit(1) + + +class OVirtInventory(object): + def __init__(self): + # Read settings and parse CLI arguments + self.args = self.parse_cli_args() + self.driver = self.get_ovirt_driver() + + # Just display data for specific host + if self.args.host: + print self.json_format_dict( + self.node_to_dict(self.get_instance(self.args.host)), + pretty=self.args.pretty + ) + sys.exit(0) + + # Otherwise, assume user wants all instances grouped + print( + self.json_format_dict( + data=self.group_instances(), + pretty=self.args.pretty + ) + ) + sys.exit(0) + + @staticmethod + def get_ovirt_driver(): + """ + Determine the ovirt authorization settings and return a ovirt_sdk driver. 
+ + :rtype : ovirtsdk.api.API + """ + kwargs = {} + + ovirt_ini_default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "ovirt.ini") + ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path) + + # Create a ConfigParser. + # This provides empty defaults to each key, so that environment + # variable configuration (as opposed to INI configuration) is able + # to work. + config = ConfigParser.SafeConfigParser(defaults={ + 'ovirt_url': '', + 'ovirt_username': '', + 'ovirt_password': '', + 'ovirt_api_secrets': '', + }) + if 'ovirt' not in config.sections(): + config.add_section('ovirt') + config.read(ovirt_ini_path) + + # Attempt to get ovirt params from a configuration file, if one + # exists. + secrets_path = config.get('ovirt', 'ovirt_api_secrets') + secrets_found = False + try: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import secrets + + kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) + secrets_found = True + except ImportError: + pass + + if not secrets_found and secrets_path: + if not secrets_path.endswith('secrets.py'): + err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py" + print(err) + sys.exit(1) + sys.path.append(os.path.dirname(secrets_path)) + try: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import secrets + + kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) + except ImportError: + pass + if not secrets_found: + kwargs = { + 'url': config.get('ovirt', 'ovirt_url'), + 'username': config.get('ovirt', 'ovirt_username'), + 'password': config.get('ovirt', 'ovirt_password'), + } + + # If the appropriate environment variables are set, they override + # other configuration; process those into our args and kwargs. + kwargs['url'] = os.environ.get('OVIRT_URL') + kwargs['username'] = os.environ.get('OVIRT_EMAIL') + kwargs['password'] = os.environ.get('OVIRT_PASS') + + # Retrieve and return the ovirt driver. 
+ return API(insecure=True, **kwargs) + + @staticmethod + def parse_cli_args(): + """ + Command line argument processing + + :rtype : argparse.Namespace + """ + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt') + parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') + parser.add_argument('--host', action='store', help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') + return parser.parse_args() + + def node_to_dict(self, inst): + """ + :type inst: params.VM + """ + if inst is None: + return {} + + inst.get_custom_properties() + ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \ + if inst.get_guest_info() is not None else [] + stats = {y.get_name(): y.get_values().get_value()[0].get_datum() for y in inst.get_statistics().list()} + + return { + 'ovirt_uuid': inst.get_id(), + 'ovirt_id': inst.get_id(), + 'ovirt_image': inst.get_os().get_type(), + 'ovirt_machine_type': inst.get_instance_type(), + 'ovirt_ips': ips, + 'ovirt_name': inst.get_name(), + 'ovirt_description': inst.get_description(), + 'ovirt_status': inst.get_status().get_state(), + 'ovirt_zone': inst.get_cluster().get_id(), + 'ovirt_tags': self.get_tags(inst), + 'ovirt_stats': stats, + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': ips[0] if len(ips) > 0 else None + } + + @staticmethod + def get_tags(inst): + """ + :type inst: params.VM + """ + return [x.get_name() for x in inst.get_tags().list()] + + # noinspection PyBroadException,PyUnusedLocal + def get_instance(self, instance_name): + """Gets details about a specific instance """ + try: + return self.driver.vms.get(name=instance_name) + except Exception as e: + return None + + def group_instances(self): + """Group all instances""" + groups = defaultdict(list) + meta = {"hostvars": {}} + + for node in 
self.driver.vms.list(): + assert isinstance(node, params.VM) + name = node.get_name() + + meta["hostvars"][name] = self.node_to_dict(node) + + zone = node.get_cluster().get_name() + groups[zone].append(name) + + tags = self.get_tags(node) + for t in tags: + tag = 'tag_%s' % t + groups[tag].append(name) + + nets = [x.get_name() for x in node.get_nics().list()] + for net in nets: + net = 'network_%s' % net + groups[net].append(name) + + status = node.get_status().get_state() + stat = 'status_%s' % status.lower() + if stat in groups: + groups[stat].append(name) + else: + groups[stat] = [name] + + groups["_meta"] = meta + + return groups + + @staticmethod + def json_format_dict(data, pretty=False): + """ Converts a dict to a JSON object and dumps it as a formatted + string """ + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + +# Run the script +OVirtInventory() From 76923915685be979a265efd291c4504f120406eb Mon Sep 17 00:00:00 2001 From: joshainglis Date: Thu, 4 Jun 2015 17:35:10 +1000 Subject: [PATCH 0768/3617] Removed some text --- plugins/inventory/ovirt.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/inventory/ovirt.py b/plugins/inventory/ovirt.py index 6ce28bc2f32e4e..bccd83de8611b0 100755 --- a/plugins/inventory/ovirt.py +++ b/plugins/inventory/ovirt.py @@ -21,9 +21,7 @@ ================================= Generates inventory that Ansible can understand by making API requests to -oVirt via the ovirt-engine-sdk-python library. Full install/configuration -instructions for the ovirt* modules can be found in the comments of -ansible/test/ovirt_tests.py. +oVirt via the ovirt-engine-sdk-python library. 
When run against a specific host, this script returns the following variables based on the data obtained from the ovirt_sdk Node object: From 23460e64800d762a831449cbbbaedd2fab16fa6a Mon Sep 17 00:00:00 2001 From: joshainglis Date: Thu, 4 Jun 2015 17:59:53 +1000 Subject: [PATCH 0769/3617] Removed a dictionary comprehension for python 2.6 support --- plugins/inventory/ovirt.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/ovirt.py b/plugins/inventory/ovirt.py index bccd83de8611b0..4cb4b09eaefa2d 100755 --- a/plugins/inventory/ovirt.py +++ b/plugins/inventory/ovirt.py @@ -203,7 +203,9 @@ def node_to_dict(self, inst): inst.get_custom_properties() ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \ if inst.get_guest_info() is not None else [] - stats = {y.get_name(): y.get_values().get_value()[0].get_datum() for y in inst.get_statistics().list()} + stats = {} + for stat in inst.get_statistics().list(): + stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum() return { 'ovirt_uuid': inst.get_id(), From 6a97e49a06effe5d650fe31a1eae2d98fdddc58e Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 4 Jun 2015 08:15:25 -0500 Subject: [PATCH 0770/3617] Re-introduce ssh connection private key support --- lib/ansible/plugins/connections/ssh.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index b3ada343c0454d..1d79cb4e90cbae 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -95,11 +95,8 @@ def _connect(self): if self._connection_info.port is not None: self._common_args += ("-o", "Port={0}".format(self._connection_info.port)) - # FIXME: need to get this from connection info - #if self.private_key_file is not None: - # self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self.private_key_file))) - #elif 
self.runner.private_key_file is not None: - # self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self.runner.private_key_file))) + if self._connection_info.private_key_file is not None: + self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self._connection_info.private_key_file))) if self._connection_info.password: self._common_args += ("-o", "GSSAPIAuthentication=no", "-o", "PubkeyAuthentication=no") From 23cbfc17e5eca7dc9393260dbe43011f73b65a4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Mart=C3=ADnez?= Date: Thu, 4 Jun 2015 17:52:37 +0200 Subject: [PATCH 0771/3617] Fixed Github examples directory URL --- docsite/rst/YAMLSyntax.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst index d3eb843523173b..76683f6ba3b4e0 100644 --- a/docsite/rst/YAMLSyntax.rst +++ b/docsite/rst/YAMLSyntax.rst @@ -107,7 +107,7 @@ with a "{", YAML will think it is a dictionary, so you must quote it, like so:: Learn what playbooks can do and how to write/run them. `YAMLLint `_ YAML Lint (online) helps you debug YAML syntax if you are having problems - `Github examples directory `_ + `Github examples directory `_ Complete playbook files from the github project source `Mailing List `_ Questions? Help? Ideas? Stop by the list on Google Groups From ccb8bcebd3a86ce6d30621cc85e32762b53dfe9a Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 4 Jun 2015 11:34:56 -0500 Subject: [PATCH 0772/3617] Resync the v1 directory with v1_last. 
Fixes #11162 --- v1/ansible/constants.py | 8 +- v1/ansible/inventory/__init__.py | 4 +- v1/ansible/module_utils/basic.py | 147 ++++++++++++-------- v1/ansible/module_utils/cloudstack.py | 2 - v1/ansible/module_utils/facts.py | 48 ++++++- v1/ansible/module_utils/powershell.ps1 | 4 +- v1/ansible/module_utils/urls.py | 51 ++++--- v1/ansible/runner/connection_plugins/ssh.py | 65 ++------- v1/ansible/utils/__init__.py | 8 +- v1/ansible/utils/module_docs.py | 11 +- 10 files changed, 200 insertions(+), 148 deletions(-) diff --git a/v1/ansible/constants.py b/v1/ansible/constants.py index a9b4f40bb8e67f..2cdc08d8ce87ac 100644 --- a/v1/ansible/constants.py +++ b/v1/ansible/constants.py @@ -134,7 +134,10 @@ def shell_expand_path(path): DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() -DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) +DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) + +# selinux +DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf', islist=True) #TODO: get rid of ternary chain mess BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] @@ -176,6 +179,9 @@ def shell_expand_path(path): DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) +RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) +RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') + # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None) ANSIBLE_SSH_CONTROL_PATH = get_config(p, 
'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") diff --git a/v1/ansible/inventory/__init__.py b/v1/ansible/inventory/__init__.py index 2048046d3c1f21..f012246e227016 100644 --- a/v1/ansible/inventory/__init__.py +++ b/v1/ansible/inventory/__init__.py @@ -36,7 +36,7 @@ class Inventory(object): Host inventory for ansible. """ - __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', + __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset', 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list', '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir'] @@ -53,7 +53,7 @@ def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None): self._vars_per_host = {} self._vars_per_group = {} self._hosts_cache = {} - self._groups_list = {} + self._groups_list = {} self._pattern_cache = {} # to be set by calling set_playbook_basedir by playbook code diff --git a/v1/ansible/module_utils/basic.py b/v1/ansible/module_utils/basic.py index 54a1a9cfff7f88..e772a12efcefce 100644 --- a/v1/ansible/module_utils/basic.py +++ b/v1/ansible/module_utils/basic.py @@ -38,6 +38,8 @@ BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0] BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE +SELINUX_SPECIAL_FS="<>" + # ansible modules can be written in any language. 
To simplify # development of Python modules, the functions available here # can be inserted in any module source automatically by including @@ -181,7 +183,8 @@ def get_distribution(): ''' return the distribution name ''' if platform.system() == 'Linux': try: - distribution = platform.linux_distribution()[0].capitalize() + supported_dists = platform._supported_dists + ('arch',) + distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize() if not distribution and os.path.isfile('/etc/system-release'): distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize() if 'Amazon' in distribution: @@ -334,7 +337,8 @@ class AnsibleModule(object): def __init__(self, argument_spec, bypass_checks=False, no_log=False, check_invalid_arguments=True, mutually_exclusive=None, required_together=None, - required_one_of=None, add_file_common_args=False, supports_check_mode=False): + required_one_of=None, add_file_common_args=False, supports_check_mode=False, + required_if=None): ''' common code for quickly building an ansible module in Python @@ -382,6 +386,7 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, self._check_argument_types() self._check_required_together(required_together) self._check_required_one_of(required_one_of) + self._check_required_if(required_if) self._set_defaults(pre=False) if not self.no_log: @@ -528,10 +533,10 @@ def find_mount_point(self, path): path = os.path.dirname(path) return path - def is_nfs_path(self, path): + def is_special_selinux_path(self, path): """ - Returns a tuple containing (True, selinux_context) if the given path - is on a NFS mount point, otherwise the return will be (False, None). + Returns a tuple containing (True, selinux_context) if the given path is on a + NFS or other 'special' fs mount point, otherwise the return will be (False, None). 
""" try: f = open('/proc/mounts', 'r') @@ -542,9 +547,13 @@ def is_nfs_path(self, path): path_mount_point = self.find_mount_point(path) for line in mount_data: (device, mount_point, fstype, options, rest) = line.split(' ', 4) - if path_mount_point == mount_point and 'nfs' in fstype: - nfs_context = self.selinux_context(path_mount_point) - return (True, nfs_context) + + if path_mount_point == mount_point: + for fs in SELINUX_SPECIAL_FS.split(','): + if fs in fstype: + special_context = self.selinux_context(path_mount_point) + return (True, special_context) + return (False, None) def set_default_selinux_context(self, path, changed): @@ -562,9 +571,9 @@ def set_context_if_different(self, path, context, changed): # Iterate over the current context instead of the # argument context, which may have selevel. - (is_nfs, nfs_context) = self.is_nfs_path(path) - if is_nfs: - new_context = nfs_context + (is_special_se, sp_context) = self.is_special_selinux_path(path) + if is_special_se: + new_context = sp_context else: for i in range(len(cur_context)): if len(context) > i: @@ -861,6 +870,7 @@ def _check_locale(self): locale.setlocale(locale.LC_ALL, 'C') os.environ['LANG'] = 'C' os.environ['LC_CTYPE'] = 'C' + os.environ['LC_MESSAGES'] = 'C' except Exception, e: self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e) @@ -950,6 +960,20 @@ def _check_required_arguments(self): if len(missing) > 0: self.fail_json(msg="missing required arguments: %s" % ",".join(missing)) + def _check_required_if(self, spec): + ''' ensure that parameters which conditionally required are present ''' + if spec is None: + return + for (key, val, requirements) in spec: + missing = [] + if key in self.params and self.params[key] == val: + for check in requirements: + count = self._count_terms(check) + if count == 0: + missing.append(check) + if len(missing) > 0: + self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing))) 
+ def _check_argument_values(self): ''' ensure all arguments have the requested values, and there are no stray arguments ''' for (k,v) in self.argument_spec.iteritems(): @@ -1009,57 +1033,60 @@ def _check_argument_types(self): value = self.params[k] is_invalid = False - if wanted == 'str': - if not isinstance(value, basestring): - self.params[k] = str(value) - elif wanted == 'list': - if not isinstance(value, list): - if isinstance(value, basestring): - self.params[k] = value.split(",") - elif isinstance(value, int) or isinstance(value, float): - self.params[k] = [ str(value) ] - else: - is_invalid = True - elif wanted == 'dict': - if not isinstance(value, dict): - if isinstance(value, basestring): - if value.startswith("{"): - try: - self.params[k] = json.loads(value) - except: - (result, exc) = self.safe_eval(value, dict(), include_exceptions=True) - if exc is not None: - self.fail_json(msg="unable to evaluate dictionary for %s" % k) - self.params[k] = result - elif '=' in value: - self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")]) + try: + if wanted == 'str': + if not isinstance(value, basestring): + self.params[k] = str(value) + elif wanted == 'list': + if not isinstance(value, list): + if isinstance(value, basestring): + self.params[k] = value.split(",") + elif isinstance(value, int) or isinstance(value, float): + self.params[k] = [ str(value) ] else: - self.fail_json(msg="dictionary requested, could not parse JSON or key=value") - else: - is_invalid = True - elif wanted == 'bool': - if not isinstance(value, bool): - if isinstance(value, basestring): - self.params[k] = self.boolean(value) - else: - is_invalid = True - elif wanted == 'int': - if not isinstance(value, int): - if isinstance(value, basestring): - self.params[k] = int(value) - else: - is_invalid = True - elif wanted == 'float': - if not isinstance(value, float): - if isinstance(value, basestring): - self.params[k] = float(value) - else: - is_invalid = True - else: - 
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) + is_invalid = True + elif wanted == 'dict': + if not isinstance(value, dict): + if isinstance(value, basestring): + if value.startswith("{"): + try: + self.params[k] = json.loads(value) + except: + (result, exc) = self.safe_eval(value, dict(), include_exceptions=True) + if exc is not None: + self.fail_json(msg="unable to evaluate dictionary for %s" % k) + self.params[k] = result + elif '=' in value: + self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")]) + else: + self.fail_json(msg="dictionary requested, could not parse JSON or key=value") + else: + is_invalid = True + elif wanted == 'bool': + if not isinstance(value, bool): + if isinstance(value, basestring): + self.params[k] = self.boolean(value) + else: + is_invalid = True + elif wanted == 'int': + if not isinstance(value, int): + if isinstance(value, basestring): + self.params[k] = int(value) + else: + is_invalid = True + elif wanted == 'float': + if not isinstance(value, float): + if isinstance(value, basestring): + self.params[k] = float(value) + else: + is_invalid = True + else: + self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) - if is_invalid: - self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted)) + if is_invalid: + self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted)) + except ValueError, e: + self.fail_json(msg="value of argument %s is not of type %s and we were unable to automatically convert" % (k, wanted)) def _set_defaults(self, pre=True): for (k,v) in self.argument_spec.iteritems(): diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py index 82306b9a0be87d..e887367c2fd69b 100644 --- a/v1/ansible/module_utils/cloudstack.py +++ b/v1/ansible/module_utils/cloudstack.py @@ -64,14 +64,12 @@ def _connect(self): api_secret = 
self.module.params.get('secret_key') api_url = self.module.params.get('api_url') api_http_method = self.module.params.get('api_http_method') - api_timeout = self.module.params.get('api_timeout') if api_key and api_secret and api_url: self.cs = CloudStack( endpoint=api_url, key=api_key, secret=api_secret, - timeout=api_timeout, method=api_http_method ) else: diff --git a/v1/ansible/module_utils/facts.py b/v1/ansible/module_utils/facts.py index b223c5f5f7d3eb..1162e05b9cfef7 100644 --- a/v1/ansible/module_utils/facts.py +++ b/v1/ansible/module_utils/facts.py @@ -99,8 +99,9 @@ class Facts(object): ('/etc/os-release', 'SuSE'), ('/etc/gentoo-release', 'Gentoo'), ('/etc/os-release', 'Debian'), + ('/etc/lsb-release', 'Mandriva'), ('/etc/os-release', 'NA'), - ('/etc/lsb-release', 'Mandriva')) + ) SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. If there is a platform with more than one @@ -416,11 +417,13 @@ def get_distribution_facts(self): self.facts['distribution_version'] = self.facts['distribution_version'] + '.' 
+ release.group(1) elif name == 'Debian': data = get_file_content(path) - if 'Debian' in data or 'Raspbian' in data: + if 'Ubuntu' in data: + break # Ubuntu gets correct info from python functions + elif 'Debian' in data or 'Raspbian' in data: release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) if release: self.facts['distribution_release'] = release.groups()[0] - break + break elif name == 'Mandriva': data = get_file_content(path) if 'Mandriva' in data: @@ -2160,7 +2163,7 @@ def parse_media_line(self, words, current_if, ips): current_if['media'] = 'Unknown' # Mac does not give us this current_if['media_select'] = words[1] if len(words) > 2: - current_if['media_type'] = words[2][1:] + current_if['media_type'] = words[2][1:-1] if len(words) > 3: current_if['media_options'] = self.get_options(words[3]) @@ -2545,6 +2548,43 @@ def get_virtual_facts(self): self.facts['virtualization_role'] = 'NA' return +class FreeBSDVirtual(Virtual): + """ + This is a FreeBSD-specific subclass of Virtual. It defines + - virtualization_type + - virtualization_role + """ + platform = 'FreeBSD' + + def __init__(self): + Virtual.__init__(self) + + def populate(self): + self.get_virtual_facts() + return self.facts + + def get_virtual_facts(self): + self.facts['virtualization_type'] = '' + self.facts['virtualization_role'] = '' + +class OpenBSDVirtual(Virtual): + """ + This is a OpenBSD-specific subclass of Virtual. 
It defines + - virtualization_type + - virtualization_role + """ + platform = 'OpenBSD' + + def __init__(self): + Virtual.__init__(self) + + def populate(self): + self.get_virtual_facts() + return self.facts + + def get_virtual_facts(self): + self.facts['virtualization_type'] = '' + self.facts['virtualization_role'] = '' class HPUXVirtual(Virtual): """ diff --git a/v1/ansible/module_utils/powershell.ps1 b/v1/ansible/module_utils/powershell.ps1 index ee7d3ddeca4ba8..9606f47783b66c 100644 --- a/v1/ansible/module_utils/powershell.ps1 +++ b/v1/ansible/module_utils/powershell.ps1 @@ -65,7 +65,7 @@ Function Exit-Json($obj) $obj = New-Object psobject } - echo $obj | ConvertTo-Json -Depth 99 + echo $obj | ConvertTo-Json -Compress -Depth 99 Exit } @@ -89,7 +89,7 @@ Function Fail-Json($obj, $message = $null) Set-Attr $obj "msg" $message Set-Attr $obj "failed" $true - echo $obj | ConvertTo-Json -Depth 99 + echo $obj | ConvertTo-Json -Compress -Depth 99 Exit 1 } diff --git a/v1/ansible/module_utils/urls.py b/v1/ansible/module_utils/urls.py index d56cc89395e338..18317e86aeb8e3 100644 --- a/v1/ansible/module_utils/urls.py +++ b/v1/ansible/module_utils/urls.py @@ -50,6 +50,15 @@ except: HAS_SSL=False +HAS_MATCH_HOSTNAME = True +try: + from ssl import match_hostname, CertificateError +except ImportError: + try: + from backports.ssl_match_hostname import match_hostname, CertificateError + except ImportError: + HAS_MATCH_HOSTNAME = False + import httplib import os import re @@ -293,11 +302,13 @@ def http_request(self, req): connect_result = s.recv(4096) self.validate_proxy_response(connect_result) ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) + match_hostname(ssl_s.getpeercert(), self.hostname) else: self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' 
% proxy_parts.get('scheme')) else: s.connect((self.hostname, self.port)) ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) + match_hostname(ssl_s.getpeercert(), self.hostname) # close the ssl connection #ssl_s.unwrap() s.close() @@ -311,6 +322,9 @@ def http_request(self, req): 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \ 'Paths checked for this platform: %s' % ", ".join(paths_checked) ) + except CertificateError: + self.module.fail_json(msg="SSL Certificate does not belong to %s. Make sure the url has a certificate that belongs to it or use validate_certs=no (insecure)" % self.hostname) + try: # cleanup the temp file created, don't worry # if it fails for some reason @@ -363,28 +377,29 @@ def fetch_url(module, url, data=None, headers=None, method=None, # FIXME: change the following to use the generic_urlparse function # to remove the indexed references for 'parsed' parsed = urlparse.urlparse(url) - if parsed[0] == 'https': - if not HAS_SSL and validate_certs: + if parsed[0] == 'https' and validate_certs: + if not HAS_SSL: if distribution == 'Redhat': module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. You can also install python-ssl from EPEL') else: module.fail_json(msg='SSL validation is not available in your version of python. 
You can use validate_certs=no, however this is unsafe and not recommended') - - elif validate_certs: - # do the cert validation - netloc = parsed[1] - if '@' in netloc: - netloc = netloc.split('@', 1)[1] - if ':' in netloc: - hostname, port = netloc.split(':', 1) - port = int(port) - else: - hostname = netloc - port = 443 - # create the SSL validation handler and - # add it to the list of handlers - ssl_handler = SSLValidationHandler(module, hostname, port) - handlers.append(ssl_handler) + if not HAS_MATCH_HOSTNAME: + module.fail_json(msg='Available SSL validation does not check that the certificate matches the hostname. You can install backports.ssl_match_hostname or update your managed machine to python-2.7.9 or newer. You could also use validate_certs=no, however this is unsafe and not recommended') + + # do the cert validation + netloc = parsed[1] + if '@' in netloc: + netloc = netloc.split('@', 1)[1] + if ':' in netloc: + hostname, port = netloc.split(':', 1) + port = int(port) + else: + hostname = netloc + port = 443 + # create the SSL validation handler and + # add it to the list of handlers + ssl_handler = SSLValidationHandler(module, hostname, port) + handlers.append(ssl_handler) if parsed[0] != 'ftp': username = module.params.get('url_username', '') diff --git a/v1/ansible/runner/connection_plugins/ssh.py b/v1/ansible/runner/connection_plugins/ssh.py index ff7e8e03c874b7..036175f6a9c3e2 100644 --- a/v1/ansible/runner/connection_plugins/ssh.py +++ b/v1/ansible/runner/connection_plugins/ssh.py @@ -16,22 +16,21 @@ # along with Ansible. If not, see . 
# -import fcntl -import gettext -import hmac import os +import re +import subprocess +import shlex import pipes -import pty -import pwd import random -import re import select -import shlex -import subprocess -import time +import fcntl +import hmac +import pwd +import gettext +import pty from hashlib import sha1 import ansible.constants as C -from ansible.callbacks import vvv, vv +from ansible.callbacks import vvv from ansible import errors from ansible import utils @@ -257,51 +256,7 @@ def not_in_host_file(self, host): vvv("EXEC previous known host file not found for %s" % host) return True - def exec_command(self, *args, **kwargs): - """ Wrapper around _exec_command to retry in the case of an ssh - failure - - Will retry if: - * an exception is caught - * ssh returns 255 - - Will not retry if - * remaining_tries is <2 - * retries limit reached - """ - remaining_tries = C.get_config( - C.p, 'ssh_connection', 'retries', - 'ANSIBLE_SSH_RETRIES', 3, integer=True) + 1 - cmd_summary = "%s %s..." % (args[0], str(kwargs)[:200]) - for attempt in xrange(remaining_tries): - pause = 2 ** attempt - 1 - if pause > 30: - pause = 30 - time.sleep(pause) - try: - return_tuple = self._exec_command(*args, **kwargs) - except Exception as e: - msg = ("ssh_retry: attempt: %d, caught exception(%s) from cmd " - "(%s).") % (attempt, e, cmd_summary) - vv(msg) - if attempt == remaining_tries - 1: - raise e - else: - continue - # 0 = success - # 1-254 = remote command return code - # 255 = failure from the ssh command itself - if return_tuple[0] != 255: - break - else: - msg = ('ssh_retry: attempt: %d, ssh return code is 255. 
cmd ' - '(%s).') % (attempt, cmd_summary) - vv(msg) - - return return_tuple - - - def _exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: diff --git a/v1/ansible/utils/__init__.py b/v1/ansible/utils/__init__.py index 7ed07a54c840d3..eb6fa2a712ba89 100644 --- a/v1/ansible/utils/__init__.py +++ b/v1/ansible/utils/__init__.py @@ -1024,9 +1024,9 @@ def base_parser(constants=C, usage="", output_opts=False, runas_opts=False, if runas_opts: # priv user defaults to root later on to enable detecting when this option was given here - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', + parser.add_option('-K', '--ask-sudo-pass', default=constants.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true', + parser.add_option('--ask-su-pass', default=constants.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', help='ask for su password (deprecated, use become)') parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") @@ -1617,7 +1617,9 @@ def _load_vars_from_folder(folder_path, results, vault_password=None): names.sort() # do not parse hidden files or dirs, e.g. 
.svn/ - paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')] + paths = [os.path.join(folder_path, name) for name in names + if not name.startswith('.') + and os.path.splitext(name)[1] in C.YAML_FILENAME_EXTENSIONS] for path in paths: _found, results = _load_vars_from_path(path, results, vault_password=vault_password) return results diff --git a/v1/ansible/utils/module_docs.py b/v1/ansible/utils/module_docs.py index ee99af2cb54dba..c6920571726931 100644 --- a/v1/ansible/utils/module_docs.py +++ b/v1/ansible/utils/module_docs.py @@ -23,6 +23,8 @@ import yaml import traceback +from collections import MutableMapping, MutableSet, MutableSequence + from ansible import utils # modules that are ok that they do not have documentation strings @@ -86,7 +88,14 @@ def get_docstring(filename, verbose=False): if not doc.has_key(key): doc[key] = value else: - doc[key].update(value) + if isinstance(doc[key], MutableMapping): + doc[key].update(value) + elif isinstance(doc[key], MutableSet): + doc[key].add(value) + elif isinstance(doc[key], MutableSequence): + doc[key] = sorted(frozenset(doc[key] + value)) + else: + raise Exception("Attempt to extend a documentation fragement of unknown type") if 'EXAMPLES' in (t.id for t in child.targets): plainexamples = child.value.s[1:] # Skip first empty line From f3f3fb7c491effe9e61ae5a429ac796558c2963a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 4 Jun 2015 13:54:39 -0400 Subject: [PATCH 0773/3617] Fixing vars_prompt --- lib/ansible/executor/task_queue_manager.py | 64 ++++++++++++++++++++++ lib/ansible/playbook/play.py | 5 +- lib/ansible/plugins/callback/__init__.py | 2 - lib/ansible/plugins/callback/default.py | 4 +- 4 files changed, 70 insertions(+), 5 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index a875c310d51b86..b8ca4273702171 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ 
b/lib/ansible/executor/task_queue_manager.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import getpass import multiprocessing import os import socket @@ -150,6 +151,50 @@ def _load_callbacks(self, stdout_callback): return loaded_plugins + def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + + if prompt and default is not None: + msg = "%s [%s]: " % (prompt, default) + elif prompt: + msg = "%s: " % prompt + else: + msg = 'input for %s: ' % varname + + def do_prompt(prompt, private): + if sys.stdout.encoding: + msg = prompt.encode(sys.stdout.encoding) + else: + # when piping the output, or at other times when stdout + # may not be the standard file descriptor, the stdout + # encoding may not be set, so default to something sane + msg = prompt.encode(locale.getpreferredencoding()) + if private: + return getpass.getpass(msg) + return raw_input(msg) + + if confirm: + while True: + result = do_prompt(msg, private) + second = do_prompt("confirm " + msg, private) + if result == second: + break + display("***** VALUES ENTERED DO NOT MATCH ****") + else: + result = do_prompt(msg, private) + + # if result is false and default is not None + if not result and default is not None: + result = default + + # FIXME: make this work with vault or whatever this old method was + #if encrypt: + # result = utils.do_encrypt(result, encrypt, salt_size, salt) + + # handle utf-8 chars + # FIXME: make this work + #result = to_unicode(result, errors='strict') + return result + def run(self, play): ''' Iterates over the roles/tasks in a play, using the given (or default) @@ -159,6 +204,25 @@ def run(self, play): are done with the current task). 
''' + if play.vars_prompt: + for var in play.vars_prompt: + if 'name' not in var: + raise AnsibleError("'vars_prompt' item is missing 'name:'", obj=play._ds) + + vname = var['name'] + prompt = var.get("prompt", vname) + default = var.get("default", None) + private = var.get("private", True) + + confirm = var.get("confirm", False) + encrypt = var.get("encrypt", None) + salt_size = var.get("salt_size", None) + salt = var.get("salt", None) + + if vname not in play.vars: + self.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) + play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default) + all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index c891571a985859..49a986555cdc29 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -62,7 +62,7 @@ class Play(Base, Taggable, Become): # Variable Attributes _vars_files = FieldAttribute(isa='list', default=[]) - _vars_prompt = FieldAttribute(isa='dict', default=dict()) + _vars_prompt = FieldAttribute(isa='list', default=[]) _vault_password = FieldAttribute(isa='string') # Block (Task) Lists Attributes @@ -116,6 +116,9 @@ def preprocess_data(self, ds): ds['remote_user'] = ds['user'] del ds['user'] + if 'vars_prompt' in ds and not isinstance(ds['vars_prompt'], list): + ds['vars_prompt'] = [ ds['vars_prompt'] ] + return super(Play, self).preprocess_data(ds) def _load_vars(self, attr, ds): diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 2c2e7e74c65779..c03f6981d9c16e 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -19,8 +19,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = 
type -#from ansible.utils.display import Display - __all__ = ["CallbackBase"] class CallbackBase: diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index de6548ef188cba..5b50b49cc89702 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -110,8 +110,8 @@ def v2_playbook_on_cleanup_task_start(self, task): def v2_playbook_on_handler_task_start(self, task): self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip()) - def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): - pass + #def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + # pass def v2_playbook_on_setup(self): pass From 9754c67138f77264652606ac26d6e220903dd258 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 13 May 2015 10:58:46 -0500 Subject: [PATCH 0774/3617] Use a decorator to ensure jit connection, instead of an explicit call to _connect --- lib/ansible/executor/task_executor.py | 1 - lib/ansible/plugins/connections/__init__.py | 12 +++++++++++- lib/ansible/plugins/connections/paramiko_ssh.py | 8 ++++++-- lib/ansible/plugins/connections/ssh.py | 6 +++++- lib/ansible/plugins/connections/winrm.py | 6 +++++- 5 files changed, 27 insertions(+), 6 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 69cbb63f47cbe4..8de8f7027ab14c 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -210,7 +210,6 @@ def _execute(self, variables=None): # get the connection and the handler for this execution self._connection = self._get_connection(variables) self._connection.set_host_overrides(host=self._host) - self._connection._connect() self._handler = self._get_action_handler(connection=self._connection, templar=templar) diff --git 
a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 897bc58982bfef..da0775530d6dce 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -22,6 +22,7 @@ from abc import ABCMeta, abstractmethod, abstractproperty +from functools import wraps from six import with_metaclass from ansible import constants as C @@ -32,7 +33,16 @@ # which may want to output display/logs too from ansible.utils.display import Display -__all__ = ['ConnectionBase'] +__all__ = ['ConnectionBase', 'ensure_connect'] + + +def ensure_connect(func): + @wraps(func) + def wrapped(self, *args, **kwargs): + self._connect() + return func(self, *args, **kwargs) + return wrapped + class ConnectionBase(with_metaclass(ABCMeta, object)): ''' diff --git a/lib/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py index 0d7a82c34b55c8..8beaecf4928500 100644 --- a/lib/ansible/plugins/connections/paramiko_ssh.py +++ b/lib/ansible/plugins/connections/paramiko_ssh.py @@ -41,7 +41,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound -from ansible.plugins.connections import ConnectionBase +from ansible.plugins.connections import ConnectionBase, ensure_connect from ansible.utils.path import makedirs_safe AUTHENTICITY_MSG=""" @@ -61,6 +61,7 @@ except ImportError: pass + class MyAddPolicy(object): """ Based on AutoAddPolicy in paramiko so we can determine when keys are added @@ -188,6 +189,7 @@ def _connect_uncached(self): return ssh + @ensure_connect def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' @@ -248,6 +250,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_out + stderr) + @ensure_connect def put_file(self, in_path, out_path): ''' transfer 
a file from local to remote ''' @@ -272,9 +275,10 @@ def _connect_sftp(self): if cache_key in SFTP_CONNECTION_CACHE: return SFTP_CONNECTION_CACHE[cache_key] else: - result = SFTP_CONNECTION_CACHE[cache_key] = self.connect().ssh.open_sftp() + result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp() return result + @ensure_connect def fetch_file(self, in_path, out_path): ''' save a remote file to the specified path ''' diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index b3ada343c0454d..5a435093d0065a 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -34,7 +34,8 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound -from ansible.plugins.connections import ConnectionBase +from ansible.plugins.connections import ConnectionBase, ensure_connect + class Connection(ConnectionBase): ''' ssh based connections ''' @@ -269,6 +270,7 @@ def not_in_host_file(self, host): self._display.vvv("EXEC previous known host file not found for {0}".format(host)) return True + @ensure_connect def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' @@ -390,6 +392,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr) + @ensure_connect def put_file(self, in_path, out_path): ''' transfer a file from local to remote ''' self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr) @@ -425,6 +428,7 @@ def put_file(self, in_path, out_path): if returncode != 0: raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(out_path, stdout, stderr)) + @ensure_connect def fetch_file(self, in_path, out_path): ''' fetch a file from remote to local ''' self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), 
host=self._connection_info.remote_addr) diff --git a/lib/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py index f16da0f6e63a01..ee287491897969 100644 --- a/lib/ansible/plugins/connections/winrm.py +++ b/lib/ansible/plugins/connections/winrm.py @@ -42,10 +42,11 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound -from ansible.plugins.connections import ConnectionBase +from ansible.plugins.connections import ConnectionBase, ensure_connect from ansible.plugins import shell_loader from ansible.utils.path import makedirs_safe + class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' @@ -151,6 +152,7 @@ def _connect(self): self.protocol = self._winrm_connect() return self + @ensure_connect def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): cmd = cmd.encode('utf-8') @@ -172,6 +174,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): raise AnsibleError("failed to exec cmd %s" % cmd) return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8')) + @ensure_connect def put_file(self, in_path, out_path): self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) if not os.path.exists(in_path): @@ -210,6 +213,7 @@ def put_file(self, in_path, out_path): traceback.print_exc() raise AnsibleError("failed to transfer file to %s" % out_path) + @ensure_connect def fetch_file(self, in_path, out_path): out_path = out_path.replace('\\', '/') self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) From bce281014cfc8aaa2675c129ca3117a360041e5c Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 4 Jun 2015 13:27:18 -0500 Subject: [PATCH 0775/3617] Decorate the ConnectionBase methods, switch to calling super from individual connection classes --- lib/ansible/plugins/connections/__init__.py | 3 +++ 
lib/ansible/plugins/connections/local.py | 7 +++++++ lib/ansible/plugins/connections/paramiko_ssh.py | 11 +++++++---- lib/ansible/plugins/connections/ssh.py | 13 +++++++++---- lib/ansible/plugins/connections/winrm.py | 10 ++++++---- 5 files changed, 32 insertions(+), 12 deletions(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index da0775530d6dce..1d3a2bdeede1f5 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -92,16 +92,19 @@ def _connect(self): """Connect to the host we've been initialized with""" pass + @ensure_connect @abstractmethod def exec_command(self, cmd, tmp_path, executable=None, in_data=None): """Run a command on the remote host""" pass + @ensure_connect @abstractmethod def put_file(self, in_path, out_path): """Transfer a file from local to remote""" pass + @ensure_connect @abstractmethod def fetch_file(self, in_path, out_path): """Fetch a file from remote to local""" diff --git a/lib/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py index 1dc6076b0db547..85bc51de0aee1b 100644 --- a/lib/ansible/plugins/connections/local.py +++ b/lib/ansible/plugins/connections/local.py @@ -49,6 +49,8 @@ def _connect(self, port=None): def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): ''' run a command on the local host ''' + super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) + debug("in local.exec_command()") # su requires to be run from a terminal, and therefore isn't supported here (yet?) 
#if self._connection_info.su: @@ -108,6 +110,8 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): def put_file(self, in_path, out_path): ''' transfer a file from local to local ''' + super(Connection, self).put_file(in_path, out_path) + #vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host) self._display.vvv("{0} PUT {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path)) if not os.path.exists(in_path): @@ -123,6 +127,9 @@ def put_file(self, in_path, out_path): def fetch_file(self, in_path, out_path): ''' fetch a file from local to local -- for copatibility ''' + + super(Connection, self).fetch_file(in_path, out_path) + #vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host) self._display.vvv("{0} FETCH {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path)) self.put_file(in_path, out_path) diff --git a/lib/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py index 8beaecf4928500..5a5259c5fcc80d 100644 --- a/lib/ansible/plugins/connections/paramiko_ssh.py +++ b/lib/ansible/plugins/connections/paramiko_ssh.py @@ -41,7 +41,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound -from ansible.plugins.connections import ConnectionBase, ensure_connect +from ansible.plugins.connections import ConnectionBase from ansible.utils.path import makedirs_safe AUTHENTICITY_MSG=""" @@ -189,10 +189,11 @@ def _connect_uncached(self): return ssh - @ensure_connect def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' + super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) + if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") @@ -250,10 +251,11 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): return 
(chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_out + stderr) - @ensure_connect def put_file(self, in_path, out_path): ''' transfer a file from local to remote ''' + super(Connection, self).put_file(in_path, out_path) + self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) if not os.path.exists(in_path): @@ -278,10 +280,11 @@ def _connect_sftp(self): result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp() return result - @ensure_connect def fetch_file(self, in_path, out_path): ''' save a remote file to the specified path ''' + super(Connection, self).fetch_file(in_path, out_path) + self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) try: diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 5a435093d0065a..e2251ca5b0d816 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -34,7 +34,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound -from ansible.plugins.connections import ConnectionBase, ensure_connect +from ansible.plugins.connections import ConnectionBase class Connection(ConnectionBase): @@ -270,10 +270,11 @@ def not_in_host_file(self, host): self._display.vvv("EXEC previous known host file not found for {0}".format(host)) return True - @ensure_connect def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): ''' run a command on the remote host ''' + super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) + ssh_cmd = self._password_cmd() ssh_cmd += ("ssh", "-C") if not in_data: @@ -392,9 +393,11 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr) - @ensure_connect def put_file(self, in_path, out_path): ''' transfer a file 
from local to remote ''' + + super(Connection, self).put_file(in_path, out_path) + self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr) if not os.path.exists(in_path): raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path)) @@ -428,9 +431,11 @@ def put_file(self, in_path, out_path): if returncode != 0: raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(out_path, stdout, stderr)) - @ensure_connect def fetch_file(self, in_path, out_path): ''' fetch a file from remote to local ''' + + super(Connection, self).fetch_file(in_path, out_path) + self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr) cmd = self._password_cmd() diff --git a/lib/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py index ee287491897969..2bc1ee00539f9c 100644 --- a/lib/ansible/plugins/connections/winrm.py +++ b/lib/ansible/plugins/connections/winrm.py @@ -42,7 +42,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound -from ansible.plugins.connections import ConnectionBase, ensure_connect +from ansible.plugins.connections import ConnectionBase from ansible.plugins import shell_loader from ansible.utils.path import makedirs_safe @@ -152,8 +152,8 @@ def _connect(self): self.protocol = self._winrm_connect() return self - @ensure_connect def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): + super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data,in_data) cmd = cmd.encode('utf-8') cmd_parts = shlex.split(cmd, posix=False) @@ -174,8 +174,9 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): raise AnsibleError("failed to exec cmd %s" % cmd) return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8')) - @ensure_connect def put_file(self, in_path, out_path): + 
super(Connection, self).put_file(in_path, out_path) + self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) if not os.path.exists(in_path): raise AnsibleFileNotFound("file or module does not exist: %s" % in_path) @@ -213,8 +214,9 @@ def put_file(self, in_path, out_path): traceback.print_exc() raise AnsibleError("failed to transfer file to %s" % out_path) - @ensure_connect def fetch_file(self, in_path, out_path): + super(Connection, self).fetch_file(in_path, out_path) + out_path = out_path.replace('\\', '/') self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr) buffer_size = 2**19 # 0.5MB chunks From ee06eebea3d7e218783385424a6f575e8bb7e5b3 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Thu, 4 Jun 2015 19:46:09 +0100 Subject: [PATCH 0776/3617] Fix lookup() plugin lookup() plugin is currently broken because _get_file_contents() now returns a tuple: (contents, show_data). This patch fix that issue. --- lib/ansible/plugins/lookup/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py index ea53c37e03986b..30247c150ce879 100644 --- a/lib/ansible/plugins/lookup/file.py +++ b/lib/ansible/plugins/lookup/file.py @@ -53,7 +53,7 @@ def run(self, terms, variables=None, **kwargs): for path in (basedir_path, relative_path, playbook_path): try: - contents = self._loader._get_file_contents(path) + contents, show_data = self._loader._get_file_contents(path) ret.append(contents.rstrip()) break except AnsibleParserError: From ee5e166563ca01a556a921b177a632ea5c2f1a44 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 4 Jun 2015 15:43:07 -0400 Subject: [PATCH 0777/3617] Fixing ansible_*_interpreter use Fixes ansible/ansible-modules-core#1459 --- lib/ansible/executor/module_common.py | 25 +++++++++-------------- lib/ansible/plugins/action/__init__.py | 8 ++++---- lib/ansible/plugins/action/assemble.py 
| 8 ++++---- lib/ansible/plugins/action/async.py | 6 +++--- lib/ansible/plugins/action/copy.py | 12 +++++------ lib/ansible/plugins/action/fetch.py | 2 +- lib/ansible/plugins/action/normal.py | 2 +- lib/ansible/plugins/action/patch.py | 4 ++-- lib/ansible/plugins/action/script.py | 4 ++-- lib/ansible/plugins/action/synchronize.py | 2 +- lib/ansible/plugins/action/template.py | 4 ++-- lib/ansible/plugins/action/unarchive.py | 4 ++-- 12 files changed, 38 insertions(+), 43 deletions(-) diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py index 535fbd45e335aa..85dcafb961dec2 100644 --- a/lib/ansible/executor/module_common.py +++ b/lib/ansible/executor/module_common.py @@ -31,6 +31,7 @@ from ansible import constants as C from ansible.errors import AnsibleError from ansible.parsing.utils.jsonify import jsonify +from ansible.utils.unicode import to_bytes REPLACER = "#<>" REPLACER_ARGS = "\"<>\"" @@ -113,7 +114,7 @@ def _find_snippet_imports(module_data, module_path, strip_comments): # ****************************************************************************** -def modify_module(module_path, module_args, strip_comments=False): +def modify_module(module_path, module_args, task_vars=dict(), strip_comments=False): """ Used to insert chunks of code into modules before transfer rather than doing regular python imports. 
This allows for more efficient transfer in @@ -158,7 +159,6 @@ def modify_module(module_path, module_args, strip_comments=False): (module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments) - #module_args_json = jsonify(module_args) module_args_json = json.dumps(module_args) encoded_args = repr(module_args_json.encode('utf-8')) @@ -166,14 +166,11 @@ def modify_module(module_path, module_args, strip_comments=False): module_data = module_data.replace(REPLACER_VERSION, repr(__version__)) module_data = module_data.replace(REPLACER_COMPLEX, encoded_args) - # FIXME: we're not passing around an inject dictionary anymore, so - # this needs to be fixed with whatever method we use for vars - # like this moving forward - #if module_style == 'new': - # facility = C.DEFAULT_SYSLOG_FACILITY - # if 'ansible_syslog_facility' in inject: - # facility = inject['ansible_syslog_facility'] - # module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility) + if module_style == 'new': + facility = C.DEFAULT_SYSLOG_FACILITY + if 'ansible_syslog_facility' in task_vars: + facility = task_vars['ansible_syslog_facility'] + module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility) lines = module_data.split(b"\n", 1) shebang = None @@ -183,11 +180,9 @@ def modify_module(module_path, module_args, strip_comments=False): interpreter = args[0] interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter) - # FIXME: more inject stuff here... 
- #from ansible.utils.unicode import to_bytes - #if interpreter_config in inject: - # interpreter = to_bytes(inject[interpreter_config], errors='strict') - # lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:])) + if interpreter_config in task_vars: + interpreter = to_bytes(task_vars[interpreter_config], errors='strict') + lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:])) lines.insert(1, ENCODING_STRING) else: diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index d6861118b2f0d4..5509bb2d94c9f5 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -67,7 +67,7 @@ def __init__(self, task, connection, connection_info, loader, templar, shared_lo self._supports_check_mode = True - def _configure_module(self, module_name, module_args): + def _configure_module(self, module_name, module_args, task_vars=dict()): ''' Handles the loading and templating of the module code through the modify_module() function. @@ -86,7 +86,7 @@ def _configure_module(self, module_name, module_args): "run 'git submodule update --init --recursive' to correct this problem." % (module_name)) # insert shared code and arguments into the module - (module_data, module_style, module_shebang) = modify_module(module_path, module_args) + (module_data, module_style, module_shebang) = modify_module(module_path, module_args, task_vars=task_vars) return (module_style, module_shebang, module_data) @@ -314,7 +314,7 @@ def _filter_leading_non_json_lines(self, data): filtered_lines.write(line + '\n') return filtered_lines.getvalue() - def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_files=False, delete_remote_tmp=True): + def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=dict(), persist_files=False, delete_remote_tmp=True): ''' Transfer and run a module along with its arguments. 
''' @@ -338,7 +338,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_ debug("in _execute_module (%s, %s)" % (module_name, module_args)) - (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args) + (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars) if not shebang: raise AnsibleError("module is missing interpreter line") diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py index 4e796bddb6f013..49f861f08e9574 100644 --- a/lib/ansible/plugins/action/assemble.py +++ b/lib/ansible/plugins/action/assemble.py @@ -87,7 +87,7 @@ def run(self, tmp=None, task_vars=dict()): return dict(failed=True, msg="src and dest are required") if boolean(remote_src): - return self._execute_module(tmp=tmp) + return self._execute_module(tmp=tmp, task_vars=task_vars) elif self._task._role is not None: src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src) else: @@ -109,7 +109,7 @@ def run(self, tmp=None, task_vars=dict()): resultant = file(path).read() # FIXME: diff needs to be moved somewhere else #if self.runner.diff: - # dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), tmp=tmp, persist_files=True) + # dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), task_vars=task_vars, tmp=tmp, persist_files=True) # if 'content' in dest_result: # dest_contents = dest_result['content'] # if dest_result['encoding'] == 'base64': @@ -140,7 +140,7 @@ def run(self, tmp=None, task_vars=dict()): # res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject) # res.diff = dict(after=resultant) # return res - res = self._execute_module(module_name='copy', module_args=new_module_args, tmp=tmp) + res = self._execute_module(module_name='copy', module_args=new_module_args, 
task_vars=task_vars, tmp=tmp) #res.diff = dict(after=resultant) return res else: @@ -153,4 +153,4 @@ def run(self, tmp=None, task_vars=dict()): ) ) - return self._execute_module(module_name='file', module_args=new_module_args, tmp=tmp) + return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp) diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 7c02e09757eac1..7fedd544d67506 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -42,12 +42,12 @@ def run(self, tmp=None, task_vars=dict()): env_string = self._compute_environment_string() # configure, upload, and chmod the target module - (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=self._task.args) + (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=self._task.args, task_vars=task_vars) self._transfer_data(remote_module_path, module_data) self._remote_chmod(tmp, 'a+rx', remote_module_path) # configure, upload, and chmod the async_wrapper module - (async_module_style, shebang, async_module_data) = self._configure_module(module_name='async_wrapper', module_args=dict()) + (async_module_style, shebang, async_module_data) = self._configure_module(module_name='async_wrapper', module_args=dict(), task_vars=task_vars) self._transfer_data(async_module_path, async_module_data) self._remote_chmod(tmp, 'a+rx', async_module_path) @@ -57,7 +57,7 @@ def run(self, tmp=None, task_vars=dict()): async_jid = str(random.randint(0, 999999999999)) async_cmd = " ".join([str(x) for x in [async_module_path, async_jid, async_limit, remote_module_path, argsfile]]) - result = self._low_level_execute_command(cmd=async_cmd, tmp=None) + result = self._low_level_execute_command(cmd=async_cmd, task_vars=task_vars, tmp=None) # clean up after if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES: diff --git 
a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index 6db130ad7f3a32..2d404029c5070f 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -191,7 +191,7 @@ def run(self, tmp=None, task_vars=dict()): # FIXME: runner shouldn't have the diff option there #if self.runner.diff and not raw: - # diff = self._get_diff_data(tmp, dest_file, source_full) + # diff = self._get_diff_data(tmp, dest_file, source_full, task_vars) #else: # diff = {} diff = {} @@ -236,7 +236,7 @@ def run(self, tmp=None, task_vars=dict()): ) ) - module_return = self._execute_module(module_name='copy', module_args=new_module_args, delete_remote_tmp=delete_remote_tmp) + module_return = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp) module_executed = True else: @@ -260,7 +260,7 @@ def run(self, tmp=None, task_vars=dict()): ) # Execute the file module. - module_return = self._execute_module(module_name='file', module_args=new_module_args, delete_remote_tmp=delete_remote_tmp) + module_return = self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, delete_remote_tmp=delete_remote_tmp) module_executed = True if not module_return.get('checksum'): @@ -304,8 +304,8 @@ def _create_content_tempfile(self, content): f.close() return content_tempfile - def _get_diff_data(self, tmp, destination, source): - peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), persist_files=True) + def _get_diff_data(self, tmp, destination, source, task_vars): + peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True) if 'failed' in peek_result and peek_result['failed'] or peek_result.get('rc', 0) != 0: return {} @@ -318,7 +318,7 @@ def _get_diff_data(self, tmp, destination, source): #elif peek_result['size'] > 
utils.MAX_FILE_SIZE_FOR_DIFF: # diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF else: - dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), tmp=tmp, persist_files=True) + dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, tmp=tmp, persist_files=True) if 'content' in dest_result: dest_contents = dest_result['content'] if dest_result['encoding'] == 'base64': diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py index 6a903ae5a27c3d..2123c5b162bd6c 100644 --- a/lib/ansible/plugins/action/fetch.py +++ b/lib/ansible/plugins/action/fetch.py @@ -61,7 +61,7 @@ def run(self, tmp=None, task_vars=dict()): # use slurp if sudo and permissions are lacking remote_data = None if remote_checksum in ('1', '2') or self._connection_info.become: - slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), tmp=tmp) + slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp) if slurpres.get('rc') == 0: if slurpres['encoding'] == 'base64': remote_data = base64.b64decode(slurpres['content']) diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py index 431d9b0eebebc4..445d8a7ae77f02 100644 --- a/lib/ansible/plugins/action/normal.py +++ b/lib/ansible/plugins/action/normal.py @@ -24,6 +24,6 @@ class ActionModule(ActionBase): def run(self, tmp=None, task_vars=dict()): #vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host) - return self._execute_module(tmp) + return self._execute_module(tmp, task_vars=task_vars) diff --git a/lib/ansible/plugins/action/patch.py b/lib/ansible/plugins/action/patch.py index bf2af1be1ecbd6..31dbd31fa4df31 100644 --- a/lib/ansible/plugins/action/patch.py +++ b/lib/ansible/plugins/action/patch.py @@ -36,7 +36,7 @@ def run(self, tmp=None, task_vars=dict()): elif remote_src: # everything is remote, so we 
just execute the module # without changing any of the module arguments - return self._execute_module() + return self._execute_module(task_vars=task_vars) if self._task._role is not None: src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src) @@ -63,4 +63,4 @@ def run(self, tmp=None, task_vars=dict()): ) ) - return self._execute_module('patch', module_args=new_module_args) + return self._execute_module('patch', module_args=new_module_args, task_vars=task_vars) diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py index 3ca7dc6a342795..7c248455150883 100644 --- a/lib/ansible/plugins/action/script.py +++ b/lib/ansible/plugins/action/script.py @@ -42,7 +42,7 @@ def run(self, tmp=None, task_vars=None): # do not run the command if the line contains creates=filename # and the filename already exists. This allows idempotence # of command executions. - result = self._execute_module(module_name='stat', module_args=dict(path=creates), tmp=tmp, persist_files=True) + result = self._execute_module(module_name='stat', module_args=dict(path=creates), task_vars=task_vars, tmp=tmp, persist_files=True) stat = result.get('stat', None) if stat and stat.get('exists', False): return dict(skipped=True, msg=("skipped, since %s exists" % creates)) @@ -52,7 +52,7 @@ def run(self, tmp=None, task_vars=None): # do not run the command if the line contains removes=filename # and the filename does not exist. This allows idempotence # of command executions. 
- result = self._execute_module(module_name='stat', module_args=dict(path=removes), tmp=tmp, persist_files=True) + result = self._execute_module(module_name='stat', module_args=dict(path=removes), task_vars=task_vars, tmp=tmp, persist_files=True) stat = result.get('stat', None) if stat and not stat.get('exists', False): return dict(skipped=True, msg=("skipped, since %s does not exist" % removes)) diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py index 219a982cb142b0..aa0a810a2aaf30 100644 --- a/lib/ansible/plugins/action/synchronize.py +++ b/lib/ansible/plugins/action/synchronize.py @@ -170,7 +170,7 @@ def run(self, tmp=None, task_vars=dict()): self._task.args['ssh_args'] = constants.ANSIBLE_SSH_ARGS # run the module and store the result - result = self._execute_module('synchronize') + result = self._execute_module('synchronize', task_vars=task_vars) return result diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 7300848e6b4d01..ea033807dff493 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -152,7 +152,7 @@ def run(self, tmp=None, task_vars=dict()): # res.diff = dict(before=dest_contents, after=resultant) # return res - result = self._execute_module(module_name='copy', module_args=new_module_args) + result = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars) if result.get('changed', False): result['diff'] = dict(before=dest_contents, after=resultant) return result @@ -180,5 +180,5 @@ def run(self, tmp=None, task_vars=dict()): #if self.runner.noop_on_check(task_vars): # new_module_args['CHECKMODE'] = True - return self._execute_module(module_name='file', module_args=new_module_args) + return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars) diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py index 
b7601ed9107e39..ef5320b71941ea 100644 --- a/lib/ansible/plugins/action/unarchive.py +++ b/lib/ansible/plugins/action/unarchive.py @@ -47,7 +47,7 @@ def run(self, tmp=None, task_vars=dict()): # and the filename already exists. This allows idempotence # of command executions. module_args_tmp = "path=%s" % creates - result = self._execute_module(module_name='stat', module_args=dict(path=creates)) + result = self._execute_module(module_name='stat', module_args=dict(path=creates), task_vars=task_vars) stat = result.get('stat', None) if stat and stat.get('exists', False): return dict(skipped=True, msg=("skipped, since %s exists" % creates)) @@ -110,5 +110,5 @@ def run(self, tmp=None, task_vars=dict()): # module_args += " CHECKMODE=True" # execute the unarchive module now, with the updated args - return self._execute_module(module_args=new_module_args) + return self._execute_module(module_args=new_module_args, task_vars=task_vars) From 73c956366e856502598021756b3f231723af30b0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 5 Jun 2015 07:15:35 -0400 Subject: [PATCH 0778/3617] Correctly determine failed task state when checking results Fixes #11172 --- lib/ansible/plugins/strategies/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index bb839f20f4cdc0..57630f4f21e224 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -149,7 +149,7 @@ def _process_pending_results(self, iterator): task_result = result[1] host = task_result._host task = task_result._task - if result[0] == 'host_task_failed' or 'failed' in task_result._result: + if result[0] == 'host_task_failed' or task_result.is_failed(): if not task.ignore_errors: debug("marking %s as failed" % host.name) iterator.mark_host_failed(host) From 9ac624d2c90be1c18d2aa27b78c373e66aa16661 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 5 Jun 
2015 07:19:14 -0400 Subject: [PATCH 0779/3617] Fix mock DictDataLoader _get_file_contents to match real code --- test/units/mock/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/mock/loader.py b/test/units/mock/loader.py index 078ca3f0e6ce26..8b6bbbbaf9cd99 100644 --- a/test/units/mock/loader.py +++ b/test/units/mock/loader.py @@ -40,7 +40,7 @@ def load_from_file(self, path): def _get_file_contents(self, path): if path in self._file_mapping: - return self._file_mapping[path] + return (self._file_mapping[path], False) else: raise AnsibleParserError("file not found: %s" % path) From e3d40e541c5d7523775f477c3fa17c0810ed3438 Mon Sep 17 00:00:00 2001 From: vroetman Date: Fri, 5 Jun 2015 09:55:24 -0400 Subject: [PATCH 0780/3617] Update current released Ansible to 1.9.1 Update current released Ansible to 1.9.1 and development version to 2.0 --- docsite/rst/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index 1afa47db87d82c..a0da19cca29f81 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -16,7 +16,7 @@ We believe simplicity is relevant to all sizes of environments and design for bu Ansible manages machines in an agentless manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems. -This documentation covers the current released version of Ansible (1.8.4) and also some development version features (1.9). For recent features, in each section, the version of Ansible where the feature is added is indicated. 
Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. +This documentation covers the current released version of Ansible (1.9.1) and also some development version features (2.0). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. .. _an_introduction: From f4c6caa24d28c1757c704c043bfca5882cc1b200 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 10:16:57 -0400 Subject: [PATCH 0781/3617] added elasticache_subnet_group to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cfc062f577c157..a1b0568985bf25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ Deprecated Modules (new ones in parens): New Modules: * find * ec2_ami_find + * elasticache_subnet_group * ec2_win_password * circonus_annotation * consul From 1e9c9df0752440b997e71d5e0e34a217d38202a0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 11:21:08 -0400 Subject: [PATCH 0782/3617] added webfaction modules to changelog --- CHANGELOG.md | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1b0568985bf25..580a9b5a1e84af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,16 +51,21 @@ New Modules: * rabbitmq_binding * rabbitmq_exchange * rabbitmq_queue - * zabbix_host - * 
zabbix_hostmacro - * zabbix_screen * vertica_configuration * vertica_facts * vertica_role * vertica_schema * vertica_user * vmware_datacenter + * webfaction_app + * webfaction_db + * webfaction_domain + * webfaction_mailbox + * webfaction_site * win_environment + * zabbix_host + * zabbix_hostmacro + * zabbix_screen New Inventory scripts: * cloudstack From 6bc2ea1f2bc420231caa3bc40813ea0e7a8b1484 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 5 Jun 2015 12:02:35 -0500 Subject: [PATCH 0783/3617] Don't empty out become_pass. See #11169 --- lib/ansible/executor/connection_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 424ac062b3da6f..03d9039c4971f5 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -109,7 +109,8 @@ def set_play(self, play): self.become_method = play.become_method if play.become_user: self.become_user = play.become_user - self.become_pass = play.become_pass + if play.become_pass: + self.become_pass = play.become_pass # non connection related self.no_log = play.no_log @@ -132,7 +133,6 @@ def set_options(self, options): self.become = options.become self.become_method = options.become_method self.become_user = options.become_user - self.become_pass = '' # general flags (should we move out?) 
if options.verbosity: From c2f26ad95d290ec7749cbdf8ed64e099603d6324 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 14:04:26 -0400 Subject: [PATCH 0784/3617] added iam, prefixed amazon modules --- CHANGELOG.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 580a9b5a1e84af..213156e4dc7f08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,10 +14,10 @@ Deprecated Modules (new ones in parens): * nova_compute (os_server) New Modules: - * find - * ec2_ami_find - * elasticache_subnet_group - * ec2_win_password + * amazon: ec2_ami_find + * amazon: elasticache_subnet_group + * amazon: ec2_win_password + * amazon: iam * circonus_annotation * consul * consul_acl @@ -36,6 +36,7 @@ New Modules: * cloudstack: cs_securitygroup * cloudstack: cs_securitygroup_rule * cloudstack: cs_vmsnapshot + * find * maven_artifact * openstack: os_network * openstack: os_server From f9b56a5d7c954e60011a31090839ede1bc1ffcb2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 5 Jun 2015 11:41:23 -0700 Subject: [PATCH 0785/3617] Fix raising AnsibleError --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 43a6084cbd06eb..3cd5d8c264f265 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -27,7 +27,7 @@ import subprocess from ansible import constants as C -from ansible.errors import * +from ansible import errors from ansible.inventory.ini import InventoryParser from ansible.inventory.script import InventoryScript From 45b4ee9cfe2e2d0786422f9f7402beca631b0c78 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 5 Jun 2015 14:10:00 -0500 Subject: [PATCH 0786/3617] Don't allow setting become_pass in a play --- lib/ansible/executor/connection_info.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/executor/connection_info.py 
b/lib/ansible/executor/connection_info.py index 03d9039c4971f5..d8881f54ab79f2 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -109,8 +109,6 @@ def set_play(self, play): self.become_method = play.become_method if play.become_user: self.become_user = play.become_user - if play.become_pass: - self.become_pass = play.become_pass # non connection related self.no_log = play.no_log From 6f5ebb4489394fdd6520c14d5dc60dd0fa4e71f2 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 5 Jun 2015 16:02:29 -0500 Subject: [PATCH 0787/3617] Fix syntax error in winrm --- lib/ansible/plugins/connections/winrm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py index 2bc1ee00539f9c..f2624e5b1ac1dd 100644 --- a/lib/ansible/plugins/connections/winrm.py +++ b/lib/ansible/plugins/connections/winrm.py @@ -153,7 +153,7 @@ def _connect(self): return self def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): - super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data,in_data) + super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) cmd = cmd.encode('utf-8') cmd_parts = shlex.split(cmd, posix=False) From 49d19e82ab4488aafbd605dc5dc551fb862ba7df Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 5 Jun 2015 15:34:37 -0500 Subject: [PATCH 0788/3617] Get tests passing The largest failure in the tests was due to selinux not being installed. The tests don't require it to be installed, so mock the import. 
--- test/units/module_utils/test_basic.py | 126 ++++++++++-------- .../plugins/strategies/test_strategy_base.py | 10 +- 2 files changed, 78 insertions(+), 58 deletions(-) diff --git a/test/units/module_utils/test_basic.py b/test/units/module_utils/test_basic.py index 757a5f87d74a48..e1e3399b930024 100644 --- a/test/units/module_utils/test_basic.py +++ b/test/units/module_utils/test_basic.py @@ -26,7 +26,7 @@ from nose.tools import timed from ansible.compat.tests import unittest -from ansible.compat.tests.mock import patch, MagicMock, mock_open +from ansible.compat.tests.mock import patch, MagicMock, mock_open, Mock class TestModuleUtilsBasic(unittest.TestCase): @@ -71,7 +71,7 @@ def _dist(distname='', version='', id='', supported_dists=(), full_distribution_ return ("", "", "") with patch('platform.linux_distribution', side_effect=_dist): - self.assertEqual(get_distribution(), "Amazon") + self.assertEqual(get_distribution(), "Amazonfoobar") def _dist(distname='', version='', id='', supported_dists=(), full_distribution_name=1): if supported_dists != (): @@ -80,7 +80,7 @@ def _dist(distname='', version='', id='', supported_dists=(), full_distribution_ return ("", "", "") with patch('platform.linux_distribution', side_effect=_dist): - self.assertEqual(get_distribution(), "OtherLinux") + self.assertEqual(get_distribution(), "Bar") with patch('platform.linux_distribution', side_effect=Exception("boo")): with patch('platform.dist', return_value=("bar", "2", "Two")): @@ -356,10 +356,13 @@ def test_module_utils_basic_ansible_module_selinux_mls_enabled(self): self.assertEqual(am.selinux_mls_enabled(), False) basic.HAVE_SELINUX = True - with patch('selinux.is_selinux_mls_enabled', return_value=0): - self.assertEqual(am.selinux_mls_enabled(), False) - with patch('selinux.is_selinux_mls_enabled', return_value=1): - self.assertEqual(am.selinux_mls_enabled(), True) + basic.selinux = Mock() + with patch.dict('sys.modules', {'selinux': basic.selinux}): + with 
patch('selinux.is_selinux_mls_enabled', return_value=0): + self.assertEqual(am.selinux_mls_enabled(), False) + with patch('selinux.is_selinux_mls_enabled', return_value=1): + self.assertEqual(am.selinux_mls_enabled(), True) + delattr(basic, 'selinux') def test_module_utils_basic_ansible_module_selinux_initial_context(self): from ansible.module_utils import basic @@ -399,10 +402,13 @@ def test_module_utils_basic_ansible_module_selinux_enabled(self): # finally we test the case where the python selinux lib is installed, # and both possibilities there (enabled vs. disabled) basic.HAVE_SELINUX = True - with patch('selinux.is_selinux_enabled', return_value=0): - self.assertEqual(am.selinux_enabled(), False) - with patch('selinux.is_selinux_enabled', return_value=1): - self.assertEqual(am.selinux_enabled(), True) + basic.selinux = Mock() + with patch.dict('sys.modules', {'selinux': basic.selinux}): + with patch('selinux.is_selinux_enabled', return_value=0): + self.assertEqual(am.selinux_enabled(), False) + with patch('selinux.is_selinux_enabled', return_value=1): + self.assertEqual(am.selinux_enabled(), True) + delattr(basic, 'selinux') def test_module_utils_basic_ansible_module_selinux_default_context(self): from ansible.module_utils import basic @@ -422,18 +428,23 @@ def test_module_utils_basic_ansible_module_selinux_default_context(self): # all following tests assume the python selinux bindings are installed basic.HAVE_SELINUX = True - # next, we test with a mocked implementation of selinux.matchpathcon to simulate - # an actual context being found - with patch('selinux.matchpathcon', return_value=[0, 'unconfined_u:object_r:default_t:s0']): - self.assertEqual(am.selinux_default_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0']) + basic.selinux = Mock() + + with patch.dict('sys.modules', {'selinux': basic.selinux}): + # next, we test with a mocked implementation of selinux.matchpathcon to simulate + # an actual context being found + with 
patch('selinux.matchpathcon', return_value=[0, 'unconfined_u:object_r:default_t:s0']): + self.assertEqual(am.selinux_default_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0']) - # we also test the case where matchpathcon returned a failure - with patch('selinux.matchpathcon', return_value=[-1, '']): - self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) + # we also test the case where matchpathcon returned a failure + with patch('selinux.matchpathcon', return_value=[-1, '']): + self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) - # finally, we test where an OSError occurred during matchpathcon's call - with patch('selinux.matchpathcon', side_effect=OSError): - self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) + # finally, we test where an OSError occurred during matchpathcon's call + with patch('selinux.matchpathcon', side_effect=OSError): + self.assertEqual(am.selinux_default_context(path='/foo/bar'), [None, None, None, None]) + + delattr(basic, 'selinux') def test_module_utils_basic_ansible_module_selinux_context(self): from ansible.module_utils import basic @@ -453,24 +464,29 @@ def test_module_utils_basic_ansible_module_selinux_context(self): # all following tests assume the python selinux bindings are installed basic.HAVE_SELINUX = True - # next, we test with a mocked implementation of selinux.lgetfilecon_raw to simulate - # an actual context being found - with patch('selinux.lgetfilecon_raw', return_value=[0, 'unconfined_u:object_r:default_t:s0']): - self.assertEqual(am.selinux_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0']) + basic.selinux = Mock() + + with patch.dict('sys.modules', {'selinux': basic.selinux}): + # next, we test with a mocked implementation of selinux.lgetfilecon_raw to simulate + # an actual context being found + with patch('selinux.lgetfilecon_raw', return_value=[0, 
'unconfined_u:object_r:default_t:s0']): + self.assertEqual(am.selinux_context(path='/foo/bar'), ['unconfined_u', 'object_r', 'default_t', 's0']) - # we also test the case where matchpathcon returned a failure - with patch('selinux.lgetfilecon_raw', return_value=[-1, '']): - self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None]) + # we also test the case where matchpathcon returned a failure + with patch('selinux.lgetfilecon_raw', return_value=[-1, '']): + self.assertEqual(am.selinux_context(path='/foo/bar'), [None, None, None, None]) - # finally, we test where an OSError occurred during matchpathcon's call - e = OSError() - e.errno = errno.ENOENT - with patch('selinux.lgetfilecon_raw', side_effect=e): - self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar') + # finally, we test where an OSError occurred during matchpathcon's call + e = OSError() + e.errno = errno.ENOENT + with patch('selinux.lgetfilecon_raw', side_effect=e): + self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar') - e = OSError() - with patch('selinux.lgetfilecon_raw', side_effect=e): - self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar') + e = OSError() + with patch('selinux.lgetfilecon_raw', side_effect=e): + self.assertRaises(SystemExit, am.selinux_context, path='/foo/bar') + + delattr(basic, 'selinux') def test_module_utils_basic_ansible_module_is_special_selinux_path(self): from ansible.module_utils import basic @@ -583,26 +599,30 @@ def test_module_utils_basic_ansible_module_set_context_if_different(self): am.selinux_context = MagicMock(return_value=['bar_u', 'bar_r', None, None]) am.is_special_selinux_path = MagicMock(return_value=(False, None)) - with patch('selinux.lsetfilecon', return_value=0) as m: - self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) - m.assert_called_with('/path/to/file', 'foo_u:foo_r:foo_t:s0') - m.reset_mock() - am.check_mode = True - 
self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) - self.assertEqual(m.called, False) - am.check_mode = False + basic.selinux = Mock() + with patch.dict('sys.modules', {'selinux': basic.selinux}): + with patch('selinux.lsetfilecon', return_value=0) as m: + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) + m.assert_called_with('/path/to/file', 'foo_u:foo_r:foo_t:s0') + m.reset_mock() + am.check_mode = True + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) + self.assertEqual(m.called, False) + am.check_mode = False - with patch('selinux.lsetfilecon', return_value=1) as m: - self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True) + with patch('selinux.lsetfilecon', return_value=1) as m: + self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True) - with patch('selinux.lsetfilecon', side_effect=OSError) as m: - self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True) + with patch('selinux.lsetfilecon', side_effect=OSError) as m: + self.assertRaises(SystemExit, am.set_context_if_different, '/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], True) - am.is_special_selinux_path = MagicMock(return_value=(True, ['sp_u', 'sp_r', 'sp_t', 's0'])) - - with patch('selinux.lsetfilecon', return_value=0) as m: - self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 's0'], False), True) - m.assert_called_with('/path/to/file', 'sp_u:sp_r:sp_t:s0') + am.is_special_selinux_path = MagicMock(return_value=(True, ['sp_u', 'sp_r', 'sp_t', 's0'])) + + with patch('selinux.lsetfilecon', return_value=0) as m: + self.assertEqual(am.set_context_if_different('/path/to/file', ['foo_u', 'foo_r', 'foo_t', 
's0'], False), True) + m.assert_called_with('/path/to/file', 'sp_u:sp_r:sp_t:s0') + + delattr(basic, 'selinux') def test_module_utils_basic_ansible_module_set_owner_if_different(self): from ansible.module_utils import basic diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 4c177f73434fb5..5298b1e42bff79 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -55,15 +55,15 @@ def test_strategy_base_run(self): mock_conn_info = MagicMock() - mock_tqm._failed_hosts = [] - mock_tqm._unreachable_hosts = [] + mock_tqm._failed_hosts = dict() + mock_tqm._unreachable_hosts = dict() strategy_base = StrategyBase(tqm=mock_tqm) - self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info), 0) + self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info), 0) self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info, result=False), 1) - mock_tqm._failed_hosts = ["host1"] + mock_tqm._failed_hosts = dict(host1=True) self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info, result=False), 2) - mock_tqm._unreachable_hosts = ["host1"] + mock_tqm._unreachable_hosts = dict(host1=True) self.assertEqual(strategy_base.run(iterator=mock_iterator, connection_info=mock_conn_info, result=False), 3) def test_strategy_base_get_hosts(self): From 24fd4faa28d4f310e52189b827650176f24f4c81 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 18:42:14 -0400 Subject: [PATCH 0789/3617] avoid removing test all~ file --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e01e1a9713c6aa..7533e648c5c2c3 100644 --- a/Makefile +++ b/Makefile @@ -136,7 +136,7 @@ clean: @echo "Cleaning up byte compiled python stuff" find . 
-type f -regex ".*\.py[co]$$" -delete @echo "Cleaning up editor backup files" - find . -type f \( -name "*~" -or -name "#*" \) -delete + find . -type f \( -name "*~" -or -name "#*" \) |grep -v test/units/inventory_test_data/group_vars/noparse/all.yml~ |xargs -n 1024 -r rm find . -type f \( -name "*.swp" \) -delete @echo "Cleaning up manpage stuff" find ./docs/man -type f -name "*.xml" -delete From d67c9858a9716f28712458c671ecd68f16de94bc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 18:43:42 -0400 Subject: [PATCH 0790/3617] removed become_pass as it should not be used --- lib/ansible/playbook/become.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py index fca28538585917..0323a9b613b9d5 100644 --- a/lib/ansible/playbook/become.py +++ b/lib/ansible/playbook/become.py @@ -30,7 +30,6 @@ class Become: _become = FieldAttribute(isa='bool') _become_method = FieldAttribute(isa='string') _become_user = FieldAttribute(isa='string') - _become_pass = FieldAttribute(isa='string') def __init__(self): return super(Become, self).__init__() @@ -128,14 +127,3 @@ def _get_attr_become_user(self): return self._get_parent_attribute('become_user') else: return self._attributes['become_user'] - - def _get_attr_become_password(self): - ''' - Override for the 'become_password' getattr fetcher, used from Base. 
- ''' - if hasattr(self, '_get_parent_attribute'): - return self._get_parent_attribute('become_password') - else: - return self._attributes['become_password'] - - From ed57f0732bf015d871be75efddb8db3b1c1046d1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 19:22:06 -0400 Subject: [PATCH 0791/3617] added os_image and deprecated glance_image --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 213156e4dc7f08..d21d5908f52b00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ Major Changes: Deprecated Modules (new ones in parens): * ec2_ami_search (ec2_ami_find) * quantum_network (os_network) + * glance_image * nova_compute (os_server) New Modules: @@ -38,6 +39,7 @@ New Modules: * cloudstack: cs_vmsnapshot * find * maven_artifact + * openstack: os_image * openstack: os_network * openstack: os_server * openstack: os_server_actions From 3dd40b61d9c79ff946edb22b4b488e5cde1fd0ba Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 21:46:06 -0400 Subject: [PATCH 0792/3617] added puppet module --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d21d5908f52b00..88752b1393b8a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ New Modules: * openstack: os_volume * proxmox * proxmox_template + * puppet * pushover * pushbullet * rabbitmq_binding From 3edbe17d3f33b3e67ecc9903bf274aa20c6af7a0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 21:51:41 -0400 Subject: [PATCH 0793/3617] added datadog monitor to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88752b1393b8a5..f15e8a1e6a0891 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ New Modules: * cloudstack: cs_securitygroup * cloudstack: cs_securitygroup_rule * cloudstack: cs_vmsnapshot + * datadog_monitor * find * maven_artifact * openstack: os_image From 
47761461542e00675e53bb9a11256812edbc15e8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 22:23:50 -0400 Subject: [PATCH 0794/3617] added openstack client config module to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f15e8a1e6a0891..48df725bbc49d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ New Modules: * datadog_monitor * find * maven_artifact + * openstack: os_client_config * openstack: os_image * openstack: os_network * openstack: os_server From 6a5a930c5aea9ddb1821db23a387f5919c6df819 Mon Sep 17 00:00:00 2001 From: Edward J Kim Date: Fri, 5 Jun 2015 23:04:21 -0400 Subject: [PATCH 0795/3617] Add missing import in vault.py --- lib/ansible/cli/vault.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index 6231f74332acfe..05a48065771c2e 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -25,6 +25,7 @@ from ansible.parsing.vault import VaultEditor from ansible.cli import CLI from ansible.utils.display import Display +from ansible.utils.vault import read_vault_file class VaultCLI(CLI): """ Vault command line class """ From bdba807fd1b03d888db6ad19d13cc3f6ec47f968 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 6 Jun 2015 00:16:35 -0400 Subject: [PATCH 0796/3617] minor fixes to ssh error reporting shoudl fix #11041 --- lib/ansible/plugins/action/__init__.py | 20 ++++++++++---------- lib/ansible/plugins/connections/ssh.py | 24 +++++++++++++++++------- 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 5509bb2d94c9f5..4b2d7abe27aed3 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -161,12 +161,12 @@ def _make_tmp_path(self): if result['rc'] == 5: output = 'Authentication failure.' 
elif result['rc'] == 255 and self._connection.transport in ('ssh',): - # FIXME: more utils.VERBOSITY - #if utils.VERBOSITY > 3: - # output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr']) - #else: - # output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue' - output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr']) + + if self._connection_info.verbosity > 3: + output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr']) + else: + output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue' + elif 'No space left on device' in result['stderr']: output = result['stderr'] else: @@ -462,7 +462,7 @@ def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, i err = stderr debug("done with _low_level_execute_command() (%s)" % (cmd,)) - if rc is not None: - return dict(rc=rc, stdout=out, stderr=err) - else: - return dict(stdout=out, stderr=err) + if rc is None: + rc = 0 + + return dict(rc=rc, stdout=out, stderr=err) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index e2251ca5b0d816..4a3ea4f5a2d47d 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -398,14 +398,14 @@ def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) - self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr) + # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH + host = self._connection_info.remote_addr + + self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=host) if not os.path.exists(in_path): raise 
AnsibleFileNotFound("file or module does not exist: {0}".format(in_path)) cmd = self._password_cmd() - # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH - host = self._connection_info.remote_addr - # FIXME: ipv6 stuff needs to be figured out. It's in the connection info, however # not sure if it's all working yet so this remains commented out #if self._ipv6: @@ -436,12 +436,13 @@ def fetch_file(self, in_path, out_path): super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr) - cmd = self._password_cmd() - # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH host = self._connection_info.remote_addr + self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=host) + cmd = self._password_cmd() + + # FIXME: ipv6 stuff needs to be figured out. It's in the connection info, however # not sure if it's all working yet so this remains commented out #if self._ipv6: @@ -467,5 +468,14 @@ def fetch_file(self, in_path, out_path): def close(self): ''' not applicable since we're executing openssh binaries ''' + + if 'ControlMaster' in self._common_args: + cmd = ['ssh','-O','stop'] + cmd.extend(self._common_args) + cmd.append(self._connection_info.remote_addr) + + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + self._connected = False From 6a1c175991e083f76d98a2340a89f088004cb31b Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Sat, 6 Jun 2015 09:13:14 -0500 Subject: [PATCH 0797/3617] Raise AnsibleParserError instead of AssertionError --- lib/ansible/playbook/helpers.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py index 302e14a6e097a4..d98241397180bc 100644 --- a/lib/ansible/playbook/helpers.py +++ b/lib/ansible/playbook/helpers.py @@ -36,7 +36,8 @@ def 
load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=Non # we import here to prevent a circular dependency with imports from ansible.playbook.block import Block - assert ds is None or isinstance(ds, list), 'block has bad type: %s' % type(ds) + if not isinstance(ds, (list, type(None))): + raise AnsibleParserError('block has bad type: "%s". Expecting "list"' % type(ds).__name__, obj=ds) block_list = [] if ds: @@ -67,12 +68,13 @@ def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_h from ansible.playbook.handler import Handler from ansible.playbook.task import Task - assert isinstance(ds, list), 'task has bad type: %s' % type(ds) + if not isinstance(ds, list): + raise AnsibleParserError('task has bad type: "%s". Expected "list"' % type(ds).__name__, obj=ds) task_list = [] for task in ds: if not isinstance(task, dict): - raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds) + raise AnsibleParserError('task/handler has bad type: "%s". Expected "dict"' % type(task).__name__, obj=task) if 'block' in task: t = Block.load( @@ -105,7 +107,8 @@ def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader # we import here to prevent a circular dependency with imports from ansible.playbook.role.include import RoleInclude - assert isinstance(ds, list), 'roles has bad type: %s' % type(ds) + if not isinstance(ds, list): + raise AnsibleParserError('roles has bad type: "%s". 
Expectes "list"' % type(ds).__name__, obj=ds) roles = [] for role_def in ds: From 230be812ba24700fd3108128e83204c03c487005 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Sat, 6 Jun 2015 09:23:28 -0500 Subject: [PATCH 0798/3617] Don't test for play.become_pass any longer --- test/units/executor/test_connection_information.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/units/executor/test_connection_information.py b/test/units/executor/test_connection_information.py index 65575c0f93df87..010639d3683a04 100644 --- a/test/units/executor/test_connection_information.py +++ b/test/units/executor/test_connection_information.py @@ -72,7 +72,6 @@ def test_connection_info(self): mock_play.become = True mock_play.become_method = 'mock' mock_play.become_user = 'mockroot' - mock_play.become_pass = 'mockpass' mock_play.no_log = True mock_play.environment = dict(mock='mockenv') @@ -86,7 +85,6 @@ def test_connection_info(self): self.assertEqual(conn_info.become, True) self.assertEqual(conn_info.become_method, "mock") self.assertEqual(conn_info.become_user, "mockroot") - self.assertEqual(conn_info.become_pass, "mockpass") mock_task = MagicMock() mock_task.connection = 'mocktask' From 20df50e11c1b3294e3c8fa2e33afaef8ef8ab574 Mon Sep 17 00:00:00 2001 From: "E. Dunham" Date: Sat, 6 Jun 2015 21:35:51 -0700 Subject: [PATCH 0799/3617] Fix confusing wording about aliases Point out that nicknames for hosts can go in the hosts file, by fixing wording that seemed contradictory. 
--- docsite/rst/intro_inventory.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index 5c38372e76b0dc..6dcaff008b5a07 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -46,7 +46,7 @@ To make things explicit, it is suggested that you set them if things are not run badwolf.example.com:5309 -Suppose you have just static IPs and want to set up some aliases that don't live in your host file, or you are connecting through tunnels. You can do things like this:: +Suppose you have just static IPs and want to set up some aliases that live in your host file, or you are connecting through tunnels. You can also describe hosts like this:: jumper ansible_ssh_port=5555 ansible_ssh_host=192.168.1.50 From f0fb2e7f655476ff08f7d1cba12cde9cea9dc866 Mon Sep 17 00:00:00 2001 From: joshainglis Date: Mon, 8 Jun 2015 13:01:54 +1000 Subject: [PATCH 0800/3617] Removed shebang line from .ini file --- plugins/inventory/ovirt.ini | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/inventory/ovirt.ini b/plugins/inventory/ovirt.ini index 2ea05dc55e33cd..a52f9d63ff5ba6 100644 --- a/plugins/inventory/ovirt.ini +++ b/plugins/inventory/ovirt.ini @@ -1,4 +1,3 @@ -#!/usr/bin/python # Copyright 2013 Google Inc. 
# # This file is part of Ansible From 97bc92d2df1929012f83b7327daa5eb04cb324ee Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 6 Jun 2015 00:33:28 -0400 Subject: [PATCH 0801/3617] minor cleanup, pushed ipv6 fixing to init pushed up using host short var made close conditional, started implementing close for controlmaster --- lib/ansible/plugins/connections/ssh.py | 47 +++++++++++++------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 4a3ea4f5a2d47d..4dd27004921a75 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -46,6 +46,9 @@ def __init__(self, *args, **kwargs): self.HASHED_KEY_MAGIC = "|1|" self._has_pipelining = True + # FIXME: make this work, should be set from connection info + self._ipv6 = False + # FIXME: move the lockfile locations to ActionBase? #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX) #self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700) @@ -275,6 +278,8 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) + host = self._connection_info.remote_addr + ssh_cmd = self._password_cmd() ssh_cmd += ("ssh", "-C") if not in_data: @@ -288,16 +293,14 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): ssh_cmd.append("-q") ssh_cmd += self._common_args - # FIXME: ipv6 stuff needs to be figured out. 
It's in the connection info, however - # not sure if it's all working yet so this remains commented out - #if self._ipv6: - # ssh_cmd += ['-6'] - ssh_cmd.append(self._connection_info.remote_addr) + if self._ipv6: + ssh_cmd += ['-6'] + ssh_cmd.append(host) ssh_cmd.append(cmd) - self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=self._connection_info.remote_addr) + self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=host) - not_in_host_file = self.not_in_host_file(self._connection_info.remote_addr) + not_in_host_file = self.not_in_host_file(host) # FIXME: move the locations of these lock files, same as init above #if C.HOST_KEY_CHECKING and not_in_host_file: @@ -400,17 +403,14 @@ def put_file(self, in_path, out_path): # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH host = self._connection_info.remote_addr + if self._ipv6: + host = '[%s]' % host self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=host) if not os.path.exists(in_path): raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path)) cmd = self._password_cmd() - # FIXME: ipv6 stuff needs to be figured out. It's in the connection info, however - # not sure if it's all working yet so this remains commented out - #if self._ipv6: - # host = '[%s]' % host - if C.DEFAULT_SCP_IF_SSH: cmd.append('scp') cmd.extend(self._common_args) @@ -438,16 +438,13 @@ def fetch_file(self, in_path, out_path): # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH host = self._connection_info.remote_addr + if self._ipv6: + host = '[%s]' % host self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=host) cmd = self._password_cmd() - # FIXME: ipv6 stuff needs to be figured out. 
It's in the connection info, however - # not sure if it's all working yet so this remains commented out - #if self._ipv6: - # host = '[%s]' % self._connection_info.remote_addr - if C.DEFAULT_SCP_IF_SSH: cmd.append('scp') cmd.extend(self._common_args) @@ -469,13 +466,15 @@ def fetch_file(self, in_path, out_path): def close(self): ''' not applicable since we're executing openssh binaries ''' - if 'ControlMaster' in self._common_args: - cmd = ['ssh','-O','stop'] - cmd.extend(self._common_args) - cmd.append(self._connection_info.remote_addr) + if self._connected: - p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = p.communicate() + if 'ControlMaster' in self._common_args: + cmd = ['ssh','-O','stop'] + cmd.extend(self._common_args) + cmd.append(self._connection_info.remote_addr) + + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = p.communicate() - self._connected = False + self._connected = False From bbfc982dd54ba2697f3ca5d8048d49f55403394a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 8 Jun 2015 10:52:19 -0400 Subject: [PATCH 0802/3617] added pear module to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 48df725bbc49d9..85bb0e3ca9d069 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ New Modules: * openstack: os_server_volume * openstack: os_subnet * openstack: os_volume + * pear * proxmox * proxmox_template * puppet From e88a9e943c78699af422078e1b7dbc836cb2fb00 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 8 Jun 2015 11:15:11 -0700 Subject: [PATCH 0803/3617] Use to_bytes to avoid tracebacks when passed a byte str instead of a unicode string Fixes #11198 --- lib/ansible/plugins/connections/winrm.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connections/winrm.py 
b/lib/ansible/plugins/connections/winrm.py index f2624e5b1ac1dd..4da04b549a56d5 100644 --- a/lib/ansible/plugins/connections/winrm.py +++ b/lib/ansible/plugins/connections/winrm.py @@ -45,6 +45,7 @@ from ansible.plugins.connections import ConnectionBase from ansible.plugins import shell_loader from ansible.utils.path import makedirs_safe +from ansible.utils.unicode import to_bytes class Connection(ConnectionBase): @@ -155,7 +156,7 @@ def _connect(self): def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) - cmd = cmd.encode('utf-8') + cmd = to_bytes(cmd) cmd_parts = shlex.split(cmd, posix=False) if '-EncodedCommand' in cmd_parts: encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1] @@ -172,7 +173,9 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): except Exception as e: traceback.print_exc() raise AnsibleError("failed to exec cmd %s" % cmd) - return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8')) + result.std_out = to_bytes(result.std_out) + result.std_err = to_bytes(result.std_err) + return (result.status_code, '', result.std_out, result.std_err) def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) From 597d3a5eaaea3fd39736b09446a50c45015702e8 Mon Sep 17 00:00:00 2001 From: Tim Gerla Date: Mon, 8 Jun 2015 19:32:44 -0400 Subject: [PATCH 0804/3617] add an example of multiple plays in a single playbook --- docsite/rst/playbooks_intro.rst | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 4fe2ab3ec3f2f9..c5b2aebe108078 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -106,6 +106,33 @@ YAML dictionaries to supply the modules with their key=value arguments.:: name: httpd state: restarted +Playbooks can 
contain multiple plays. You may have a playbook that targets first +the web servers, and then the database servers. For example:: + + --- + - hosts: webservers + remote_user: root + + tasks: + - name: ensure apache is at the latest version + yum: pkg=httpd state=latest + - name: write the apache config file + template: src=/srv/httpd.j2 dest=/etc/httpd.conf + + - hosts: databases + remote_user: root + + tasks: + - name: ensure postgresql is at the latest version + yum: name=postgresql state=latest + - name: ensure that postgresql is started + service: name=postgresql state=running + +You can use this method to switch between the host group you're targeting, +the username logging into the remote servers, whether to sudo or not, and so +forth. Plays, like tasks, run in the order specified in the playbook: top to +bottom. + Below, we'll break down what the various features of the playbook language are. .. _playbook_basics: From 70b5c28694031186a8b8b41276cc48689b136ae0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 8 Jun 2015 20:10:45 -0400 Subject: [PATCH 0805/3617] initial implementation of the generic OS package module --- lib/ansible/plugins/action/package.py | 55 +++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 lib/ansible/plugins/action/package.py diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py new file mode 100644 index 00000000000000..fbda51fcbb3b10 --- /dev/null +++ b/lib/ansible/plugins/action/package.py @@ -0,0 +1,55 @@ +# (c) 2015, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +from ansible.plugins.action import ActionBase + +class ActionModule(ActionBase): + + TRANSFERS_FILES = True + + def run(self, tmp=None, task_vars=dict()): + ''' handler for package operations ''' + + name = self._task.args.get('name', None) + state = self._task.args.get('state', None) + module = self._task.args.get('use', None) + + if module is None: + try: + module = self._templar.template('{{ansible_pkg_mgr}}') + except: + pass # could not get it from template! + + if moduel is None: + #TODO: autodetect the package manager, by invoking that specific fact snippet remotely + pass + + + if module is not None: + # run the 'package' module + new_module_args = self._task.args.copy() + if 'use' in new_module_args: + del new_module_args['use'] + + return self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars) + + else: + + return {'failed': True, 'msg': 'Could not detect which package manager to use. 
Try gathering facts or setting the "use" option.'} From 45f80328ae9d1fbe37cc140f84f94c03c3a6f761 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 8 Jun 2015 20:14:47 -0400 Subject: [PATCH 0806/3617] updated submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b138411671194e..d6ed6113a77a6e 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b138411671194e3ec236d8ec3d27bcf32447350d +Subproject commit d6ed6113a77a6e327cf12d3955022321c5b12efe diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 1276420a3a3934..57813a2e746aa7 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 1276420a3a39340fcd9e053a1e621cdd89f480fa +Subproject commit 57813a2e746aa79db6b6b1ef321b8c9a9345359a From 8e3213a91eb25a4415c1743df933fe07c1e3a334 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 8 Jun 2015 20:20:07 -0400 Subject: [PATCH 0807/3617] updated copyright as MPD does not deserve the blame for this one --- lib/ansible/plugins/action/package.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py index fbda51fcbb3b10..d21774d85cd0d9 100644 --- a/lib/ansible/plugins/action/package.py +++ b/lib/ansible/plugins/action/package.py @@ -1,4 +1,4 @@ -# (c) 2015, Michael DeHaan +# (c) 2015, Ansible Inc, # # This file is part of Ansible # From 64ffa160dc6765700a9e5b5c2b544ba70da3bd76 Mon Sep 17 00:00:00 2001 From: joshainglis Date: Tue, 9 Jun 2015 11:05:20 +1000 Subject: [PATCH 0808/3617] Fixed shebang in module example --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index ddd4e90c82a3a3..9e784c6418e688 
100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -370,7 +370,7 @@ See an example documentation string in the checkout under `examples/DOCUMENTATIO Include it in your module file like this:: - #!/usr/bin/env python + #!/usr/bin/python # Copyright header.... DOCUMENTATION = ''' From 6fa7a1149367969baed582b583b7216db1b1a624 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 9 Jun 2015 10:03:39 -0400 Subject: [PATCH 0809/3617] added iam_policy --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 85bb0e3ca9d069..23a0f8e21954e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ New Modules: * amazon: elasticache_subnet_group * amazon: ec2_win_password * amazon: iam + * amazon: iam_policy * circonus_annotation * consul * consul_acl From fc3020c57a55fc009feeb80b54186c695edc3233 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Tue, 9 Jun 2015 16:16:58 +0200 Subject: [PATCH 0810/3617] cloudstack: prevent getting the wrong project. Since we use domain and account data to filter the project, listall is not needed and can return the wrong identical named project of another account if root admin permissions are used. Fixed projects names are not case insensitive. 
--- lib/ansible/module_utils/cloudstack.py | 4 ++-- v1/ansible/module_utils/cloudstack.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 82306b9a0be87d..86ccef588e375d 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -124,13 +124,12 @@ def get_project(self, key=None): if not project: return None args = {} - args['listall'] = True args['account'] = self.get_account(key='name') args['domainid'] = self.get_domain(key='id') projects = self.cs.listProjects(**args) if projects: for p in projects['project']: - if project in [ p['name'], p['displaytext'], p['id'] ]: + if project.lower() in [ p['name'].lower(), p['id'] ]: self.project = p return self._get_by_key(key, self.project) self.module.fail_json(msg="project '%s' not found" % project) @@ -361,6 +360,7 @@ def get_capabilities(self, key=None): self.capabilities = capabilities['capability'] return self._get_by_key(key, self.capabilities) + # TODO: rename to poll_job() def _poll_job(self, job=None, key=None): if 'jobid' in job: diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py index e887367c2fd69b..2b4ec0be17dd22 100644 --- a/v1/ansible/module_utils/cloudstack.py +++ b/v1/ansible/module_utils/cloudstack.py @@ -122,13 +122,12 @@ def get_project(self, key=None): if not project: return None args = {} - args['listall'] = True args['account'] = self.get_account(key='name') args['domainid'] = self.get_domain(key='id') projects = self.cs.listProjects(**args) if projects: for p in projects['project']: - if project in [ p['name'], p['displaytext'], p['id'] ]: + if project.lower() in [ p['name'].lower(), p['id'] ]: self.project = p return self._get_by_key(key, self.project) self.module.fail_json(msg="project '%s' not found" % project) @@ -359,6 +358,7 @@ def get_capabilities(self, key=None): self.capabilities = 
capabilities['capability'] return self._get_by_key(key, self.capabilities) + # TODO: rename to poll_job() def _poll_job(self, job=None, key=None): if 'jobid' in job: From 19161dfd72500149b94bdd78f030b1311b390dab Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 9 Jun 2015 11:45:53 -0400 Subject: [PATCH 0811/3617] fixed typo in placeholder check --- lib/ansible/plugins/action/package.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py index d21774d85cd0d9..89ac1b026c02b1 100644 --- a/lib/ansible/plugins/action/package.py +++ b/lib/ansible/plugins/action/package.py @@ -37,7 +37,7 @@ def run(self, tmp=None, task_vars=dict()): except: pass # could not get it from template! - if moduel is None: + if module is None: #TODO: autodetect the package manager, by invoking that specific fact snippet remotely pass From 652daf3db4c3f780d6cea6f2002460471df8981f Mon Sep 17 00:00:00 2001 From: Dave James Miller Date: Tue, 9 Jun 2015 19:48:38 +0100 Subject: [PATCH 0812/3617] Remove duplicated "By default" in docs --- docsite/rst/intro_inventory.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index 6dcaff008b5a07..d97032e0635dc6 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -216,7 +216,7 @@ mentioned:: ansible_ssh_private_key_file Private key file used by ssh. Useful if using multiple keys and you don't want to use SSH agent. ansible_shell_type - The shell type of the target system. By default commands are formatted using 'sh'-style syntax by default. Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow those shell's syntax instead. + The shell type of the target system. Commands are formatted using 'sh'-style syntax by default. 
Setting this to 'csh' or 'fish' will cause commands executed on target systems to follow those shell's syntax instead. ansible_python_interpreter The target host python path. This is useful for systems with more than one Python or not located at "/usr/bin/python" such as \*BSD, or where /usr/bin/python From 5aec5e5eb0bd5fce426df580c76dbff7c741c933 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 9 Jun 2015 17:24:06 -0400 Subject: [PATCH 0813/3617] fixed ansible pull, reorged validate function for cli to be function specific like parser added missing cmd_functions with run_cmd, mostly for ansible pull --- lib/ansible/cli/__init__.py | 45 ++++++++++++----------- lib/ansible/cli/adhoc.py | 2 +- lib/ansible/cli/playbook.py | 3 +- lib/ansible/cli/pull.py | 18 +++++---- lib/ansible/utils/cmd_functions.py | 59 ++++++++++++++++++++++++++++++ 5 files changed, 97 insertions(+), 30 deletions(-) create mode 100644 lib/ansible/utils/cmd_functions.py diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index daf14aab1f7aaa..c2ae98b1b8096a 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -174,32 +174,34 @@ def normalize_become_options(self): options.become_method = 'su' - def validate_conflicts(self): + def validate_conflicts(self, vault_opts=False, runas_opts=False): ''' check for conflicting options ''' op = self.options - # Check for vault related conflicts - if (op.ask_vault_pass and op.vault_password_file): - self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") - + if vault_opts: + # Check for vault related conflicts + if (op.ask_vault_pass and op.vault_password_file): + self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") - # Check for privilege escalation conflicts - if (op.su or op.su_user or op.ask_su_pass) and \ - (op.sudo or op.sudo_user or op.ask_sudo_pass) or \ - (op.su or op.su_user or op.ask_su_pass) and \ - (op.become or op.become_user or 
op.become_ask_pass) or \ - (op.sudo or op.sudo_user or op.ask_sudo_pass) and \ - (op.become or op.become_user or op.become_ask_pass): - self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " - "and su arguments ('-su', '--su-user', and '--ask-su-pass') " - "and become arguments ('--become', '--become-user', and '--ask-become-pass')" - " are exclusive of each other") + if runas_opts: + # Check for privilege escalation conflicts + if (op.su or op.su_user or op.ask_su_pass) and \ + (op.sudo or op.sudo_user or op.ask_sudo_pass) or \ + (op.su or op.su_user or op.ask_su_pass) and \ + (op.become or op.become_user or op.become_ask_pass) or \ + (op.sudo or op.sudo_user or op.ask_sudo_pass) and \ + (op.become or op.become_user or op.become_ask_pass): + + self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " + "and su arguments ('-su', '--su-user', and '--ask-su-pass') " + "and become arguments ('--become', '--become-user', and '--ask-become-pass')" + " are exclusive of each other") @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, - async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None): + async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None, fork_opts=False): ''' create an options parser for most ansible scripts ''' #FIXME: implemente epilog parsing @@ -211,8 +213,6 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)") if runtask_opts: - parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', - help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) parser.add_option('-i', '--inventory-file', dest='inventory', help="specify inventory host file (default=%s)" % 
C.DEFAULT_HOST_LIST, default=C.DEFAULT_HOST_LIST) @@ -223,6 +223,10 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) + if fork_opts: + parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', + help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) + if vault_opts: parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', help='ask for vault password') @@ -273,7 +277,7 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, if connect_opts: parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true', help='ask for connection password') - parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', + parser.add_option('--private-key','--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file', help='use this file to authenticate the connection') parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user', help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER) @@ -282,7 +286,6 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout', help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT) - if async_opts: parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval', diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 9a055e5e625c43..0d63a562842483 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -60,7 +60,7 @@ def parse(self): raise AnsibleOptionsError("Missing target hosts") self.display.verbosity = self.options.verbosity - 
self.validate_conflicts() + self.validate_conflicts(runas_opts=True, vault_opts=True) return True diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index 1c59d5dde6ff64..e10ffb71d0b72e 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -55,6 +55,7 @@ def parse(self): diff_opts=True, runtask_opts=True, vault_opts=True, + fork_opts=True, ) # ansible playbook specific opts @@ -76,7 +77,7 @@ def parse(self): raise AnsibleOptionsError("You must specify a playbook file to run") self.display.verbosity = self.options.verbosity - self.validate_conflicts() + self.validate_conflicts(runas_opts=True, vault_opts=True) def run(self): diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 6b087d4ec060ce..0275a8c3475447 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -21,12 +21,15 @@ import random import shutil import socket +import sys from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.cli import CLI from ansible.utils.display import Display from ansible.utils.vault import read_vault_file +from ansible.utils.plugins import module_finder +from ansible.utils.cmd_functions import run_cmd ######################################################## @@ -48,6 +51,7 @@ def parse(self): usage='%prog [options]', connect_opts=True, vault_opts=True, + runtask_opts=True, ) # options unique to pull @@ -87,7 +91,7 @@ def parse(self): raise AnsibleOptionsError("Unsuported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES))) self.display.verbosity = self.options.verbosity - self.validate_conflicts() + self.validate_conflicts(vault_opts=True) def run(self): ''' use Runner lib to do SSH things ''' @@ -120,12 +124,12 @@ def run(self): if self.options.accept_host_key: repo_opts += ' accept_hostkey=yes' - if self.options.key_file: - repo_opts += ' key_file=%s' % options.key_file + if self.options.private_key_file: + 
repo_opts += ' key_file=%s' % self.options.private_key_file - path = utils.plugins.module_finder.find_plugin(options.module_name) + path = module_finder.find_plugin(self.options.module_name) if path is None: - raise AnsibleOptionsError(("module '%s' not found.\n" % options.module_name)) + raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) bin_path = os.path.dirname(os.path.abspath(__file__)) cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % ( @@ -141,7 +145,7 @@ def run(self): time.sleep(self.options.sleep); # RUN the Checkout command - rc, out, err = cmd_functions.run_cmd(cmd, live=True) + rc, out, err = run_cmd(cmd, live=True) if rc != 0: if self.options.force: @@ -173,7 +177,7 @@ def run(self): os.chdir(self.options.dest) # RUN THE PLAYBOOK COMMAND - rc, out, err = cmd_functions.run_cmd(cmd, live=True) + rc, out, err = run_cmd(cmd, live=True) if self.options.purge: os.chdir('/') diff --git a/lib/ansible/utils/cmd_functions.py b/lib/ansible/utils/cmd_functions.py new file mode 100644 index 00000000000000..7cb1912d07cbe4 --- /dev/null +++ b/lib/ansible/utils/cmd_functions.py @@ -0,0 +1,59 @@ +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +import os +import sys +import shlex +import subprocess +import select + +def run_cmd(cmd, live=False, readsize=10): + + #readsize = 10 + + cmdargs = shlex.split(cmd) + p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + stdout = '' + stderr = '' + rpipes = [p.stdout, p.stderr] + while True: + rfd, wfd, efd = select.select(rpipes, [], rpipes, 1) + + if p.stdout in rfd: + dat = os.read(p.stdout.fileno(), readsize) + if live: + sys.stdout.write(dat) + stdout += dat + if dat == '': + rpipes.remove(p.stdout) + if p.stderr in rfd: + dat = os.read(p.stderr.fileno(), readsize) + stderr += dat + if live: + sys.stdout.write(dat) + if dat == '': + rpipes.remove(p.stderr) + # only break out if we've emptied the pipes, or there is nothing to + # read from and the process has finished. + if (not rpipes or not rfd) and p.poll() is not None: + break + # Calling wait while there are still pipes to read can cause a lock + elif not rpipes and p.poll() == None: + p.wait() + + return p.returncode, stdout, stderr From fdeca3725785f9e5ee6554b05852f927f1cc8e82 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 9 Jun 2015 17:29:46 -0400 Subject: [PATCH 0814/3617] switched to argv[0] from __file__ as it is what we actually wanted --- bin/ansible | 2 +- lib/ansible/cli/pull.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/ansible b/bin/ansible index 12ad89fcff3797..8fbc509047120b 100755 --- a/bin/ansible +++ b/bin/ansible @@ -44,7 +44,7 @@ if __name__ == '__main__': cli = None display = Display() - me = os.path.basename(__file__) + me = os.path.basename(sys.argv[0]) try: if me == 'ansible-playbook': diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 0275a8c3475447..76cba0749fb47c 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -131,7 +131,7 @@ def run(self): if path is None: raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) - bin_path = 
os.path.dirname(os.path.abspath(__file__)) + bin_path = os.path.dirname(os.path.abspath(sys.argv[0])) cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % ( bin_path, inv_opts, base_opts, self.options.module_name, repo_opts ) @@ -144,6 +144,8 @@ def run(self): self.display.display("Sleeping for %d seconds..." % self.options.sleep) time.sleep(self.options.sleep); + import q + q(cmd) # RUN the Checkout command rc, out, err = run_cmd(cmd, live=True) From 845d564d899d432b36f3296bfb517931a142a9ff Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 9 Jun 2015 17:32:34 -0400 Subject: [PATCH 0815/3617] removed debug, moved limit to runtask instead section --- lib/ansible/cli/__init__.py | 4 ++-- lib/ansible/cli/pull.py | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index c2ae98b1b8096a..c1108d08a52981 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -222,6 +222,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None) parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) + parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', + help='further limit selected hosts to an additional pattern') if fork_opts: parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', @@ -235,8 +237,6 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, if subset_opts: - parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', - help='further limit selected hosts to an additional pattern') parser.add_option('-t', '--tags', dest='tags', default='all', help="only run plays and tasks tagged with these values") parser.add_option('--skip-tags', dest='skip_tags', diff --git 
a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 76cba0749fb47c..0c28a20248d13d 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -144,8 +144,6 @@ def run(self): self.display.display("Sleeping for %d seconds..." % self.options.sleep) time.sleep(self.options.sleep); - import q - q(cmd) # RUN the Checkout command rc, out, err = run_cmd(cmd, live=True) From 24b7c353cc970069b216ffe62148f2af06265047 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 9 Jun 2015 17:35:19 -0400 Subject: [PATCH 0816/3617] readjusted limit opts, makes no sense in adhoc when you already specify selection changed pull to reflect this --- lib/ansible/cli/__init__.py | 4 ++-- lib/ansible/cli/adhoc.py | 1 + lib/ansible/cli/pull.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index c1108d08a52981..5be92683824e62 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -222,12 +222,12 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None) parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) - parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', - help='further limit selected hosts to an additional pattern') if fork_opts: parser.add_option('-f','--forks', dest='forks', default=C.DEFAULT_FORKS, type='int', help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) + parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset', + help='further limit selected hosts to an additional pattern') if vault_opts: parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 
0d63a562842483..3607e3ee03d91b 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -45,6 +45,7 @@ def parse(self): check_opts=True, runtask_opts=True, vault_opts=True, + fork_opts=True, ) # options unique to ansible ad-hoc diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 0c28a20248d13d..c78540eeb21379 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -104,7 +104,7 @@ def run(self): # Build Checkout command # Now construct the ansible command limit_opts = 'localhost:%s:127.0.0.1' % socket.getfqdn() - base_opts = '-c local --limit "%s"' % limit_opts + base_opts = '-c local "%s"' % limit_opts if self.options.verbosity > 0: base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ]) @@ -132,7 +132,7 @@ def run(self): raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) bin_path = os.path.dirname(os.path.abspath(sys.argv[0])) - cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % ( + cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % ( bin_path, inv_opts, base_opts, self.options.module_name, repo_opts ) From 757fb39a2ed1c940cd894fa26a5d9689d07e317a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 9 Jun 2015 21:35:44 -0400 Subject: [PATCH 0817/3617] now uses new module_loader --- lib/ansible/cli/pull.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index c78540eeb21379..0d37568e20e344 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -26,9 +26,9 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.cli import CLI +from ansible.plugins import module_loader from ansible.utils.display import Display from ansible.utils.vault import read_vault_file -from ansible.utils.plugins import module_finder from ansible.utils.cmd_functions import run_cmd ######################################################## @@ -127,7 +127,7 
@@ def run(self): if self.options.private_key_file: repo_opts += ' key_file=%s' % self.options.private_key_file - path = module_finder.find_plugin(self.options.module_name) + path = module_loader.find_plugin(self.options.module_name) if path is None: raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) From 312e79ccd51ab5809b649952b2be38330227bfe0 Mon Sep 17 00:00:00 2001 From: Artur Cygan Date: Wed, 10 Jun 2015 15:42:30 +0200 Subject: [PATCH 0818/3617] Update README.md There are over 1000 contributors now :) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2a7d8e03af7181..8bfc18c7ca4028 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ Branch Info Authors ======= -Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 900 users (and growing). Thanks everyone! +Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 1000 users (and growing). Thanks everyone! Ansible is sponsored by [Ansible, Inc](http://ansible.com) From 6f11896303248b7a167021f5c33502ca4f48af56 Mon Sep 17 00:00:00 2001 From: Dionysis Grigoropoulos Date: Wed, 10 Jun 2015 10:27:25 +0300 Subject: [PATCH 0819/3617] ansible-pull: Add option to verify gpg signature of a commit Add option '--verify-commit' to verify a GPG signature of the checked out commit. 
As noted in the git module documentantion, this requires git version >= 2.1.0 --- lib/ansible/cli/pull.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 0d37568e20e344..ff8103a1df631b 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -70,7 +70,9 @@ def parse(self): help='adds the hostkey for the repo url if not already added') self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE, help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE) - + self.parser.add_option('--verify-commit', dest='verify', default=False, action='store_true', + help='verify GPG signature of checked out commit, if it fails abort running the playbook.' + ' This needs the corresponding VCS module to support such an operation') self.options, self.args = self.parser.parse_args() @@ -127,6 +129,9 @@ def run(self): if self.options.private_key_file: repo_opts += ' key_file=%s' % self.options.private_key_file + if self.options.verify: + repo_opts += ' verify_commit=yes' + path = module_loader.find_plugin(self.options.module_name) if path is None: raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) From 7b3dd55c3d6dbd5ca3d7d37276d8c43d2791eeed Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 10 Jun 2015 17:28:45 +0200 Subject: [PATCH 0820/3617] cloudstack: remove unused methods used for backward compatibility --- lib/ansible/module_utils/cloudstack.py | 25 ------------------------- v1/ansible/module_utils/cloudstack.py | 25 ------------------------- 2 files changed, 50 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 86ccef588e375d..39e02107fff1d4 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -111,11 +111,6 @@ def _get_by_key(self, key=None, 
my_dict={}): return my_dict - # TODO: for backward compatibility only, remove if not used anymore - def get_project_id(self): - return self.get_project(key='id') - - def get_project(self, key=None): if self.project: return self._get_by_key(key, self.project) @@ -135,11 +130,6 @@ def get_project(self, key=None): self.module.fail_json(msg="project '%s' not found" % project) - # TODO: for backward compatibility only, remove if not used anymore - def get_ip_address_id(self): - return self.get_ip_address(key='id') - - def get_ip_address(self, key=None): if self.ip_address: return self._get_by_key(key, self.ip_address) @@ -162,11 +152,6 @@ def get_ip_address(self, key=None): return self._get_by_key(key, self.ip_address) - # TODO: for backward compatibility only, remove if not used anymore - def get_vm_id(self): - return self.get_vm(key='id') - - def get_vm(self, key=None): if self.vm: return self._get_by_key(key, self.vm) @@ -189,11 +174,6 @@ def get_vm(self, key=None): self.module.fail_json(msg="Virtual machine '%s' not found" % vm) - # TODO: for backward compatibility only, remove if not used anymore - def get_zone_id(self): - return self.get_zone(key='id') - - def get_zone(self, key=None): if self.zone: return self._get_by_key(key, self.zone) @@ -214,11 +194,6 @@ def get_zone(self, key=None): self.module.fail_json(msg="zone '%s' not found" % zone) - # TODO: for backward compatibility only, remove if not used anymore - def get_os_type_id(self): - return self.get_os_type(key='id') - - def get_os_type(self, key=None): if self.os_type: return self._get_by_key(key, self.zone) diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py index 2b4ec0be17dd22..973ce24f8e76e4 100644 --- a/v1/ansible/module_utils/cloudstack.py +++ b/v1/ansible/module_utils/cloudstack.py @@ -109,11 +109,6 @@ def _get_by_key(self, key=None, my_dict={}): return my_dict - # TODO: for backward compatibility only, remove if not used anymore - def get_project_id(self): - 
return self.get_project(key='id') - - def get_project(self, key=None): if self.project: return self._get_by_key(key, self.project) @@ -133,11 +128,6 @@ def get_project(self, key=None): self.module.fail_json(msg="project '%s' not found" % project) - # TODO: for backward compatibility only, remove if not used anymore - def get_ip_address_id(self): - return self.get_ip_address(key='id') - - def get_ip_address(self, key=None): if self.ip_address: return self._get_by_key(key, self.ip_address) @@ -160,11 +150,6 @@ def get_ip_address(self, key=None): return self._get_by_key(key, self.ip_address) - # TODO: for backward compatibility only, remove if not used anymore - def get_vm_id(self): - return self.get_vm(key='id') - - def get_vm(self, key=None): if self.vm: return self._get_by_key(key, self.vm) @@ -187,11 +172,6 @@ def get_vm(self, key=None): self.module.fail_json(msg="Virtual machine '%s' not found" % vm) - # TODO: for backward compatibility only, remove if not used anymore - def get_zone_id(self): - return self.get_zone(key='id') - - def get_zone(self, key=None): if self.zone: return self._get_by_key(key, self.zone) @@ -212,11 +192,6 @@ def get_zone(self, key=None): self.module.fail_json(msg="zone '%s' not found" % zone) - # TODO: for backward compatibility only, remove if not used anymore - def get_os_type_id(self): - return self.get_os_type(key='id') - - def get_os_type(self, key=None): if self.os_type: return self._get_by_key(key, self.zone) From 0b074c449b1c5c0483470a4df623232eb9682609 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 10 Jun 2015 17:31:46 +0200 Subject: [PATCH 0821/3617] cloudstack: methods renaming --- lib/ansible/module_utils/cloudstack.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 39e02107fff1d4..13d4c59a0149c9 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -77,8 
+77,12 @@ def _connect(self): else: self.cs = CloudStack(**read_config()) - # TODO: rename to has_changed() + # TODO: for backward compatibility only, remove if not used anymore def _has_changed(self, want_dict, current_dict, only_keys=None): + return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys) + + + def has_changed(self, want_dict, current_dict, only_keys=None): for key, value in want_dict.iteritems(): # Optionally limit by a list of keys @@ -336,8 +340,12 @@ def get_capabilities(self, key=None): return self._get_by_key(key, self.capabilities) - # TODO: rename to poll_job() + # TODO: for backward compatibility only, remove if not used anymore def _poll_job(self, job=None, key=None): + return self.poll_job(job=job, key=key) + + + def poll_job(self, job=None, key=None): if 'jobid' in job: while True: res = self.cs.queryAsyncJobResult(jobid=job['jobid']) From 39764ed7d8834876de3d50779df3d8308c9d8d5d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 12:56:22 -0400 Subject: [PATCH 0822/3617] updated submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index d6ed6113a77a6e..9acc7c402f7297 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit d6ed6113a77a6e327cf12d3955022321c5b12efe +Subproject commit 9acc7c402f729748205e78f2b66b8f25b7552e37 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 57813a2e746aa7..5d1d8a6a984a34 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 57813a2e746aa79db6b6b1ef321b8c9a9345359a +Subproject commit 5d1d8a6a984a34ae0e7457f72a33a7222d9d6492 From d68111382d62c35a7b9cf11bccd04c5d130a0cfb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 13:00:01 -0400 Subject: [PATCH 0823/3617] updated with nagios doc fix 
--- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 5d1d8a6a984a34..2f967a949f9a45 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 5d1d8a6a984a34ae0e7457f72a33a7222d9d6492 +Subproject commit 2f967a949f9a45657c31ae66c0c7e7c2672a87d8 From 6eb96c1a56fec6557becec8ba822eeeb708243ec Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 10 Jun 2015 17:35:30 +0200 Subject: [PATCH 0824/3617] cloudstack: methods renaming --- v1/ansible/module_utils/cloudstack.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py index 973ce24f8e76e4..ddb08e9f9cd16d 100644 --- a/v1/ansible/module_utils/cloudstack.py +++ b/v1/ansible/module_utils/cloudstack.py @@ -75,8 +75,12 @@ def _connect(self): else: self.cs = CloudStack(**read_config()) - # TODO: rename to has_changed() + # TODO: for backward compatibility only, remove if not used anymore def _has_changed(self, want_dict, current_dict, only_keys=None): + return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys) + + + def has_changed(self, want_dict, current_dict, only_keys=None): for key, value in want_dict.iteritems(): # Optionally limit by a list of keys @@ -334,8 +338,12 @@ def get_capabilities(self, key=None): return self._get_by_key(key, self.capabilities) - # TODO: rename to poll_job() + # TODO: for backward compatibility only, remove if not used anymore def _poll_job(self, job=None, key=None): + return self.poll_job(job=job, key=key) + + + def poll_job(self, job=None, key=None): if 'jobid' in job: while True: res = self.cs.queryAsyncJobResult(jobid=job['jobid']) From 034228f64b48077707871a1b008999d9290e8c76 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 10 Jun 2015 20:31:26 +0200 Subject: [PATCH 0825/3617] cloudstack: add 
missing api_timeout into v1 --- v1/ansible/module_utils/cloudstack.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py index ddb08e9f9cd16d..13d4c59a0149c9 100644 --- a/v1/ansible/module_utils/cloudstack.py +++ b/v1/ansible/module_utils/cloudstack.py @@ -64,12 +64,14 @@ def _connect(self): api_secret = self.module.params.get('secret_key') api_url = self.module.params.get('api_url') api_http_method = self.module.params.get('api_http_method') + api_timeout = self.module.params.get('api_timeout') if api_key and api_secret and api_url: self.cs = CloudStack( endpoint=api_url, key=api_key, secret=api_secret, + timeout=api_timeout, method=api_http_method ) else: From deb741240e8915b982a5a4ddb3f55831012d42af Mon Sep 17 00:00:00 2001 From: Philip Stephens Date: Wed, 10 Jun 2015 16:36:26 -0700 Subject: [PATCH 0826/3617] Update playbooks_vault.rst As of 1.9 at least, you may specify a password file in your ansible.cfg and not have to extend your playbook calls with vault flags. --- docsite/rst/playbooks_vault.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst index 921a05c50edcd1..25dae8f5f3b90a 100644 --- a/docsite/rst/playbooks_vault.rst +++ b/docsite/rst/playbooks_vault.rst @@ -5,7 +5,7 @@ Vault New in Ansible 1.5, "Vault" is a feature of ansible that allows keeping sensitive data such as passwords or keys in encrypted files, rather than as plaintext in your playbooks or roles. These vault files can then be distributed or placed in source control. -To enable this feature, a command line tool, `ansible-vault` is used to edit files, and a command line flag `--ask-vault-pass` or `--vault-password-file` is used. +To enable this feature, a command line tool, `ansible-vault` is used to edit files, and a command line flag `--ask-vault-pass` or `--vault-password-file` is used. 
Alternately, you may specify the location of a password file in your ansible.cfg file. This option requires no command line flag usage. .. _what_can_be_encrypted_with_vault: From 7306a5397ed770d6d2069b51bf6fc92ad0de7313 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 22:55:50 -0400 Subject: [PATCH 0827/3617] simplified function, always attempt to template, always check if string before return, should avoid most cases of strings being passed to lookups --- lib/ansible/utils/listify.py | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py index a26b4b98295a43..c8fc97bed79e2b 100644 --- a/lib/ansible/utils/listify.py +++ b/lib/ansible/utils/listify.py @@ -33,34 +33,13 @@ def listify_lookup_plugin_terms(terms, variables, loader): if isinstance(terms, basestring): - # someone did: - # with_items: alist - # OR - # with_items: {{ alist }} - stripped = terms.strip() templar = Templar(loader=loader, variables=variables) - if not (stripped.startswith('{') or stripped.startswith('[')) and not stripped.startswith("/") and not stripped.startswith('set([') and not LOOKUP_REGEX.search(terms): - # if not already a list, get ready to evaluate with Jinja2 - # not sure why the "/" is in above code :) - try: - new_terms = templar.template("{{ %s }}" % terms) - if isinstance(new_terms, basestring) and "{{" in new_terms: - pass - else: - terms = new_terms - except: - pass - else: - terms = templar.template(terms) + terms = templar.template(terms, convert_bare=True) - if '{' in terms or '[' in terms: - # Jinja2 already evaluated a variable to a list. 
- # Jinja2-ified list needs to be converted back to a real type - return safe_eval(terms) + terms = safe_eval(terms) if isinstance(terms, basestring): terms = [ terms ] return terms - From 40336b50af3dc61a56b6770f5271a2dc5d7197f4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 22:58:08 -0400 Subject: [PATCH 0828/3617] removed redundant string check added playbook path lookup --- lib/ansible/plugins/lookup/file.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py index 30247c150ce879..76a12eb86ba61d 100644 --- a/lib/ansible/plugins/lookup/file.py +++ b/lib/ansible/plugins/lookup/file.py @@ -27,9 +27,6 @@ class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): - if not isinstance(terms, list): - terms = [ terms ] - ret = [] for term in terms: basedir_path = self._loader.path_dwim(term) @@ -43,13 +40,13 @@ def run(self, terms, variables=None, **kwargs): # itself (which will be relative to the current working dir) if 'role_path' in variables: - relative_path = self._loader.path_dwim_relative(variables['role_path'], 'files', term, check=False) + relative_path = self._loader.path_dwim_relative(variables['role_path'], 'files', term) # FIXME: the original file stuff still needs to be worked out, but the # playbook_dir stuff should be able to be removed as it should # be covered by the fact that the loader contains that info - #if 'playbook_dir' in variables: - # playbook_path = os.path.join(variables['playbook_dir'], term) + if 'playbook_dir' in variables: + playbook_path = self._loader.path_dwim_relative(variables['playbook_dir'],'files', term) for path in (basedir_path, relative_path, playbook_path): try: From f29c1c7452c1b387e5719197fc8b68ac7eb4ad12 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 23:26:01 -0400 Subject: [PATCH 0829/3617] respect undefined config setting --- lib/ansible/executor/playbook_executor.py | 
2 +- lib/ansible/executor/task_queue_manager.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 5e339e40313417..0c18ad3c893ade 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -81,7 +81,7 @@ def run(self): # Create a temporary copy of the play here, so we can run post_validate # on it without the templating changes affecting the original object. all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) - templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False) + templar = Templar(loader=self._loader, variables=all_vars) new_play = play.copy() new_play.post_validate(templar) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index b8ca4273702171..debcf6873d8e70 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -224,7 +224,7 @@ def run(self, play): play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default) all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) - templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False) + templar = Templar(loader=self._loader, variables=all_vars) new_play = play.copy() new_play.post_validate(templar) From 7291f9e96586b2ffa9f0bd110d62b5b0477d0fd6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 00:13:40 -0400 Subject: [PATCH 0830/3617] removed cruft made sure it does not fail on undefined --- lib/ansible/utils/listify.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py index c8fc97bed79e2b..dfc80120423245 100644 --- a/lib/ansible/utils/listify.py +++ b/lib/ansible/utils/listify.py @@ -19,24 +19,23 @@ from __future__ import 
(absolute_import, division, print_function) __metaclass__ = type -from six import iteritems, string_types - -import re from ansible.template import Templar from ansible.template.safe_eval import safe_eval __all__ = ['listify_lookup_plugin_terms'] -LOOKUP_REGEX = re.compile(r'lookup\s*\(') - +#FIXME: probably just move this into lookup plugin base class def listify_lookup_plugin_terms(terms, variables, loader): if isinstance(terms, basestring): stripped = terms.strip() templar = Templar(loader=loader, variables=variables) - terms = templar.template(terms, convert_bare=True) + #FIXME: warn/deprecation on bare vars in with_ so we can eventually remove fail on undefined override + terms = templar.template(terms, convert_bare=True, fail_on_undefined=False) + + #TODO: check if this is needed as template should also return correct type already terms = safe_eval(terms) if isinstance(terms, basestring): From 4098e8283e8cf7c13ced8c04796d838caf304c81 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 00:21:53 -0400 Subject: [PATCH 0831/3617] several fixes to template - now obeys global undefined var setting and allows override (mostly for with_ ) - moved environment instanciation to init instead of each template call - removed hardcoded template token matching and now use actually configured tokens, now it won't break if someone changes default configs in ansible.cfg - made reenetrant template calls now pass the same data it got, dictionary and lists were loosing existing and new params - moved fail_on_undeinfed parameter to template call, as it should only realky be set to false on specific templates and not globally - added overrides, which will allow template to implement jinja2 header override features - added filter list to overrides to disallow possibly insecure ones, TODO: check if this is still needed as facts should not be templated anymore - TODO: actually implement jinja2 header overrides --- lib/ansible/template/__init__.py | 51 
++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 00bc386f268513..0cbae466946305 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -40,20 +40,19 @@ # A regex for checking to see if a variable we're trying to # expand is just a single variable name. -SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$") # Primitive Types which we don't want Jinja to convert to strings. NON_TEMPLATED_TYPES = ( bool, Number ) JINJA2_OVERRIDE = '#jinja2:' -JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline'] +JINJA2_ALLOWED_OVERRIDES = frozenset(['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline']) class Templar: ''' The main class for templating, with the main entry-point of template(). ''' - def __init__(self, loader, shared_loader_obj=None, variables=dict(), fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR): + def __init__(self, loader, shared_loader_obj=None, variables=dict()): self._loader = loader self._basedir = loader.get_basedir() self._filters = None @@ -70,7 +69,12 @@ def __init__(self, loader, shared_loader_obj=None, variables=dict(), fail_on_und # should result in fatal errors being raised self._fail_on_lookup_errors = True self._fail_on_filter_errors = True - self._fail_on_undefined_errors = fail_on_undefined + self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR + + self.environment = Environment(trim_blocks=True, undefined=StrictUndefined, extensions=self._get_extensions(), finalize=self._finalize) + self.environment.template_class = AnsibleJ2Template + + self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string)) def _count_newlines_from_end(self, in_str): ''' @@ -129,7 +133,7 @@ def set_available_variables(self, variables): assert isinstance(variables, dict) 
self._available_variables = variables.copy() - def template(self, variable, convert_bare=False, preserve_trailing_newlines=False): + def template(self, variable, convert_bare=False, preserve_trailing_newlines=False, fail_on_undefined=None, overrides=None): ''' Templates (possibly recursively) any given data as input. If convert_bare is set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}') @@ -147,7 +151,7 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=Fals # Check to see if the string we are trying to render is just referencing a single # var. In this case we don't want to accidentally change the type of the variable # to a string by using the jinja template renderer. We just want to pass it. - only_one = SINGLE_VAR.match(variable) + only_one = self.SINGLE_VAR.match(variable) if only_one: var_name = only_one.group(1) if var_name in self._available_variables: @@ -155,10 +159,10 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=Fals if isinstance(resolved_val, NON_TEMPLATED_TYPES): return resolved_val - result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines) + result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) # if this looks like a dictionary or list, convert it to such using the safe_eval method - if (result.startswith("{") and not result.startswith("{{")) or result.startswith("["): + if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or result.startswith("["): eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True) if eval_results[1] is None: result = eval_results[0] @@ -169,11 +173,11 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=Fals return result elif isinstance(variable, (list, tuple)): - return [self.template(v, 
convert_bare=convert_bare) for v in variable] + return [self.template(v, convert_bare=convert_bare, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) for v in variable] elif isinstance(variable, dict): d = {} for (k, v) in variable.iteritems(): - d[k] = self.template(v, convert_bare=convert_bare) + d[k] = self.template(v, convert_bare=convert_bare, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) return d else: return variable @@ -188,7 +192,7 @@ def _contains_vars(self, data): ''' returns True if the data contains a variable pattern ''' - return "$" in data or "{{" in data or '{%' in data + return self.environment.block_start_string in data or self.environment.variable_start_string in data def _convert_bare_variable(self, variable): ''' @@ -198,8 +202,8 @@ def _convert_bare_variable(self, variable): if isinstance(variable, basestring): first_part = variable.split(".")[0].split("[")[0] - if first_part in self._available_variables and '{{' not in variable and '$' not in variable: - return "{{%s}}" % variable + if first_part in self._available_variables and self.environment.variable_start_string not in variable: + return "%s%s%s" % (self.environment.variable_start_string, variable, self.environment.variable_end_string) # the variable didn't meet the conditions to be converted, # so just return it as-is @@ -230,16 +234,24 @@ def _lookup(self, name, *args, **kwargs): else: raise AnsibleError("lookup plugin (%s) not found" % name) - def _do_template(self, data, preserve_trailing_newlines=False): + def _do_template(self, data, preserve_trailing_newlines=False, fail_on_undefined=None, overrides=None): + + if fail_on_undefined is None: + fail_on_undefined = self._fail_on_undefined_errors try: + # allows template header overrides to change jinja2 options. 
+ if overrides is None: + myenv = self.environment.overlay() + else: + overrides = JINJA2_ALLOWED_OVERRIDES.intersection(set(overrides)) + myenv = self.environment.overlay(overrides) - environment = Environment(trim_blocks=True, undefined=StrictUndefined, extensions=self._get_extensions(), finalize=self._finalize) - environment.filters.update(self._get_filters()) - environment.template_class = AnsibleJ2Template + #FIXME: add tests + myenv.filters.update(self._get_filters()) try: - t = environment.from_string(data) + t = myenv.from_string(data) except TemplateSyntaxError, e: raise AnsibleError("template error while templating string: %s" % str(e)) except Exception, e: @@ -280,8 +292,9 @@ def _do_template(self, data, preserve_trailing_newlines=False): return res except (UndefinedError, AnsibleUndefinedVariable), e: - if self._fail_on_undefined_errors: + if fail_on_undefined: raise else: + #TODO: return warning about undefined var return data From f174682e1903e246c9f7389e2e76ffcca4a04c28 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 00:48:40 -0400 Subject: [PATCH 0832/3617] facts should now not be overriten with NA option unless they are NA this way we don't need a break per distro that matched already with the python default functions --- lib/ansible/module_utils/facts.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 3d39c736db6024..06da6d53e32607 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -417,13 +417,13 @@ def get_distribution_facts(self): self.facts['distribution_version'] = self.facts['distribution_version'] + '.' 
+ release.group(1) elif name == 'Debian': data = get_file_content(path) - if 'Ubuntu' in data: - break # Ubuntu gets correct info from python functions - elif 'Debian' in data or 'Raspbian' in data: + if 'Debian' in data or 'Raspbian' in data: release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data) if release: self.facts['distribution_release'] = release.groups()[0] break + elif 'Ubuntu' in data: + break # Ubuntu gets correct info from python functions elif name == 'Mandriva': data = get_file_content(path) if 'Mandriva' in data: @@ -438,12 +438,15 @@ def get_distribution_facts(self): elif name == 'NA': data = get_file_content(path) for line in data.splitlines(): - distribution = re.search("^NAME=(.*)", line) - if distribution: - self.facts['distribution'] = distribution.group(1).strip('"') - version = re.search("^VERSION=(.*)", line) - if version: - self.facts['distribution_version'] = version.group(1).strip('"') + if self.facts['distribution'] == 'NA': + distribution = re.search("^NAME=(.*)", line) + if distribution: + self.facts['distribution'] = distribution.group(1).strip('"') + if self.facts['distribution_version'] == 'NA': + version = re.search("^VERSION=(.*)", line) + if version: + self.facts['distribution_version'] = version.group(1).strip('"') + if self.facts['distribution'].lower() == 'coreos': data = get_file_content('/etc/coreos/update.conf') release = re.search("^GROUP=(.*)", data) From ef6bd9afb0f51bf8d79bee7b733df50e4def978c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Thu, 11 Jun 2015 09:31:24 +0200 Subject: [PATCH 0833/3617] changelog: add cs_network --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 23a0f8e21954e6..82c87630b3bd0b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ New Modules: * cloudstack: cs_iso * cloudstack: cs_instance * cloudstack: cs_instancegroup + * cloudstack: cs_network * cloudstack: cs_portforward * cloudstack: cs_project * 
cloudstack: cs_sshkeypair From 0f68db2d7ecf3a2ce8273665dfc4e86295b85a13 Mon Sep 17 00:00:00 2001 From: sirkubax Date: Thu, 11 Jun 2015 11:51:35 +0200 Subject: [PATCH 0834/3617] Update ec2.ini Warning about usage boto+ec2.ini --- plugins/inventory/ec2.ini | 3 +++ 1 file changed, 3 insertions(+) diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini index 1866f0bf3d6c27..6583160f0f7b7d 100644 --- a/plugins/inventory/ec2.ini +++ b/plugins/inventory/ec2.ini @@ -35,6 +35,9 @@ destination_variable = public_dns_name # private subnet, this should be set to 'private_ip_address', and Ansible must # be run from within EC2. The key of an EC2 tag may optionally be used; however # the boto instance variables hold precedence in the event of a collision. +# WARNING: - instances that are in the private vpc, _without_ public ip address +# will not be listed in the inventory untill You set: +# vpc_destination_variable = 'private_ip_address' vpc_destination_variable = ip_address # To tag instances on EC2 with the resource records that point to them from From aed429554dc86385408133988da5caba44dce891 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 10:03:26 -0400 Subject: [PATCH 0835/3617] better checks to ensure listify emits a non string iterable --- lib/ansible/utils/listify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py index dfc80120423245..d8ef025e0bb9b7 100644 --- a/lib/ansible/utils/listify.py +++ b/lib/ansible/utils/listify.py @@ -38,7 +38,7 @@ def listify_lookup_plugin_terms(terms, variables, loader): #TODO: check if this is needed as template should also return correct type already terms = safe_eval(terms) - if isinstance(terms, basestring): + if isinstance(terms, basestring) or not isinstance(terms, list) and not isinstance(terms, set): terms = [ terms ] return terms From c346788194770c636c50af462b26000e81fc59c4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: 
Thu, 11 Jun 2015 08:54:25 -0700 Subject: [PATCH 0836/3617] Slight optimization of how we squash loops. Add dnf to the list of modules for which we squash. Fixes #11235 --- lib/ansible/executor/task_executor.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 8de8f7027ab14c..ddd557f9998f93 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -48,6 +48,10 @@ class TaskExecutor: class. ''' + # Modules that we optimize by squashing loop items into a single call to + # the module + SQUASH_ACTIONS = frozenset(('apt', 'yum', 'pkgng', 'zypper', 'dnf')) + def __init__(self, host, task, job_vars, connection_info, new_stdin, loader, shared_loader_obj): self._host = host self._task = task @@ -176,7 +180,7 @@ def _squash_items(self, items, variables): (typically package management modules). ''' - if len(items) > 0 and self._task.action in ('apt', 'yum', 'pkgng', 'zypper'): + if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS: final_items = [] for item in items: variables['item'] = item From 176b04a81242ff9aa6bf62a26a57d0b5b07f9467 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 11 Jun 2015 09:03:20 -0700 Subject: [PATCH 0837/3617] Correct typo --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 26d80ff7d330f6..a72340fde9010f 100644 --- a/tox.ini +++ b/tox.ini @@ -27,5 +27,5 @@ whitelist_externals = make commands = python -m compileall -fq -x 'lib/ansible/module_utils' lib make tests -deps = -r-r{toxinidir}/test-requirements.txt +deps = -r{toxinidir}/test-requirements.txt whitelist_externals = make From 31ef87eb724a6627236608105e02028beb8bea69 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 11 Jun 2015 09:05:44 -0700 Subject: [PATCH 0838/3617] Add dnf to list of modules that we squash loop items for --- v1/ansible/runner/__init__.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/v1/ansible/runner/__init__.py b/v1/ansible/runner/__init__.py index 8b46683c37ec52..4ff273778caee5 100644 --- a/v1/ansible/runner/__init__.py +++ b/v1/ansible/runner/__init__.py @@ -740,7 +740,7 @@ def _executor_internal(self, host, new_stdin): if type(items) != list: raise errors.AnsibleError("lookup plugins have to return a list: %r" % items) - if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng', 'zypper' ]: + if len(items) and utils.is_list_of_strings(items) and self.module_name in ( 'apt', 'yum', 'pkgng', 'zypper', 'dnf' ): # hack for apt, yum, and pkgng so that with_items maps back into a single module call use_these_items = [] for x in items: From 5d7dac6938c9664a5cb9a025e3e15b4682094edd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 13:11:09 -0400 Subject: [PATCH 0839/3617] added expect module to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 82c87630b3bd0b..b76d021d34eadc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ New Modules: * cloudstack: cs_securitygroup_rule * cloudstack: cs_vmsnapshot * datadog_monitor + * expect * find * maven_artifact * openstack: os_client_config From e9cf67004bd65ef10f9643116a53975b0e542bd0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 12:47:29 -0400 Subject: [PATCH 0840/3617] updated fail_on_undefined test to new function signatures --- test/units/template/test_templar.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/units/template/test_templar.py b/test/units/template/test_templar.py index ce40c73b0d0e87..6d2301fb9f9328 100644 --- a/test/units/template/test_templar.py +++ b/test/units/template/test_templar.py @@ -71,22 +71,24 @@ def test_templar_simple(self): self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'") self.assertEqual(templar.template("{{var_list}}"), [1]) 
self.assertEqual(templar.template(1, convert_bare=True), 1) + #FIXME: lookup ignores fake file and returns error + #self.assertEqual(templar.template("{{lookup('file', '/path/to/my_file.txt')}}"), "foo") + + # force errors self.assertRaises(UndefinedError, templar.template, "{{bad_var}}") - self.assertEqual(templar.template("{{lookup('file', '/path/to/my_file.txt')}}"), "foo") self.assertRaises(UndefinedError, templar.template, "{{lookup('file', bad_var)}}") self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}") self.assertRaises(AnsibleError, templar.template, "{{recursive}}") self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}") # test with fail_on_undefined=False - templar = Templar(loader=fake_loader, fail_on_undefined=False) - self.assertEqual(templar.template("{{bad_var}}"), "{{bad_var}}") + self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}") # test set_available_variables() templar.set_available_variables(variables=dict(foo="bam")) self.assertEqual(templar.template("{{foo}}"), "bam") # variables must be a dict() for set_available_variables() - self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam") + self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam") def test_template_jinja2_extensions(self): fake_loader = DictDataLoader({}) From 091caf6279cad1b9ed4ec19f4f21a750a67b36ce Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 13:03:25 -0400 Subject: [PATCH 0841/3617] added missing error class import --- test/units/mock/loader.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/units/mock/loader.py b/test/units/mock/loader.py index 8b6bbbbaf9cd99..f44df2efdbc3f4 100644 --- a/test/units/mock/loader.py +++ b/test/units/mock/loader.py @@ -21,6 +21,7 @@ import os +from ansible.errors import AnsibleParserError from ansible.parsing import DataLoader class DictDataLoader(DataLoader): From 
aaab69cae9c3029594f3865500420b271e15ce56 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 13:43:47 -0400 Subject: [PATCH 0842/3617] brought back terms testing as with_ is not only way to call and we cannot guarantee terms is a list otherwise. --- lib/ansible/plugins/lookup/file.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/plugins/lookup/file.py b/lib/ansible/plugins/lookup/file.py index 76a12eb86ba61d..b38c2eff555264 100644 --- a/lib/ansible/plugins/lookup/file.py +++ b/lib/ansible/plugins/lookup/file.py @@ -27,6 +27,9 @@ class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): + if not isinstance(terms, list): + terms = [ terms ] + ret = [] for term in terms: basedir_path = self._loader.path_dwim(term) From b9bb3e83b7f001ecca392f4ff51f913d495a69cf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 13:44:31 -0400 Subject: [PATCH 0843/3617] added new test that allows for listed bare strings now with_times: barestring, will error out in test --- test/integration/roles/test_lookups/tasks/main.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index f9970f70a29f70..44e8b18ccb4f75 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -125,9 +125,16 @@ - "bare_var.results[0].item == 1" - "bare_var.results[1].item == 2" +- name: use list with bare strings in it + debug: msg={{item}} + with_items: + - things2 + - things1 + - name: use list with undefined var in it debug: msg={{item}} with_items: things2 + ignore_errors: True # BUG #10073 nested template handling From 48c1064d0b1fe8972a863f176ae0f9c05144f92d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Gl=C3=A4ske?= Date: Fri, 12 Jun 2015 17:21:23 +0300 Subject: [PATCH 0844/3617] Update guide_gce.rst Make the docs more specific. 
--- docsite/rst/guide_gce.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst index ed236544a3d14c..fbcab9ba2a4938 100644 --- a/docsite/rst/guide_gce.rst +++ b/docsite/rst/guide_gce.rst @@ -22,7 +22,7 @@ The GCE modules all require the apache-libcloud module, which you can install fr Credentials ----------- -To work with the GCE modules, you'll first need to get some credentials. You can create new one from the `console `_ by going to the "APIs and Auth" section and choosing to create a new client ID for a service account. Once you've created a new client ID and downloaded the generated private key (in the `pkcs12 format `_), you'll need to convert the key by running the following command: +To work with the GCE modules, you'll first need to get some credentials. You can create new one from the `console `_ by going to the "APIs and Auth" section and choosing to create a new client ID for a service account. Once you've created a new client ID and downloaded (you must click **Generate new P12 Key**) the generated private key (in the `pkcs12 format `_), you'll need to convert the key by running the following command: .. code-block:: bash @@ -79,6 +79,8 @@ Create a file ``secrets.py`` looking like following, and put it in some folder w GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.pem') GCE_KEYWORD_PARAMS = {'project': 'project_id'} +Ensure to enter the email adress from the created services account and not the one from your main account. + Now the modules can be used as above, but the account information can be omitted. GCE Dynamic Inventory From a4e2d1eb623ae8a87cf74bfc2b6499808847e36b Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 12 Jun 2015 13:52:20 -0500 Subject: [PATCH 0845/3617] Require passlib over crypt in password_hash for Mac OS X/Darwin. 
Fixes #11244 --- lib/ansible/plugins/filter/core.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py index 977d0947c38c61..a717c5bd817bce 100644 --- a/lib/ansible/plugins/filter/core.py +++ b/lib/ansible/plugins/filter/core.py @@ -42,6 +42,12 @@ from ansible.utils.hashing import md5s, checksum_s from ansible.utils.unicode import unicode_wrap, to_unicode +try: + import passlib.hash + HAS_PASSLIB = True +except: + HAS_PASSLIB = False + UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E') @@ -266,8 +272,15 @@ def get_encrypted_password(password, hashtype='sha512', salt=None): r = SystemRandom() salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)]) - saltstring = "$%s$%s" % (cryptmethod[hashtype],salt) - encrypted = crypt.crypt(password,saltstring) + if not HAS_PASSLIB: + if sys.platform.startswith('darwin'): + raise errors.AnsibleFilterError('|password_hash requires the passlib python module to generate password hashes on Mac OS X/Darwin') + saltstring = "$%s$%s" % (cryptmethod[hashtype],salt) + encrypted = crypt.crypt(password, saltstring) + else: + cls = getattr(passlib.hash, '%s_crypt' % hashtype) + encrypted = cls.encrypt(password, salt=salt) + return encrypted return None From 4161d78a94cf91f56370645dd54dda6a4b0ebdeb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 12 Jun 2015 12:24:23 -0700 Subject: [PATCH 0846/3617] Split the fetch_url() function into fetch_url and open_url(). open_url() is suitable for use outside of a module environment. Will let us use open_url to do SSL cert verification in other, non-module code. 
--- lib/ansible/module_utils/urls.py | 186 ++++++++++++++++++------------- 1 file changed, 110 insertions(+), 76 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 18317e86aeb8e3..2725980fcb53d9 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -26,12 +26,6 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -try: - import urllib - HAS_URLLIB = True -except: - HAS_URLLIB = False - try: import urllib2 HAS_URLLIB2 = True @@ -62,7 +56,9 @@ import httplib import os import re +import sys import socket +import platform import tempfile @@ -89,6 +85,27 @@ -----END CERTIFICATE----- """ +# +# Exceptions +# + +class ConnectionError(Exception): + """Failed to connect to the server""" + pass + +class ProxyError(ConnectionError): + """Failure to connect because of a proxy""" + pass + +class SSLValidationError(ConnectionError): + """Failure to connect due to SSL validation failing""" + pass + +class NoSSLError(SSLValidationError): + """Needed to connect to an HTTPS url but no ssl library available to verify the certificate""" + pass + + class CustomHTTPSConnection(httplib.HTTPSConnection): def connect(self): "Connect to a host on a given (SSL) port." 
@@ -153,7 +170,7 @@ def generic_urlparse(parts): username, password = auth.split(':', 1) generic_parts['username'] = username generic_parts['password'] = password - generic_parts['hostname'] = hostnme + generic_parts['hostname'] = hostname generic_parts['port'] = port except: generic_parts['username'] = None @@ -189,8 +206,7 @@ class SSLValidationHandler(urllib2.BaseHandler): ''' CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n" - def __init__(self, module, hostname, port): - self.module = module + def __init__(self, hostname, port): self.hostname = hostname self.port = port @@ -200,23 +216,22 @@ def get_ca_certs(self): ca_certs = [] paths_checked = [] - platform = get_platform() - distribution = get_distribution() + system = platform.system() # build a list of paths to check for .crt/.pem files # based on the platform type paths_checked.append('/etc/ssl/certs') - if platform == 'Linux': + if system == 'Linux': paths_checked.append('/etc/pki/ca-trust/extracted/pem') paths_checked.append('/etc/pki/tls/certs') paths_checked.append('/usr/share/ca-certificates/cacert.org') - elif platform == 'FreeBSD': + elif system == 'FreeBSD': paths_checked.append('/usr/local/share/certs') - elif platform == 'OpenBSD': + elif system == 'OpenBSD': paths_checked.append('/etc/ssl') - elif platform == 'NetBSD': + elif system == 'NetBSD': ca_certs.append('/etc/openssl/certs') - elif platform == 'SunOS': + elif system == 'SunOS': paths_checked.append('/opt/local/etc/openssl/certs') # fall back to a user-deployed cert in a standard @@ -226,7 +241,7 @@ def get_ca_certs(self): tmp_fd, tmp_path = tempfile.mkstemp() # Write the dummy ca cert if we are running on Mac OS X - if platform == 'Darwin': + if system == 'Darwin': os.write(tmp_fd, DUMMY_CA_CERT) # Default Homebrew path for OpenSSL certs paths_checked.append('/usr/local/etc/openssl') @@ -259,7 +274,7 @@ def validate_proxy_response(self, response, valid_codes=[200]): if int(resp_code) not in valid_codes: raise Exception 
except: - self.module.fail_json(msg='Connection to proxy failed') + raise ProxyError('Connection to proxy failed') def detect_no_proxy(self, url): ''' @@ -304,7 +319,7 @@ def http_request(self, req): ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) match_hostname(ssl_s.getpeercert(), self.hostname) else: - self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme')) + raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme')) else: s.connect((self.hostname, self.port)) ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) @@ -315,15 +330,14 @@ def http_request(self, req): except (ssl.SSLError, socket.error), e: # fail if we tried all of the certs but none worked if 'connection refused' in str(e).lower(): - self.module.fail_json(msg='Failed to connect to %s:%s.' % (self.hostname, self.port)) + raise ConnectionError('Failed to connect to %s:%s.' % (self.hostname, self.port)) else: - self.module.fail_json( - msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \ - 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \ - 'Paths checked for this platform: %s' % ", ".join(paths_checked) + raise SSLValidationError('Failed to validate the SSL certificate for %s:%s. ' + 'Use validate_certs=False (insecure) or make sure your managed systems have a valid CA certificate installed. ' + 'Paths checked for this platform: %s' % (self.hostname, self.port, ", ".join(paths_checked)) ) except CertificateError: - self.module.fail_json(msg="SSL Certificate does not belong to %s. Make sure the url has a certificate that belongs to it or use validate_certs=no (insecure)" % self.hostname) + raise SSLValidationError("SSL Certificate does not belong to %s. 
Make sure the url has a certificate that belongs to it or use validate_certs=False (insecure)" % self.hostname) try: # cleanup the temp file created, don't worry @@ -336,55 +350,23 @@ def http_request(self, req): https_request = http_request - -def url_argument_spec(): - ''' - Creates an argument spec that can be used with any module - that will be requesting content via urllib/urllib2 - ''' - return dict( - url = dict(), - force = dict(default='no', aliases=['thirsty'], type='bool'), - http_agent = dict(default='ansible-httpget'), - use_proxy = dict(default='yes', type='bool'), - validate_certs = dict(default='yes', type='bool'), - url_username = dict(required=False), - url_password = dict(required=False), - ) - - -def fetch_url(module, url, data=None, headers=None, method=None, - use_proxy=True, force=False, last_mod_time=None, timeout=10): +# Rewrite of fetch_url to not require the module environment +def open_url(url, data=None, headers=None, method=None, use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None): ''' Fetches a file from an HTTP/FTP server using urllib2 ''' - - if not HAS_URLLIB: - module.fail_json(msg='urllib is not installed') - if not HAS_URLLIB2: - module.fail_json(msg='urllib2 is not installed') - elif not HAS_URLPARSE: - module.fail_json(msg='urlparse is not installed') - - r = None handlers = [] - info = dict(url=url) - - distribution = get_distribution() - # Get validate_certs from the module params - validate_certs = module.params.get('validate_certs', True) # FIXME: change the following to use the generic_urlparse function # to remove the indexed references for 'parsed' parsed = urlparse.urlparse(url) if parsed[0] == 'https' and validate_certs: if not HAS_SSL: - if distribution == 'Redhat': - module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. 
You can also install python-ssl from EPEL') - else: - module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended') + raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False, however this is unsafe and not recommended') if not HAS_MATCH_HOSTNAME: - module.fail_json(msg='Available SSL validation does not check that the certificate matches the hostname. You can install backports.ssl_match_hostname or update your managed machine to python-2.7.9 or newer. You could also use validate_certs=no, however this is unsafe and not recommended') + raise SSLValidationError('Available SSL validation does not check that the certificate matches the hostname. You can install backports.ssl_match_hostname or update your managed machine to python-2.7.9 or newer. You could also use validate_certs=False, however this is unsafe and not recommended') # do the cert validation netloc = parsed[1] @@ -398,13 +380,14 @@ def fetch_url(module, url, data=None, headers=None, method=None, port = 443 # create the SSL validation handler and # add it to the list of handlers - ssl_handler = SSLValidationHandler(module, hostname, port) + ssl_handler = SSLValidationHandler(hostname, port) handlers.append(ssl_handler) if parsed[0] != 'ftp': - username = module.params.get('url_username', '') + username = url_username + if username: - password = module.params.get('url_password', '') + password = url_password netloc = parsed[1] elif '@' in parsed[1]: credentials, netloc = parsed[1].split('@', 1) @@ -448,14 +431,14 @@ def fetch_url(module, url, data=None, headers=None, method=None, if method: if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'): - module.fail_json(msg='invalid HTTP request method; %s' % method.upper()) + raise ConnectionError('invalid HTTP request method; %s' % method.upper()) request = RequestWithMethod(url, 
method.upper(), data) else: request = urllib2.Request(url, data) # add the custom agent header, to help prevent issues # with sites that block the default urllib agent string - request.add_header('User-agent', module.params.get('http_agent')) + request.add_header('User-agent', http_agent) # if we're ok with getting a 304, set the timestamp in the # header, otherwise make sure we don't get a cached copy @@ -468,20 +451,72 @@ def fetch_url(module, url, data=None, headers=None, method=None, # user defined headers now, which may override things we've set above if headers: if not isinstance(headers, dict): - module.fail_json("headers provided to fetch_url() must be a dict") + raise ValueError("headers provided to fetch_url() must be a dict") for header in headers: request.add_header(header, headers[header]) + if sys.version_info < (2,6,0): + # urlopen in python prior to 2.6.0 did not + # have a timeout parameter + r = urllib2.urlopen(request, None) + else: + r = urllib2.urlopen(request, None, timeout) + + return r + +# +# Module-related functions +# + +def url_argument_spec(): + ''' + Creates an argument spec that can be used with any module + that will be requesting content via urllib/urllib2 + ''' + return dict( + url = dict(), + force = dict(default='no', aliases=['thirsty'], type='bool'), + http_agent = dict(default='ansible-httpget'), + use_proxy = dict(default='yes', type='bool'), + validate_certs = dict(default='yes', type='bool'), + url_username = dict(required=False), + url_password = dict(required=False), + ) + +def fetch_url(module, url, data=None, headers=None, method=None, + use_proxy=True, force=False, last_mod_time=None, timeout=10): + ''' + Fetches a file from an HTTP/FTP server using urllib2. 
Requires the module environment + ''' + + if not HAS_URLLIB2: + module.fail_json(msg='urllib2 is not installed') + elif not HAS_URLPARSE: + module.fail_json(msg='urlparse is not installed') + + # Get validate_certs from the module params + validate_certs = module.params.get('validate_certs', True) + + username = module.params.get('url_username', '') + password = module.params.get('url_password', '') + http_agent = module.params.get('http_agent', None) + + r = None + info = dict(url=url) try: - if sys.version_info < (2,6,0): - # urlopen in python prior to 2.6.0 did not - # have a timeout parameter - r = urllib2.urlopen(request, None) - else: - r = urllib2.urlopen(request, None, timeout) + r = open_url(url, data=None, headers=None, method=None, + use_proxy=True, force=False, last_mod_time=None, timeout=10, + validate_certs=validate_certs, url_username=username, + url_password=password, http_agent=http_agent) info.update(r.info()) info['url'] = r.geturl() # The URL goes in too, because of redirects. info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200)) + except NoSSLError, e: + distribution = get_distribution() + if distribution.lower() == 'redhat': + module.fail_json(msg='%s. 
You can also install python-ssl from EPEL' % str(e)) + except (ConnectionError, ValueError), e: + module.fail_json(msg=str(e)) except urllib2.HTTPError, e: info.update(dict(msg=str(e), status=e.code)) except urllib2.URLError, e: @@ -493,4 +528,3 @@ def fetch_url(module, url, data=None, headers=None, method=None, info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1)) return r, info - From 77c76e632eb896def3b214606e636198ac67e5fe Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 12 Jun 2015 12:32:02 -0700 Subject: [PATCH 0847/3617] Switch etcd and url lookup plugins to verify ssl certificates --- lib/ansible/plugins/lookup/etcd.py | 14 +++++++++----- lib/ansible/plugins/lookup/url.py | 30 ++++++++++++++++++------------ 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/lib/ansible/plugins/lookup/etcd.py b/lib/ansible/plugins/lookup/etcd.py index 002068389f8357..1ea42e8f84cf18 100644 --- a/lib/ansible/plugins/lookup/etcd.py +++ b/lib/ansible/plugins/lookup/etcd.py @@ -18,23 +18,25 @@ __metaclass__ = type import os -import urllib2 + try: import json except ImportError: import simplejson as json from ansible.plugins.lookup import LookupBase +from ansible.module_utils.urls import open_url # this can be made configurable, not should not use ansible.cfg ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001' if os.getenv('ANSIBLE_ETCD_URL') is not None: ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL'] -class etcd(): - def __init__(self, url=ANSIBLE_ETCD_URL): +class Etcd: + def __init__(self, url=ANSIBLE_ETCD_URL, validate_certs): self.url = url self.baseurl = '%s/v1/keys' % (self.url) + self.validate_certs = validate_certs def get(self, key): url = "%s/%s" % (self.baseurl, key) @@ -42,7 +44,7 @@ def get(self, key): data = None value = "" try: - r = urllib2.urlopen(url) + r = open_url(url, validate_certs=self.validate_certs) data = r.read() except: return value @@ -67,7 +69,9 @@ def run(self, terms, variables, **kwargs): if isinstance(terms, 
basestring): terms = [ terms ] - etcd = etcd() + validate_certs = kwargs.get('validate_certs', True) + + etcd = Etcd(validate_certs=validate_certs) ret = [] for term in terms: diff --git a/lib/ansible/plugins/lookup/url.py b/lib/ansible/plugins/lookup/url.py index 9f1a89f772ce54..c6efc6a31b3c65 100644 --- a/lib/ansible/plugins/lookup/url.py +++ b/lib/ansible/plugins/lookup/url.py @@ -17,30 +17,36 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.plugins.lookup import LookupBase import urllib2 +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError +from ansible.utils.unicode import to_unicode + class LookupModule(LookupBase): - def run(self, terms, inject=None, **kwargs): + def run(self, terms, variables=None, **kwargs): if isinstance(terms, basestring): terms = [ terms ] + validate_certs = kwargs.get('validate_certs', True) + ret = [] for term in terms: try: - r = urllib2.Request(term) - response = urllib2.urlopen(r) - except URLError as e: - utils.warnings("Failed lookup url for %s : %s" % (term, str(e))) - continue - except HTTPError as e: - utils.warnings("Received HTTP error for %s : %s" % (term, str(e))) - continue + response = open_url(term, validate_certs=validate_certs) + except urllib2.URLError as e: + raise AnsibleError("Failed lookup url for %s : %s" % (term, str(e))) + except urllib2.HTTPError as e: + raise AnsibleError("Received HTTP error for %s : %s" % (term, str(e))) + except SSLValidationError as e: + raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, str(e))) + except ConnectionError as e: + raise AnsibleError("Error connecting to %s: %s" % (term, str(e))) for line in response.read().splitlines(): - ret.append(line) - + ret.append(to_unicode(line)) return ret From d315f6e22c2196accca42498ef2101c69d51a696 Mon Sep 17 00:00:00 2001 From: Toshio 
Kuratomi Date: Fri, 12 Jun 2015 12:59:29 -0700 Subject: [PATCH 0848/3617] Fix Etcd constructor --- lib/ansible/plugins/lookup/etcd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/lookup/etcd.py b/lib/ansible/plugins/lookup/etcd.py index 1ea42e8f84cf18..46a81e4d6bb5e7 100644 --- a/lib/ansible/plugins/lookup/etcd.py +++ b/lib/ansible/plugins/lookup/etcd.py @@ -33,7 +33,7 @@ ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL'] class Etcd: - def __init__(self, url=ANSIBLE_ETCD_URL, validate_certs): + def __init__(self, url=ANSIBLE_ETCD_URL, validate_certs=True): self.url = url self.baseurl = '%s/v1/keys' % (self.url) self.validate_certs = validate_certs From 9ed3e2ef486347fe5e92bbec7c6ad69cf0629871 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 12 Jun 2015 15:06:11 -0500 Subject: [PATCH 0849/3617] Display a warning when using a deprecated module --- lib/ansible/plugins/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index 8d23ae796cb028..bbbe0bd7950649 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -247,6 +247,14 @@ def find_plugin(self, name, suffixes=None): for alias_name in ('_%s' % n for n in potential_names): # We've already cached all the paths at this point if alias_name in self._plugin_path_cache: + if not os.path.islink(self._plugin_path_cache[alias_name]): + d = Display() + d.warning('%s has been deprecated, which means ' + 'it is kept for backwards compatibility ' + 'but usage is discouraged. The module ' + 'documentation details page may explain ' + 'more about this rationale.' % + name.lstrip('_')) return self._plugin_path_cache[alias_name] return None From 0132c51346ec9b0fcffc0c5eebb5597cc4c57c24 Mon Sep 17 00:00:00 2001 From: Scot Marvin Date: Fri, 12 Jun 2015 17:38:37 -0700 Subject: [PATCH 0850/3617] Update index.rst Adding some copy edits. Feel free to disregard. 
--- docsite/rst/index.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index a0da19cca29f81..26db29ab82f43c 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -9,14 +9,16 @@ Welcome to the Ansible documentation! Ansible is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates. -Ansible's goals are foremost those of simplicity and maximum ease of use. It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with an accelerated socket mode and pull modes as alternatives), and a language that is designed around auditability by humans -- even those not familiar with the program. +Ansible's main goals are simplicity and ease-of-use. It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with an accelerated socket mode and pull modes as alternatives), and a language that is designed around auditability by humans--even those not familiar with the program. -We believe simplicity is relevant to all sizes of environments and design for busy users of all types -- whether this means developers, sysadmins, release engineers, IT managers, and everywhere in between. Ansible is appropriate for managing small setups with a handful of instances as well as enterprise environments with many thousands. +We believe simplicity is relevant to all sizes of environments, so we design for busy users of all types: developers, sysadmins, release engineers, IT managers, and everyone in between. Ansible is appropriate for managing all ennvironements, from small setups with a handful of instances to enterprise environments with many thousands of instances. Ansible manages machines in an agentless manner. 
There is never a question of how to -upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. As OpenSSH is one of the most peer reviewed open source components, the security exposure of using the tool is greatly reduced. Ansible is decentralized -- it relies on your existing OS credentials to control access to remote machines; if needed it can easily connect with Kerberos, LDAP, and other centralized authentication management systems. +upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. Because OpenSSH is one of the most peer-reviewed open source components, security exposure is greatly reduced. Ansible is decentralized--it relies on your existing OS credentials to control access to remote machines. If needed, Ansible can easily connect with Kerberos, LDAP, and other centralized authentication management systems. -This documentation covers the current released version of Ansible (1.9.1) and also some development version features (2.0). For recent features, in each section, the version of Ansible where the feature is added is indicated. Ansible, Inc releases a new major release of Ansible approximately every 2 months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup, while the community around new modules and plugins being developed and contributed moves very very quickly, typically adding 20 or so new modules in each release. +This documentation covers the current released version of Ansible (1.9.1) and also some development version features (2.0). For recent features, we note in each section the version of Ansible where the feature was added. + +Ansible, Inc. releases a new major release of Ansible approximately every two months. The core application evolves somewhat conservatively, valuing simplicity in language design and setup. 
However, the community around new modules and plugins being developed and contributed moves very quickly, typically adding 20 or so new modules in each release. .. _an_introduction: From 11f1d99a5b133e81354b835f8bca5d24ffebdc29 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 12 Jun 2015 23:41:16 -0400 Subject: [PATCH 0851/3617] added test for first_available and copy --- test/integration/roles/test_copy/tasks/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/integration/roles/test_copy/tasks/main.yml b/test/integration/roles/test_copy/tasks/main.yml index 5e77295fbb30e2..8bb13b45022ba0 100644 --- a/test/integration/roles/test_copy/tasks/main.yml +++ b/test/integration/roles/test_copy/tasks/main.yml @@ -250,3 +250,9 @@ assert: that: - replace_follow_result.checksum == target_file_result.stdout + +- name: test first avialable file + copy: dest={{output_dir}}/faf_test + first_available_file: + - doesntexist.txt + - foo.txt From a6ca133da8d0f65536dc7495c75b1f34bf960ccb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 12 Jun 2015 23:43:36 -0400 Subject: [PATCH 0852/3617] got first_available working with copy --- lib/ansible/plugins/action/copy.py | 48 +++++++++++++----------------- 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index 2d404029c5070f..90b1c3a9011cc2 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -43,14 +43,12 @@ def run(self, tmp=None, task_vars=dict()): dest = self._task.args.get('dest', None) raw = boolean(self._task.args.get('raw', 'no')) force = boolean(self._task.args.get('force', 'yes')) + faf = task_vars.get('first_available_file', None) - # FIXME: first available file needs to be reworked somehow... 
- #if (source is None and content is None and not 'first_available_file' in inject) or dest is None: - # result=dict(failed=True, msg="src (or content) and dest are required") - # return ReturnData(conn=conn, result=result) - #elif (source is not None or 'first_available_file' in inject) and content is not None: - # result=dict(failed=True, msg="src and content are mutually exclusive") - # return ReturnData(conn=conn, result=result) + if (source is None and content is None and faf is None) or dest is None: + return dict(failed=True, msg="src (or content) and dest are required") + elif (source is not None or faf is not None) and content is not None: + return dict(failed=True, msg="src and content are mutually exclusive") # Check if the source ends with a "/" source_trailing_slash = False @@ -65,7 +63,7 @@ def run(self, tmp=None, task_vars=dict()): try: # If content comes to us as a dict it should be decoded json. # We need to encode it back into a string to write it out. - if isinstance(content, dict): + if isinstance(content, dict) or isinstance(content, list): content_tempfile = self._create_content_tempfile(json.dumps(content)) else: content_tempfile = self._create_content_tempfile(content) @@ -73,27 +71,23 @@ def run(self, tmp=None, task_vars=dict()): except Exception as err: return dict(failed=True, msg="could not write content temp file: %s" % err) - ############################################################################################### - # FIXME: first_available_file needs to be reworked? 
- ############################################################################################### # if we have first_available_file in our vars # look up the files and use the first one we find as src - #elif 'first_available_file' in inject: - # found = False - # for fn in inject.get('first_available_file'): - # fn_orig = fn - # fnt = template.template(self.runner.basedir, fn, inject) - # fnd = utils.path_dwim(self.runner.basedir, fnt) - # if not os.path.exists(fnd) and '_original_file' in inject: - # fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False) - # if os.path.exists(fnd): - # source = fnd - # found = True - # break - # if not found: - # results = dict(failed=True, msg="could not find src in first_available_file list") - # return ReturnData(conn=conn, result=results) - ############################################################################################### + elif faf: + found = False + for fn in faf: + fn_orig = fn + fnt = self._templar.template(fn) + fnd = self._loader.path_dwim_relative(self._task._role._role_path, 'files', fnt) + of = task_vars.get('_original_file', None) + if not os.path.exists(fnd) and of is not None: + fnd = self._loader.path_dwim_relative(of, 'files', fnt) + if os.path.exists(fnd): + source = fnd + found = True + break + if not found: + return dict(failed=True, msg="could not find src in first_available_file list") else: if self._task._role is not None: source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source) From 491761f880c3b5c8d0a441d6378272947d15437e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 12 Jun 2015 23:53:56 -0400 Subject: [PATCH 0853/3617] added note to add faf deprecation --- lib/ansible/plugins/action/copy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index 90b1c3a9011cc2..355fed6d3aae2f 100644 --- a/lib/ansible/plugins/action/copy.py +++ 
b/lib/ansible/plugins/action/copy.py @@ -74,6 +74,7 @@ def run(self, tmp=None, task_vars=dict()): # if we have first_available_file in our vars # look up the files and use the first one we find as src elif faf: + #FIXME: issue deprecation warning for first_available_file, use with_first_found or lookup('first_found',...) instead found = False for fn in faf: fn_orig = fn From 8ee4c7266c32d82c4b24f3e51b9a89ae07b1caa2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 13 Jun 2015 00:10:17 -0400 Subject: [PATCH 0854/3617] corrected original_file code path to use actually use data from original file --- lib/ansible/plugins/action/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index 355fed6d3aae2f..ef80275ec0c30e 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -82,7 +82,7 @@ def run(self, tmp=None, task_vars=dict()): fnd = self._loader.path_dwim_relative(self._task._role._role_path, 'files', fnt) of = task_vars.get('_original_file', None) if not os.path.exists(fnd) and of is not None: - fnd = self._loader.path_dwim_relative(of, 'files', fnt) + fnd = self._loader.path_dwim_relative(of, 'files', of) if os.path.exists(fnd): source = fnd found = True From e7abe06440039b9a3bf897446b59e55d416ac957 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 13 Jun 2015 00:34:15 -0400 Subject: [PATCH 0855/3617] added first_found to template --- lib/ansible/plugins/action/template.py | 48 ++++++++++++-------------- 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index ea033807dff493..e841ab939c01f2 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -51,42 +51,38 @@ def run(self, tmp=None, task_vars=dict()): source = self._task.args.get('src', None) dest = self._task.args.get('dest', None) + faf = 
task_vars.get('first_available_file', None) - if (source is None and 'first_available_file' not in task_vars) or dest is None: + if (source is None and faf is not None) or dest is None: return dict(failed=True, msg="src and dest are required") if tmp is None: tmp = self._make_tmp_path() - ################################################################################################## - # FIXME: this all needs to be sorted out - ################################################################################################## - # if we have first_available_file in our vars - # look up the files and use the first one we find as src - #if 'first_available_file' in task_vars: - # found = False - # for fn in task_vars.get('first_available_file'): - # fn_orig = fn - # fnt = template.template(self.runner.basedir, fn, task_vars) - # fnd = utils.path_dwim(self.runner.basedir, fnt) - # if not os.path.exists(fnd) and '_original_file' in task_vars: - # fnd = utils.path_dwim_relative(task_vars['_original_file'], 'templates', fnt, self.runner.basedir, check=False) - # if os.path.exists(fnd): - # source = fnd - # found = True - # break - # if not found: - # result = dict(failed=True, msg="could not find src in first_available_file list") - # return ReturnData(conn=conn, comm_ok=False, result=result) - #else: - if 1: + if faf: + #FIXME: issue deprecation warning for first_available_file, use with_first_found or lookup('first_found',...) 
instead + found = False + for fn in faf: + fn_orig = fn + fnt = self._templar.template(fn) + fnd = self._loader.path_dwim(self._task._role_._role_path, 'templates', fnt) + + if not os.path.exists(fnd): + of = task_vars.get('_original_file', None) + if of is not None: + fnd = self._loader.path_dwim(self._task._role_._role_path, 'templates', of) + + if os.path.exists(fnd): + source = fnd + found = True + break + if not found: + return dict(failed=True, msg="could not find src in first_available_file list") + else: if self._task._role is not None: source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source) else: source = self._loader.path_dwim(source) - ################################################################################################## - # END FIXME - ################################################################################################## # Expand any user home dir specification dest = self._remote_expand_user(dest, tmp) From 382c6fe05b14b42465b79709e03574ce13f3e46f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 22:07:39 +0200 Subject: [PATCH 0856/3617] Adds basic configuration to ec2.ini to support ElastiCache Clusters and Nodes --- plugins/inventory/ec2.ini | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini index 6583160f0f7b7d..a835b01fe77c70 100644 --- a/plugins/inventory/ec2.ini +++ b/plugins/inventory/ec2.ini @@ -47,6 +47,9 @@ route53 = False # To exclude RDS instances from the inventory, uncomment and set to False. #rds = False +# To exclude ElastiCache instances from the inventory, uncomment and set to False. +#elasticache = False + # Additionally, you can specify the list of zones to exclude looking up in # 'route53_excluded_zones' as a comma-separated list. 
# route53_excluded_zones = samplezone1.com, samplezone2.com @@ -59,6 +62,12 @@ all_instances = False # 'all_rds_instances' to True return all RDS instances regardless of state. all_rds_instances = False +# By default, only ElastiCache clusters and nodes in the 'available' state +# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' +# to True return all ElastiCache clusters and nodes, regardless of state. +all_elasticache_clusters = False +all_elasticache_nodes = False + # API calls to EC2 are slow. For this reason, we cache the results of an API # call. Set this to the path you want cache files to be written to. Two files # will be written to this directory: @@ -89,6 +98,9 @@ group_by_tag_none = True group_by_route53_names = True group_by_rds_engine = True group_by_rds_parameter_group = True +group_by_elasticache_engine = True +group_by_elasticache_cluster = True +group_by_elasticache_parameter_group = True # If you only want to include hosts that match a certain regular expression # pattern_include = stage-* From bc80bd36afbf71b7feab71edc5dfcc5004a0e1fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 22:12:03 +0200 Subject: [PATCH 0857/3617] Adds the necessary logic to ec2.py to load ElastiCache related configuration --- plugins/inventory/ec2.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 16ac93f5ee4827..c7fa6bdb15ccbc 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -121,6 +121,7 @@ import boto from boto import ec2 from boto import rds +from boto import elasticache from boto import route53 import six @@ -232,6 +233,11 @@ def read_settings(self): if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') + # Include ElastiCache instances? 
+ self.elasticache_enabled = True + if config.has_option('ec2', 'elasticache'): + self.elasticache_enabled = config.getboolean('ec2', 'elasticache') + # Return all EC2 and RDS instances (if RDS is enabled) if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') @@ -242,6 +248,18 @@ def read_settings(self): else: self.all_rds_instances = False + # Return all ElastiCache clusters? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: + self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') + else: + self.all_elasticache_clusters = False + + # Return all ElastiCache nodes? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: + self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') + else: + self.all_elasticache_nodes = False + # Cache related cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) if not os.path.exists(cache_dir): @@ -272,6 +290,9 @@ def read_settings(self): 'group_by_route53_names', 'group_by_rds_engine', 'group_by_rds_parameter_group', + 'group_by_elasticache_engine', + 'group_by_elasticache_cluster', + 'group_by_elasticache_parameter_group', ] for option in group_by_options: if config.has_option('ec2', option): From 50b320615eee3235b5178637ad8793cefe79c7fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 22:13:27 +0200 Subject: [PATCH 0858/3617] Little improvement in the organization of the configuration loader method --- plugins/inventory/ec2.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index c7fa6bdb15ccbc..80afee7444cda0 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -238,11 +238,13 @@ def read_settings(self): if config.has_option('ec2', 'elasticache'): 
self.elasticache_enabled = config.getboolean('ec2', 'elasticache') - # Return all EC2 and RDS instances (if RDS is enabled) + # Return all EC2 instances? if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') else: self.all_instances = False + + # Return all RDS instances? (if RDS is enabled) if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') else: From 06c6db8e6bfc8d3484720aea8cb902fd971f853c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 22:21:40 +0200 Subject: [PATCH 0859/3617] Adds get_elasticache_clusters_by_region method to perform the API call to AWS (and sadly finds out that Boto support for ElastiCache is very outdated...) --- plugins/inventory/ec2.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 80afee7444cda0..f64f4a9315006e 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -357,6 +357,8 @@ def do_api_calls_update_cache(self): self.get_instances_by_region(region) if self.rds_enabled: self.get_rds_instances_by_region(region) + if self.elasticache_enabled: + self.get_elasticache_clusters_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) @@ -417,6 +419,40 @@ def get_rds_instances_by_region(self, region): error = "Looks like AWS RDS is down:\n%s" % e.message self.fail_with_error(error) + def get_elasticache_clusters_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache clusters in a + particular region.''' + + # ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) 
+ try: + conn = elasticache.connect_to_region(region) + if conn: + response = conn.describe_cache_clusters() + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS RDS is down:\n%s" % e.message + self.fail_with_error(error) + + try: + # Boto also doesn't provide wrapper classes to CacheClusters or + # CacheNodes. Because of that wo can't make use of the get_list + # method in the AWSQueryConnection. Let's do the work manually + clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] + + except KeyError as e: + error = "ElastiCache query to AWS failed (unexpected format)." + self.fail_with_error(error) + + for cluster in clusters: + self.add_elasticache_cluster(cluster, region) + def get_auth_error_message(self): ''' create an informative error message if there is an issue authenticating''' errors = ["Authentication error retrieving ec2 inventory."] From 2cd76cf0e3d160e1e8a7b31a35772ab71bdc75ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 22:41:05 +0200 Subject: [PATCH 0860/3617] Creates add_elasticache_cluster method to digest the API answer about ElastiCache clusters --- plugins/inventory/ec2.py | 88 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index f64f4a9315006e..0f6141345137b3 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -688,6 +688,94 @@ def add_rds_instance(self, instance, region): self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + def add_elasticache_cluster(self, cluster, region): + ''' Adds an ElastiCache cluster to the inventory and index, as long as + it's nodes are addressable ''' + + # Only want available clusters unless all_elasticache_clusters is True + 
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': + return + + # Select the best destination address + if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: + # Memcached cluster + dest = cluster['ConfigurationEndpoint']['Address'] + else: + # Redis sigle node cluster + dest = cluster['CacheNodes'][0]['Endpoint']['Address'] + + if not dest: + # Skip clusters we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, cluster['CacheClusterId']] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[cluster['CacheClusterId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC + # if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: + # vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) + # self.push(self.inventory, vpc_id_name, dest) + # if self.nested_groups: + # self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + 
if 'SecurityGroups' in cluster: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) + + # Inventory: Group by parameter group + if self.group_by_elasticache_parameter_group: + self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) + + # Inventory: Group by replication group + if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: + self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(cluster) def get_route53_records(self): ''' Get and store the map of resource records to domain names that From c6f2b08a6010d2309f25c3d82bd97dd3794562f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 22:57:03 +0200 Subject: [PATCH 0861/3617] Creates get_host_info_dict_from_describe_dict helper method to translate information from a 'describe' call (we don't have instance objects in 
this case) --- plugins/inventory/ec2.py | 41 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 0f6141345137b3..b2374cc26f3edc 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -775,7 +775,9 @@ def add_elasticache_cluster(self, cluster, region): # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) - self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(cluster) + host_info = self.get_host_info_dict_from_describe_dict(cluster) + + self.inventory["_meta"]["hostvars"][dest] = host_info def get_route53_records(self): ''' Get and store the map of resource records to domain names that @@ -870,6 +872,43 @@ def get_host_info_dict_from_instance(self, instance): return instance_vars + def get_host_info_dict_from_describe_dict(self, describe_dict): + ''' Parses the dictionary returned by the API call into a flat list + of parameters. This method should be used only when 'describe' is + used directly because Boto doesn't provide specific classes. 
''' + + host_info = {} + for key in describe_dict: + value = describe_dict[key] + key = self.to_safe('ec2_' + key) + + # Handle complex types + if key == 'ec2_ConfigurationEndpoint' and value: + host_info['ec2_configuration_endpoint_address'] = value['Address'] + host_info['ec2_configuration_endpoint_port'] = value['Port'] + if key == 'ec2_Endpoint' and value: + host_info['ec2_endpoint_address'] = value['Address'] + host_info['ec2_endpoint_port'] = value['Port'] + elif key == 'ec2_CacheParameterGroup': + host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] + host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] + elif key == 'ec2_SecurityGroups': + sg_ids = [] + for sg in value: + sg_ids.append(sg['SecurityGroupId']) + host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) + elif type(value) in [int, bool]: + host_info[key] = value + elif isinstance(value, six.string_types): + host_info[key] = value.strip() + elif type(value) == type(None): + host_info[key] = '' + + else: + pass + + return host_info + def get_host_info(self): ''' Get variables about a specific host ''' From dbb0304ceab81d1364e9fa9609cf994925abf745 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:01:13 +0200 Subject: [PATCH 0862/3617] Adds uncammelize helper method to put the labels in the expected output format --- plugins/inventory/ec2.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index b2374cc26f3edc..0352a5e4f47ed6 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -880,19 +880,19 @@ def get_host_info_dict_from_describe_dict(self, describe_dict): host_info = {} for key in describe_dict: value = describe_dict[key] - key = self.to_safe('ec2_' + key) + key = self.to_safe('ec2_' + self.uncammelize(key)) # Handle complex types - if key == 'ec2_ConfigurationEndpoint' and value: + if 
key == 'ec2_configuration_endpoint' and value: host_info['ec2_configuration_endpoint_address'] = value['Address'] host_info['ec2_configuration_endpoint_port'] = value['Port'] - if key == 'ec2_Endpoint' and value: + if key == 'ec2_endpoint' and value: host_info['ec2_endpoint_address'] = value['Address'] host_info['ec2_endpoint_port'] = value['Port'] - elif key == 'ec2_CacheParameterGroup': + elif key == 'ec2_cache_parameter_group': host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] - elif key == 'ec2_SecurityGroups': + elif key == 'ec2_security_groups': sg_ids = [] for sg in value: sg_ids.append(sg['SecurityGroupId']) @@ -972,6 +972,9 @@ def write_to_cache(self, data, filename): cache.write(json_data) cache.close() + def uncammelize(self, key): + temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be From 98a5531966ec4693ddb3f72f50498b7bd611434e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:03:15 +0200 Subject: [PATCH 0863/3617] Makes the API requests to return nodes' information too --- plugins/inventory/ec2.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 0352a5e4f47ed6..165e97099d9931 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -420,8 +420,8 @@ def get_rds_instances_by_region(self, region): self.fail_with_error(error) def get_elasticache_clusters_by_region(self, region): - ''' Makes an AWS API call to the list of ElastiCache clusters in a - particular region.''' + ''' Makes an AWS API call to the list of ElastiCache clusters (with + nodes' info) in a particular region.''' # ElastiCache boto module doesn't provide a get_all_intances method, # that's why 
we need to call describe directly (it would be called by @@ -429,7 +429,9 @@ def get_elasticache_clusters_by_region(self, region): try: conn = elasticache.connect_to_region(region) if conn: - response = conn.describe_cache_clusters() + # show_cache_node_info = True + # because we also want nodes' information + response = conn.describe_cache_clusters(None, None, None, True) except boto.exception.BotoServerError as e: error = e.reason From 2a242a0e1bb72dcbb226a5ef073103a5008f1c48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:08:10 +0200 Subject: [PATCH 0864/3617] Creates add_elasticache_node method in ec2.py --- plugins/inventory/ec2.py | 99 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 165e97099d9931..cec994798cf8c5 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -781,6 +781,105 @@ def add_elasticache_cluster(self, cluster, region): self.inventory["_meta"]["hostvars"][dest] = host_info + # Add the nodes + for node in cluster['CacheNodes']: + self.add_elasticache_node(node, cluster, region) + + def add_elasticache_node(self, node, cluster, region): + ''' Adds an ElastiCache node to the inventory and index, as long as + it is addressable ''' + + # Only want available nodes unless all_elasticache_nodes is True + if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': + return + + # Select the best destination address + dest = node['Endpoint']['Address'] + + if not dest: + # Skip nodes we cannot address (e.g. 
private VPC subnet) + return + + node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) + + # Add to index + self.index[dest] = [region, node_id] + + # Inventory: Group by node ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[node_id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', node_id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC + # if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: + # vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) + # self.push(self.inventory, vpc_id_name, dest) + # if self.nested_groups: + # self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + if 'SecurityGroups' in cluster: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine: + self.push(self.inventory, self.to_safe("elasticache_" + 
cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) + + # Inventory: Group by parameter group + # if self.group_by_elasticache_parameter_group: + # self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) + # if self.nested_groups: + # self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName'])) + + # Inventory: Group by replication group + # if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: + # self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) + # if self.nested_groups: + # self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe("elasticache_" + cluster['ReplicationGroupId'])) + + # Inventory: Group by ElastiCache Cluster + if self.group_by_elasticache_cluster: + self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) + + # Global Tag: all ElastiCache nodes + self.push(self.inventory, 'elasticache_nodes', dest) + + host_info = self.get_host_info_dict_from_describe_dict(node) + + if dest in self.inventory["_meta"]["hostvars"]: + self.inventory["_meta"]["hostvars"][dest].update(host_info) + else: + self.inventory["_meta"]["hostvars"][dest] = host_info + def get_route53_records(self): ''' Get and store the map of resource records to domain names that point to them. 
''' From e64daba8e72deee8b97d06ed2a3076ed32a607ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:10:33 +0200 Subject: [PATCH 0865/3617] Adds a flag (is_redis) to prevent duplicity of information about Redis single node clusters --- plugins/inventory/ec2.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index cec994798cf8c5..3dddbc65b2c93f 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -702,9 +702,13 @@ def add_elasticache_cluster(self, cluster, region): if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: # Memcached cluster dest = cluster['ConfigurationEndpoint']['Address'] + is_redis = False else: # Redis sigle node cluster + # Because all Redis clusters are single nodes, we'll merge the + # info from the cluster with info about the node dest = cluster['CacheNodes'][0]['Endpoint']['Address'] + is_redis = True if not dest: # Skip clusters we cannot address (e.g. 
private VPC subnet) @@ -720,13 +724,13 @@ def add_elasticache_cluster(self, cluster, region): self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) # Inventory: Group by region - if self.group_by_region: + if self.group_by_region and not is_redis: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone - if self.group_by_availability_zone: + if self.group_by_availability_zone and not is_redis: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: @@ -734,7 +738,7 @@ def add_elasticache_cluster(self, cluster, region): self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type - if self.group_by_instance_type: + if self.group_by_instance_type and not is_redis: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: @@ -748,7 +752,7 @@ def add_elasticache_cluster(self, cluster, region): # self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group - if self.group_by_security_group: + if self.group_by_security_group and not is_redis: if 'SecurityGroups' in cluster: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) @@ -757,7 +761,7 @@ def add_elasticache_cluster(self, cluster, region): self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine - if self.group_by_elasticache_engine: + if self.group_by_elasticache_engine and not is_redis: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) From 22020ac3cdf7586273ec362771227f616185b07c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 
14 Jun 2015 23:12:52 +0200 Subject: [PATCH 0866/3617] Adds the necessary config entries to ec2.ini, to support ElastiCache replication groups --- plugins/inventory/ec2.ini | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini index a835b01fe77c70..b6818e876c6665 100644 --- a/plugins/inventory/ec2.ini +++ b/plugins/inventory/ec2.ini @@ -65,6 +65,7 @@ all_rds_instances = False # By default, only ElastiCache clusters and nodes in the 'available' state # are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' # to True return all ElastiCache clusters and nodes, regardless of state. +all_elasticache_replication_groups = False all_elasticache_clusters = False all_elasticache_nodes = False @@ -101,6 +102,7 @@ group_by_rds_parameter_group = True group_by_elasticache_engine = True group_by_elasticache_cluster = True group_by_elasticache_parameter_group = True +group_by_elasticache_replication_group = True # If you only want to include hosts that match a certain regular expression # pattern_include = stage-* From 40ce0727470cf820999dc1591d76e964e57bbdd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:14:00 +0200 Subject: [PATCH 0867/3617] Adds the logic to process the new config entries about ElastiCache replication groups --- plugins/inventory/ec2.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 3dddbc65b2c93f..5004a704d9b839 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -250,6 +250,12 @@ def read_settings(self): else: self.all_rds_instances = False + # Return all ElastiCache replication groups? 
(if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: + self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') + else: + self.all_elasticache_replication_groups = False + # Return all ElastiCache clusters? (if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') @@ -295,6 +301,7 @@ def read_settings(self): 'group_by_elasticache_engine', 'group_by_elasticache_cluster', 'group_by_elasticache_parameter_group', + 'group_by_elasticache_replication_group', ] for option in group_by_options: if config.has_option('ec2', option): From c18f6cae11960735e9be6db0984c35df002abf9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:15:33 +0200 Subject: [PATCH 0868/3617] Creates get_elasticache_replication_groups_by_region method to handle the API call --- plugins/inventory/ec2.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 5004a704d9b839..5f80c47675a944 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -366,6 +366,7 @@ def do_api_calls_update_cache(self): self.get_rds_instances_by_region(region) if self.elasticache_enabled: self.get_elasticache_clusters_by_region(region) + self.get_elasticache_replication_groups_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) @@ -462,6 +463,40 @@ def get_elasticache_clusters_by_region(self, region): for cluster in clusters: self.add_elasticache_cluster(cluster, region) + def get_elasticache_replication_groups_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache replication groups + in a particular region.''' + + # 
ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) + try: + conn = elasticache.connect_to_region(region) + if conn: + response = conn.describe_replication_groups() + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS RDS is down:\n%s" % e.message + self.fail_with_error(error) + + try: + # Boto also doesn't provide wrapper classes to ReplicationGroups + # Because of that wo can't make use of the get_list method in the + # AWSQueryConnection. Let's do the work manually + replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] + + except KeyError as e: + error = "ElastiCache query to AWS failed (unexpected format)." + self.fail_with_error(error) + + for replication_group in replication_groups: + self.add_elasticache_replication_group(replication_group, region) + def get_auth_error_message(self): ''' create an informative error message if there is an issue authenticating''' errors = ["Authentication error retrieving ec2 inventory."] From 069ee116995bdab33302287fcf5bce9034c7d893 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:18:21 +0200 Subject: [PATCH 0869/3617] Creates add_elasticache_replication_group method in ec2.py dynamic inventory script --- plugins/inventory/ec2.py | 52 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 5f80c47675a944..078e07b97bda3e 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -926,6 +926,58 @@ def add_elasticache_node(self, node, cluster, region): else: self.inventory["_meta"]["hostvars"][dest] = host_info + def 
add_elasticache_replication_group(self, replication_group, region): + ''' Adds an ElastiCache replication group to the inventory and index ''' + + # Only want available clusters unless all_elasticache_replication_groups is True + if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': + return + + # Select the best destination address (PrimaryEndpoint) + dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] + + if not dest: + # Skip clusters we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, replication_group['ReplicationGroupId']] + + # Inventory: Group by ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[replication_group['ReplicationGroupId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone (doesn't apply to replication groups) + + # Inventory: Group by node type (doesn't apply to replication groups) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for replication groups + + # Inventory: Group by security group (doesn't apply to replication groups) + # Check this value in cluster level + + # Inventory: Group by engine (replication groups are always Redis) + if self.group_by_elasticache_engine: + self.push(self.inventory, 'elasticache_redis', dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', 'redis') + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) + + host_info = self.get_host_info_dict_from_describe_dict(replication_group) + + 
self.inventory["_meta"]["hostvars"][dest] = host_info + def get_route53_records(self): ''' Get and store the map of resource records to domain names that point to them. ''' From f25ad9dc51db9d906174dd7c0e7c1a8905845952 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:21:33 +0200 Subject: [PATCH 0870/3617] Adds the appropriate key checks for ElastiCache replication groups in get_dict_from_describe_dict method --- plugins/inventory/ec2.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 078e07b97bda3e..9aec945472c12a 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -1088,6 +1088,11 @@ def get_host_info_dict_from_describe_dict(self, describe_dict): if key == 'ec2_endpoint' and value: host_info['ec2_endpoint_address'] = value['Address'] host_info['ec2_endpoint_port'] = value['Port'] + if key == 'ec2_node_groups' and value: + host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] + host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] + if key == 'ec2_member_clusters' and value: + host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) elif key == 'ec2_cache_parameter_group': host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] From ffd74049da595a2d12b081a9b4c4e039a233da8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:24:51 +0200 Subject: [PATCH 0871/3617] Comments about the naming pattern in the script, that certainly deserves future refactoring --- plugins/inventory/ec2.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 9aec945472c12a..4b205c0d95e44d 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -1076,6 +1076,11 @@ def 
get_host_info_dict_from_describe_dict(self, describe_dict): of parameters. This method should be used only when 'describe' is used directly because Boto doesn't provide specific classes. ''' + # I really don't agree with prefixing everything with 'ec2' + # because EC2, RDS and ElastiCache are different services. + # I'm just following the pattern used until now to not break any + # compatibility. + host_info = {} for key in describe_dict: value = describe_dict[key] From 43f9a653d0c6edf0a6c69587ef76f094e7fa1e90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:27:16 +0200 Subject: [PATCH 0872/3617] Process CacheNodeIdsToReboot complex type for cache clusters --- plugins/inventory/ec2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 4b205c0d95e44d..4bdde428ced493 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -1099,6 +1099,7 @@ def get_host_info_dict_from_describe_dict(self, describe_dict): if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) elif key == 'ec2_cache_parameter_group': + host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] elif key == 'ec2_security_groups': From e692a18a2990505b37aede4c6e814141ec110e34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:29:05 +0200 Subject: [PATCH 0873/3617] Process information about primary clusters for ElastiCache replication groups --- plugins/inventory/ec2.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 4bdde428ced493..dddcf587afa88a 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -1096,6 +1096,11 @@ 
def get_host_info_dict_from_describe_dict(self, describe_dict): if key == 'ec2_node_groups' and value: host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] + for node in value[0]['NodeGroupMembers']: + if node['CurrentRole'] == 'primary': + host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] + host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] + host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) elif key == 'ec2_cache_parameter_group': From 41b034a5d2d2178e93ae5667a65028ad48307367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:29:55 +0200 Subject: [PATCH 0874/3617] Process information about replica clusters for ElastiCache replication groups --- plugins/inventory/ec2.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index dddcf587afa88a..76fc83497d05d7 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -1096,11 +1096,17 @@ def get_host_info_dict_from_describe_dict(self, describe_dict): if key == 'ec2_node_groups' and value: host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] + replica_count = 0 for node in value[0]['NodeGroupMembers']: if node['CurrentRole'] == 'primary': host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] + elif node['CurrentRole'] == 'replica': + host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] + host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] + 
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] + replica_count += 1 if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) elif key == 'ec2_cache_parameter_group': From 77a2ad0e8cc5b6d09a39d21a926060df1976edb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:32:10 +0200 Subject: [PATCH 0875/3617] Improves code organization in get_dict_from_describe_dict method --- plugins/inventory/ec2.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 76fc83497d05d7..9cb7219f66c981 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -1087,12 +1087,18 @@ def get_host_info_dict_from_describe_dict(self, describe_dict): key = self.to_safe('ec2_' + self.uncammelize(key)) # Handle complex types + + # Target: Memcached Cache Clusters if key == 'ec2_configuration_endpoint' and value: host_info['ec2_configuration_endpoint_address'] = value['Address'] host_info['ec2_configuration_endpoint_port'] = value['Port'] + + # Target: Cache Nodes and Redis Cache Clusters (single node) if key == 'ec2_endpoint' and value: host_info['ec2_endpoint_address'] = value['Address'] host_info['ec2_endpoint_port'] = value['Port'] + + # Target: Redis Replication Groups if key == 'ec2_node_groups' and value: host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] @@ -1107,25 +1113,41 @@ def get_host_info_dict_from_describe_dict(self, describe_dict): host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] replica_count += 1 + + # Target: Redis Replication Groups if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) + + # 
Target: All Cache Clusters elif key == 'ec2_cache_parameter_group': host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] + + # Target: Almost everything elif key == 'ec2_security_groups': sg_ids = [] for sg in value: sg_ids.append(sg['SecurityGroupId']) host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) + + # Target: Everything + # Preserve booleans and integers elif type(value) in [int, bool]: host_info[key] = value + + # Target: Everything + # Sanitize string values elif isinstance(value, six.string_types): host_info[key] = value.strip() + + # Target: Everything + # Replace None by an empty string elif type(value) == type(None): host_info[key] = '' else: + # Remove non-processed complex types pass return host_info From e8c3e3d64520f12d3afb224f6fc5e2723535873c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Sun, 14 Jun 2015 23:38:09 +0200 Subject: [PATCH 0876/3617] Cleans some unnecessary white spaces in ec2.py dynamic inventory plugin --- plugins/inventory/ec2.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 9cb7219f66c981..2c6066fc6afc22 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -420,7 +420,7 @@ def get_rds_instances_by_region(self, region): self.add_rds_instance(instance, region) except boto.exception.BotoServerError as e: error = e.reason - + if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": @@ -513,7 +513,7 @@ def get_auth_error_message(self): errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) return '\n'.join(errors) - + def fail_with_error(self, err_msg): '''log an error to std err for ansible-playbook 
to consume and exit''' sys.stderr.write(err_msg) @@ -1025,7 +1025,6 @@ def get_instance_route53_names(self, instance): return list(name_list) - def get_host_info_dict_from_instance(self, instance): instance_vars = {} for key in vars(instance): @@ -1225,7 +1224,6 @@ def to_safe(self, word): return re.sub("[^A-Za-z0-9\_]", "_", word) - def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' From ff15f374ad8e9ad03f301fae5d45eee358a9c707 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 14 Jun 2015 20:50:38 -0400 Subject: [PATCH 0877/3617] fixed new become settings, rearranged constants to find PE related vars easier --- lib/ansible/constants.py | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 98f058e21ccf96..7417eb73e4872b 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -104,7 +104,7 @@ def shell_expand_path(path): # sections in config file DEFAULTS='defaults' -# configurable things +# generaly configurable things DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts'))) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) @@ -120,8 +120,6 @@ def shell_expand_path(path): DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user) DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True) DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None)) -DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') -DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 
'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True) DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True) DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None)) @@ -130,36 +128,39 @@ def shell_expand_path(path): DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}') DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) -DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) -DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') -DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None) DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') -DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su') -DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True) -DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '') -DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') -DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', 
'')) # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf', islist=True) -#TODO: get rid of ternary chain mess +### PRIVILEGE ESCALATION ### +# Backwards Compat +DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True) +DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root') +DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su') +DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '') +DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True) +DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True) +DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root') +DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo') +DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H') +DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) + +# Become BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] -BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} -DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() +DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root') +DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None) +DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None) 
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) -# need to rethink impementing these 2 -DEFAULT_BECOME_EXE = None -#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo') -#DEFAULT_BECOME_FLAGS = get_config(p, DEFAULTS, 'become_flags', 'ANSIBLE_BECOME_FLAGS',DEFAULT_SUDO_FLAGS if DEFAULT_SUDO else DEFAULT_SU_FLAGS if DEFAULT_SU else '-H') +# Plugin paths DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins') DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins') DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins') @@ -174,6 +175,7 @@ def shell_expand_path(path): CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts') CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True) +# Display ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True) ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True) ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True) From a267f93c83d6f680cf590d2c6a393ffc5aa3e200 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 14 Jun 2015 21:05:23 -0400 Subject: [PATCH 0878/3617] removed incorrect assumption on become user being set --- lib/ansible/playbook/become.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/ansible/playbook/become.py 
b/lib/ansible/playbook/become.py index 0323a9b613b9d5..f01b48512fa1f6 100644 --- a/lib/ansible/playbook/become.py +++ b/lib/ansible/playbook/become.py @@ -60,10 +60,6 @@ def _preprocess_data_become(self, ds): self._detect_privilege_escalation_conflict(ds) - # Setting user implies setting become/sudo/su to true - if 'become_user' in ds and not ds.get('become', False): - ds['become'] = True - # Privilege escalation, backwards compatibility for sudo/su if 'sudo' in ds or 'sudo_user' in ds: ds['become_method'] = 'sudo' From a2486785188f44878cd58445970c27b067fa2534 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 14 Jun 2015 22:35:53 -0400 Subject: [PATCH 0879/3617] initial become support to ssh plugin - password prompt detection and incorrect passwrod detection to connection info - sudoable flag to avoid become on none pe'able commands --- lib/ansible/executor/connection_info.py | 147 +++++++++++++++---- lib/ansible/plugins/connections/__init__.py | 2 +- lib/ansible/plugins/connections/ssh.py | 149 +++++++++----------- 3 files changed, 187 insertions(+), 111 deletions(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index d8881f54ab79f2..d52ae72c3965ed 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # (c) 2012-2014, Michael DeHaan # # This file is part of Ansible @@ -21,6 +23,8 @@ import pipes import random +import re +import gettext from ansible import constants as C from ansible.template import Templar @@ -29,6 +33,40 @@ __all__ = ['ConnectionInformation'] +SU_PROMPT_LOCALIZATIONS = [ + 'Password', + '암호', + 'パスワード', + 'Adgangskode', + 'Contraseña', + 'Contrasenya', + 'Hasło', + 'Heslo', + 'Jelszó', + 'Lösenord', + 'Mật khẩu', + 'Mot de passe', + 'Parola', + 'Parool', + 'Pasahitza', + 'Passord', + 'Passwort', + 'Salasana', + 'Sandi', + 'Senha', + 'Wachtwoord', + 'ססמה', + 'Лозинка', + 'Парола', + 'Пароль', + 'गुप्तशब्द', + 
'शब्दकूट', + 'సంకేతపదము', + 'හස්පදය', + '密码', + '密碼', +] + # the magic variable mapping dictionary below is used to translate # host/inventory variables to fields in the ConnectionInformation # object. The dictionary values are tuples, to account for aliases @@ -44,6 +82,40 @@ shell = ('ansible_shell_type',), ) +SU_PROMPT_LOCALIZATIONS = [ + 'Password', + '암호', + 'パスワード', + 'Adgangskode', + 'Contraseña', + 'Contrasenya', + 'Hasło', + 'Heslo', + 'Jelszó', + 'Lösenord', + 'Mật khẩu', + 'Mot de passe', + 'Parola', + 'Parool', + 'Pasahitza', + 'Passord', + 'Passwort', + 'Salasana', + 'Sandi', + 'Senha', + 'Wachtwoord', + 'ססמה', + 'Лозинка', + 'Парола', + 'Пароль', + 'गुप्तशब्द', + 'शब्दकूट', + 'సంకేతపదము', + 'හස්පදය', + '密码', + '密碼', +] + class ConnectionInformation: ''' @@ -72,6 +144,14 @@ def __init__(self, play=None, options=None, passwords=None): self.become_method = None self.become_user = None self.become_pass = passwords.get('become_pass','') + self.become_exe = None + self.become_flags = None + + # backwards compat + self.sudo_exe = None + self.sudo_flags = None + self.su_exe = None + self.su_flags = None # general flags (should we move out?) 
self.verbosity = 0 @@ -202,25 +282,20 @@ def set_task_and_host_override(self, task, host): return new_info - def make_become_cmd(self, cmd, executable, become_settings=None): + def make_become_cmd(self, cmd, executable ): + """ helper function to create privilege escalation commands """ - """ - helper function to create privilege escalation commands - """ - - # FIXME: become settings should probably be stored in the connection info itself - if become_settings is None: - become_settings = {} - - randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) - success_key = 'BECOME-SUCCESS-%s' % randbits prompt = None - becomecmd = None + success_key = None - executable = executable or '$SHELL' - - success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd)) if self.become: + + becomecmd = None + randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) + success_key = 'BECOME-SUCCESS-%s' % randbits + executable = executable or '$SHELL' + success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd)) + if self.become_method == 'sudo': # Rather than detect if sudo wants a password this time, -k makes sudo always ask for # a password if one is required. Passing a quoted compound command to sudo (or sudo -s) @@ -228,24 +303,33 @@ def make_become_cmd(self, cmd, executable, become_settings=None): # string to the user's shell. We loop reading output until we see the randomly-generated # sudo prompt set with the -p option. 
prompt = '[sudo via ansible, key=%s] password: ' % randbits - exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE) - flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS) + exe = self.become_exe or self.sudo_exe or 'sudo' + flags = self.become_flags or self.sudo_flags or '' becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \ (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd) elif self.become_method == 'su': - exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE) - flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS) + + def detect_su_prompt(data): + SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE) + return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data)) + + prompt = su_prompt() + exe = self.become_exe or self.su_exe or 'su' + flags = self.become_flags or self.su_flags or '' becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd) elif self.become_method == 'pbrun': - exe = become_settings.get('pbrun_exe', 'pbrun') - flags = become_settings.get('pbrun_flags', '') + + prompt='assword:' + exe = self.become_exe or 'pbrun' + flags = self.become_flags or '' becomecmd = '%s -b -l %s -u %s %s' % (exe, flags, self.become_user, success_cmd) elif self.become_method == 'pfexec': - exe = become_settings.get('pfexec_exe', 'pbrun') - flags = become_settings.get('pfexec_flags', '') + + exe = self.become_exe or 'pfexec' + flags = self.become_flags or '' # No user as it uses it's own exec_attr to figure it out becomecmd = '%s %s "%s"' % (exe, flags, success_cmd) @@ -254,11 +338,20 @@ def make_become_cmd(self, cmd, executable, become_settings=None): return (('%s -c ' % executable) + pipes.quote(becomecmd), prompt, success_key) - return (cmd, "", "") + return (cmd, prompt, success_key) + + def check_become_success(self, output, success_key): + return success_key in output + + def 
check_password_prompt(self, output, prompt): + if isinstance(prompt, basestring): + return output.endswith(prompt) + else: + return prompt(output) - def check_become_success(self, output, become_settings): - #TODO: implement - pass + def check_incorrect_password(self, output, prompt): + incorrect_password = gettext.dgettext(self.become_method, "Sorry, try again.") + return output.endswith(incorrect_password) def _get_fields(self): return [i for i in self.__dict__.keys() if i[:1] != '_'] diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 1d3a2bdeede1f5..449d1379ef66e6 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -94,7 +94,7 @@ def _connect(self): @ensure_connect @abstractmethod - def exec_command(self, cmd, tmp_path, executable=None, in_data=None): + def exec_command(self, cmd, tmp_path, executable=None, in_data=None, sudoable=True): """Run a command on the remote host""" pass diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 44efbf901ef01b..353f24006581f5 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -110,9 +110,7 @@ def _connect(self): "-o", "PasswordAuthentication=no") if self._connection_info.remote_user is not None and self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]: self._common_args += ("-o", "User={0}".format(self._connection_info.remote_user)) - # FIXME: figure out where this goes - #self._common_args += ("-o", "ConnectTimeout={0}".format(self.runner.timeout)) - self._common_args += ("-o", "ConnectTimeout=15") + self._common_args += ("-o", "ConnectTimeout={0}".format(self._connection_info.timeout)) self._connected = True @@ -171,24 +169,14 @@ def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): while True: rfd, wfd, efd = select.select(rpipes, [], rpipes, 1) - # FIXME: su/sudo stuff - # 
fail early if the sudo/su password is wrong - #if self.runner.sudo and sudoable: - # if self.runner.sudo_pass: - # incorrect_password = gettext.dgettext( - # "sudo", "Sorry, try again.") - # if stdout.endswith("%s\r\n%s" % (incorrect_password, - # prompt)): - # raise AnsibleError('Incorrect sudo password') - # - # if stdout.endswith(prompt): - # raise AnsibleError('Missing sudo password') - # - #if self.runner.su and su and self.runner.su_pass: - # incorrect_password = gettext.dgettext( - # "su", "Sorry") - # if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)): - # raise AnsibleError('Incorrect su password') + # fail early if the become password is wrong + if self._connection_info.become and sudoable: + if self._connection_info.become_pass: + if self._connection_info.check_incorrect_password(stdout, prompt): + raise AnsibleError('Incorrect %s password', self._connection_info.become_method) + + elif self._connection_info.check_password_prompt(stdout, prompt): + raise AnsibleError('Missing %s password', self._connection_info.become_method) if p.stdout in rfd: dat = os.read(p.stdout.fileno(), 9000) @@ -270,10 +258,10 @@ def not_in_host_file(self, host): self._display.vvv("EXEC previous known host file not found for {0}".format(host)) return True - def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): + def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoable=True): ''' run a command on the remote host ''' - super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) + super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data, sudoable=False) host = self._connection_info.remote_addr @@ -294,6 +282,11 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): ssh_cmd += ['-6'] ssh_cmd.append(host) + prompt = None + success_key = '' + if sudoable: + cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd, executable) + 
ssh_cmd.append(cmd) self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=host) @@ -306,72 +299,62 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): # fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX) # fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX) + # create process (p, stdin) = self._run(ssh_cmd, in_data) - self._send_password() + if prompt: + self._send_password() no_prompt_out = '' no_prompt_err = '' - # FIXME: su/sudo stuff - #if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \ - # (self.runner.su and su and self.runner.su_pass): - # # several cases are handled for sudo privileges with password - # # * NOPASSWD (tty & no-tty): detect success_key on stdout - # # * without NOPASSWD: - # # * detect prompt on stdout (tty) - # # * detect prompt on stderr (no-tty) - # fcntl.fcntl(p.stdout, fcntl.F_SETFL, - # fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) - # fcntl.fcntl(p.stderr, fcntl.F_SETFL, - # fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) - # sudo_output = '' - # sudo_errput = '' - # - # while True: - # if success_key in sudo_output or \ - # (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \ - # (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)): - # break - # - # rfd, wfd, efd = select.select([p.stdout, p.stderr], [], - # [p.stdout], self.runner.timeout) - # if p.stderr in rfd: - # chunk = p.stderr.read() - # if not chunk: - # raise AnsibleError('ssh connection closed waiting for sudo or su password prompt') - # sudo_errput += chunk - # incorrect_password = gettext.dgettext( - # "sudo", "Sorry, try again.") - # if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)): - # raise AnsibleError('Incorrect sudo password') - # elif sudo_errput.endswith(prompt): - # stdin.write(self.runner.sudo_pass + '\n') - # - # if p.stdout in rfd: - # chunk = p.stdout.read() - # if not chunk: - # raise AnsibleError('ssh connection closed waiting for sudo 
or su password prompt') - # sudo_output += chunk - # - # if not rfd: - # # timeout. wrap up process communication - # stdout = p.communicate() - # raise AnsibleError('ssh connection error waiting for sudo or su password prompt') - # - # if success_key not in sudo_output: - # if sudoable: - # stdin.write(self.runner.sudo_pass + '\n') - # elif su: - # stdin.write(self.runner.su_pass + '\n') - # else: - # no_prompt_out += sudo_output - # no_prompt_err += sudo_errput - - #(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt) - # FIXME: the prompt won't be here anymore - prompt="" - (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, prompt=prompt) + q(self._connection_info.password) + if self._connection_info.become and sudoable and self._connection_info.password: + # several cases are handled for sudo privileges with password + # * NOPASSWD (tty & no-tty): detect success_key on stdout + # * without NOPASSWD: + # * detect prompt on stdout (tty) + # * detect prompt on stderr (no-tty) + fcntl.fcntl(p.stdout, fcntl.F_SETFL, + fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, + fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) + become_output = '' + become_errput = '' + + while True: + if self._connection_info.check_become_success(become_output, success_key) or \ + self._connection_info.check_password_prompt(become_output, prompt ): + break + rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._connection_info.timeout) + if p.stderr in rfd: + chunk = p.stderr.read() + if not chunk: + raise AnsibleError('ssh connection closed waiting for privilege escalation password prompt') + become_errput += chunk + + if self._connection_info.check_incorrect_password(become_errput, prompt): + raise AnsibleError('Incorrect %s password', self._connection_info.become_method) + + if p.stdout in rfd: + chunk = p.stdout.read() + if not chunk: + raise 
AnsibleError('ssh connection closed waiting for sudo or su password prompt') + become_output += chunk + + if not rfd: + # timeout. wrap up process communication + stdout = p.communicate() + raise AnsibleError('ssh connection error waiting for sudo or su password prompt') + + if not self._connection_info.check_become_success(become_output, success_key): + if sudoable: + stdin.write(self._connection_info.password + '\n') + else: + no_prompt_out += become_output + no_prompt_err += become_errput + + (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt) #if C.HOST_KEY_CHECKING and not_in_host_file: # # lock around the initial SSH connectivity so the user prompt about whether to add From de82c953f2886dd0bf69277d9a30c723aecff822 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 14 Jun 2015 23:19:49 -0400 Subject: [PATCH 0880/3617] added privilege escalation special var mapping --- lib/ansible/executor/connection_info.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index d52ae72c3965ed..5d43725b51318d 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -80,6 +80,22 @@ password = ('ansible_ssh_pass', 'ansible_password'), private_key_file = ('ansible_ssh_private_key_file', 'ansible_private_key_file'), shell = ('ansible_shell_type',), + become = ('ansible_become',), + become_method = ('ansible_become_method',), + become_user = ('ansible_become_user',), + become_pass = ('ansible_become_password','ansible_become_pass'), + become_exe = ('ansible_become_exe',), + become_flags = ('ansible_become_flags',), + sudo = ('ansible_sudo',), + sudo_user = ('ansible_sudo_user',), + sudo_pass = ('ansible_sudo_password',), + sudo_exe = ('ansible_sudo_exe',), + sudo_flags = ('ansible_sudo_flags',), + su = ('ansible_su',), + su_user = ('ansible_su_user',), + su_pass = 
('ansible_su_password',), + su_exe = ('ansible_su_exe',), + su_flags = ('ansible_su_flags',), ) SU_PROMPT_LOCALIZATIONS = [ From c3ccf26b7027e7c282d3313d2dd58571b7431e84 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 14 Jun 2015 23:45:56 -0400 Subject: [PATCH 0881/3617] added become check back to connections --- lib/ansible/plugins/connections/__init__.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 449d1379ef66e6..921c4e38825ba4 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -63,10 +63,10 @@ def __init__(self, connection_info, new_stdin, *args, **kwargs): if not hasattr(self, '_connected'): self._connected = False - def _become_method_supported(self, become_method): + def _become_method_supported(self): ''' Checks if the current class supports this privilege escalation method ''' - if become_method in self.__class__.become_methods: + if self._connection_info.become_method in self.__class__.become_methods: return True raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method) @@ -90,7 +90,10 @@ def transport(self): @abstractmethod def _connect(self): """Connect to the host we've been initialized with""" - pass + + # Check if PE is supported + if self._connection_info.become: + self.__become_method_supported() @ensure_connect @abstractmethod From ff443d4534d98d0ec567f7a3aed97a58562cffcd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 14 Jun 2015 23:48:03 -0400 Subject: [PATCH 0882/3617] added note to figurte out correct var udpate on connection_info --- lib/ansible/executor/connection_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 5d43725b51318d..3e7586e2ca9832 100644 --- 
a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -385,7 +385,7 @@ def update_vars(self, variables): ''' Adds 'magic' variables relating to connections to the variable dictionary provided. ''' - + #FIXME: is this reversed? why use this and not set_task_and_host_override? variables['ansible_connection'] = self.connection variables['ansible_ssh_host'] = self.remote_addr variables['ansible_ssh_pass'] = self.password From bac35ae773a0a6bc792ab739961ce595ea71e342 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 14 Jun 2015 23:49:10 -0400 Subject: [PATCH 0883/3617] set correct become mehotds for plugin fixed mixup with remote password vs become_password --- lib/ansible/plugins/connections/ssh.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 353f24006581f5..471b4143e22907 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -40,6 +40,8 @@ class Connection(ConnectionBase): ''' ssh based connections ''' + become_methods = frozenset(C.BECOME_METHODS).difference(['runas']) + def __init__(self, *args, **kwargs): # SSH connection specific init stuff self._common_args = [] @@ -261,7 +263,7 @@ def not_in_host_file(self, host): def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoable=True): ''' run a command on the remote host ''' - super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data, sudoable=False) + super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data, sudoable=sudoable) host = self._connection_info.remote_addr @@ -303,13 +305,11 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoab # create process (p, stdin) = self._run(ssh_cmd, in_data) - if prompt: - self._send_password() + self._send_password() no_prompt_out = '' no_prompt_err = '' - 
q(self._connection_info.password) - if self._connection_info.become and sudoable and self._connection_info.password: + if self._connection_info.become and sudoable and self._connection_info.become_pass: # several cases are handled for sudo privileges with password # * NOPASSWD (tty & no-tty): detect success_key on stdout # * without NOPASSWD: @@ -349,7 +349,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoab if not self._connection_info.check_become_success(become_output, success_key): if sudoable: - stdin.write(self._connection_info.password + '\n') + stdin.write(self._connection_info.become_pass + '\n') else: no_prompt_out += become_output no_prompt_err += become_errput From 580993fef7f3b18c194c315ba928723970fd5649 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 00:09:25 -0400 Subject: [PATCH 0884/3617] enabled initial support for password prompt on become - moved check prompt/password functions to connection, make more senes there - TODO: consider moving make_become to connection from connection_info - removed executable param that was never overriden outside of connection info --- lib/ansible/executor/connection_info.py | 16 +--------------- lib/ansible/plugins/action/__init__.py | 18 ++++++++---------- lib/ansible/plugins/connections/__init__.py | 17 ++++++++++++++++- lib/ansible/plugins/connections/local.py | 6 +++--- lib/ansible/plugins/connections/ssh.py | 18 +++++++++--------- 5 files changed, 37 insertions(+), 38 deletions(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 3e7586e2ca9832..24e42a9701499f 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -24,7 +24,6 @@ import pipes import random import re -import gettext from ansible import constants as C from ansible.template import Templar @@ -298,7 +297,7 @@ def set_task_and_host_override(self, task, host): return new_info - def make_become_cmd(self, 
cmd, executable ): + def make_become_cmd(self, cmd, executable='/bin/sh'): """ helper function to create privilege escalation commands """ prompt = None @@ -356,19 +355,6 @@ def detect_su_prompt(data): return (cmd, prompt, success_key) - def check_become_success(self, output, success_key): - return success_key in output - - def check_password_prompt(self, output, prompt): - if isinstance(prompt, basestring): - return output.endswith(prompt) - else: - return prompt(output) - - def check_incorrect_password(self, output, prompt): - incorrect_password = gettext.dgettext(self.become_method, "Sorry, try again.") - return output.endswith(incorrect_password) - def _get_fields(self): return [i for i in self.__dict__.keys() if i[:1] != '_'] diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 4b2d7abe27aed3..f941d1304cad97 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -425,7 +425,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var debug("done with _execute_module (%s, %s)" % (module_name, module_args)) return data - def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, in_data=None): + def _low_level_execute_command(self, cmd, tmp, sudoable=True, in_data=None): ''' This is the function which executes the low level shell command, which may be commands to create/remove directories for temporary files, or to @@ -438,17 +438,15 @@ def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, i debug("no command, exiting _low_level_execute_command()") return dict(stdout='', stderr='') - if executable is None: - executable = C.DEFAULT_EXECUTABLE - - prompt = None - success_key = None - - if sudoable: - cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd, executable) + #FIXME: disabled as this should happen in the connection plugin, verify before removing + #prompt = None + #success_key = 
None + # + #if sudoable: + # cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd) debug("executing the command %s through the connection" % cmd) - rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, executable=executable, in_data=in_data) + rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, in_data=in_data, sudoable=sudoable) debug("command execution done") if not isinstance(stdout, basestring): diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 921c4e38825ba4..45a07a9c307e4f 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import gettext from abc import ABCMeta, abstractmethod, abstractproperty from functools import wraps @@ -97,7 +98,7 @@ def _connect(self): @ensure_connect @abstractmethod - def exec_command(self, cmd, tmp_path, executable=None, in_data=None, sudoable=True): + def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): """Run a command on the remote host""" pass @@ -117,3 +118,17 @@ def fetch_file(self, in_path, out_path): def close(self): """Terminate the connection""" pass + + def check_become_success(self, output, success_key): + return success_key in output + + def check_password_prompt(self, output, prompt): + if isinstance(prompt, basestring): + return output.endswith(prompt) + else: + return prompt(output) + + def check_incorrect_password(self, output, prompt): + incorrect_password = gettext.dgettext(self._connection_info.become_method, "Sorry, try again.") + return output.endswith(incorrect_password) + diff --git a/lib/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py index 85bc51de0aee1b..5915569b0249c2 100644 --- a/lib/ansible/plugins/connections/local.py +++ b/lib/ansible/plugins/connections/local.py @@ -46,10 +46,10 @@ def 
_connect(self, port=None): self._connected = True return self - def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): + def exec_command(self, cmd, tmp_path, in_data=None): ''' run a command on the local host ''' - super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) + super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data) debug("in local.exec_command()") # su requires to be run from a terminal, and therefore isn't supported here (yet?) @@ -59,7 +59,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") - executable = executable.split()[0] if executable else None + executable = self._connection_info.executable.split()[0] if self._connection_info.executable else None self._display.vvv("{0} EXEC {1}".format(self._connection_info.remote_addr, cmd)) # FIXME: cwd= needs to be set to the basedir of the playbook diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 471b4143e22907..b29418c9962414 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -174,10 +174,10 @@ def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): # fail early if the become password is wrong if self._connection_info.become and sudoable: if self._connection_info.become_pass: - if self._connection_info.check_incorrect_password(stdout, prompt): + if self.check_incorrect_password(stdout, prompt): raise AnsibleError('Incorrect %s password', self._connection_info.become_method) - elif self._connection_info.check_password_prompt(stdout, prompt): + elif self.check_password_prompt(stdout, prompt): raise AnsibleError('Missing %s password', self._connection_info.become_method) if p.stdout in rfd: @@ -260,10 +260,10 @@ def not_in_host_file(self, host): self._display.vvv("EXEC previous known 
host file not found for {0}".format(host)) return True - def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoable=True): + def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ''' run a command on the remote host ''' - super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data, sudoable=sudoable) + super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable) host = self._connection_info.remote_addr @@ -287,7 +287,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoab prompt = None success_key = '' if sudoable: - cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd, executable) + cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd) ssh_cmd.append(cmd) self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=host) @@ -323,8 +323,8 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoab become_errput = '' while True: - if self._connection_info.check_become_success(become_output, success_key) or \ - self._connection_info.check_password_prompt(become_output, prompt ): + if self.check_become_success(become_output, success_key) or \ + self.check_password_prompt(become_output, prompt ): break rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._connection_info.timeout) if p.stderr in rfd: @@ -333,7 +333,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoab raise AnsibleError('ssh connection closed waiting for privilege escalation password prompt') become_errput += chunk - if self._connection_info.check_incorrect_password(become_errput, prompt): + if self.check_incorrect_password(become_errput, prompt): raise AnsibleError('Incorrect %s password', self._connection_info.become_method) if p.stdout in rfd: @@ -347,7 +347,7 @@ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None, sudoab stdout = p.communicate() 
raise AnsibleError('ssh connection error waiting for sudo or su password prompt') - if not self._connection_info.check_become_success(become_output, success_key): + if not self.check_become_success(become_output, success_key): if sudoable: stdin.write(self._connection_info.become_pass + '\n') else: From 956937b110f64b56fb3640a56865cab53b025452 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 00:15:31 -0400 Subject: [PATCH 0885/3617] made executable shell configurable again --- lib/ansible/executor/connection_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 24e42a9701499f..08b42b7ce16451 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -297,7 +297,7 @@ def set_task_and_host_override(self, task, host): return new_info - def make_become_cmd(self, cmd, executable='/bin/sh'): + def make_become_cmd(self, cmd, executable=C.DEFAULT_EXECUTABLE): """ helper function to create privilege escalation commands """ prompt = None From 872448e9e8d1da6ef94e7363b0966b48f5df475b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 01:02:27 -0400 Subject: [PATCH 0886/3617] updated connection info update_vars to only update if data is not alreayd present aslo added comment clarifying why we do this --- lib/ansible/executor/connection_info.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 08b42b7ce16451..f2ab52fa68d996 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -370,11 +370,12 @@ def post_validate(self, templar): def update_vars(self, variables): ''' Adds 'magic' variables relating to connections to the variable dictionary provided. + In case users need to access from the play, this is a legacy from runner. 
''' - #FIXME: is this reversed? why use this and not set_task_and_host_override? - variables['ansible_connection'] = self.connection - variables['ansible_ssh_host'] = self.remote_addr - variables['ansible_ssh_pass'] = self.password - variables['ansible_ssh_port'] = self.port - variables['ansible_ssh_user'] = self.remote_user - variables['ansible_ssh_private_key_file'] = self.private_key_file + + #FIXME: remove password? possibly add become/sudo settings + for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file']: + if special_var not in variables: + for prop, varnames in MAGIC_VARIABLE_MAPPING.items(): + if special_var in varnames: + variables[special_var] = getattr(self, prop) From be8d797c23af943d3660dff2fa378d96a8609a46 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 01:07:02 -0400 Subject: [PATCH 0887/3617] fixed su prompt function reference --- lib/ansible/executor/connection_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index f2ab52fa68d996..2800e233535c20 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -329,7 +329,7 @@ def detect_su_prompt(data): SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' 
for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE) return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data)) - prompt = su_prompt() + prompt = detect_su_prompt exe = self.become_exe or self.su_exe or 'su' flags = self.become_flags or self.su_flags or '' becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd) From 5bac17de515de214cd6e5eae2fbfe089064e13ca Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 01:20:38 -0400 Subject: [PATCH 0888/3617] fixed pfexec test --- test/units/executor/test_connection_information.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/units/executor/test_connection_information.py b/test/units/executor/test_connection_information.py index 010639d3683a04..9d702b77abc220 100644 --- a/test/units/executor/test_connection_information.py +++ b/test/units/executor/test_connection_information.py @@ -126,6 +126,8 @@ def test_connection_info_make_become_cmd(self): su_flags = C.DEFAULT_SU_FLAGS pbrun_exe = 'pbrun' pbrun_flags = '' + pfexec_exe = 'pfexec' + pfexec_flags = '' (cmd, prompt, key) = conn_info.make_become_cmd(cmd=default_cmd, executable=default_exe) self.assertEqual(cmd, default_cmd) @@ -147,7 +149,7 @@ def test_connection_info_make_become_cmd(self): conn_info.become_method = 'pfexec' (cmd, prompt, key) = conn_info.make_become_cmd(cmd=default_cmd, executable="/bin/bash") - self.assertEqual(cmd, """%s -c '%s %s "'"'"'echo %s; %s'"'"'"'""" % (default_exe, pbrun_exe, pbrun_flags, key, default_cmd)) + self.assertEqual(cmd, """%s -c '%s %s "'"'"'echo %s; %s'"'"'"'""" % (default_exe, pfexec_exe, pfexec_flags, key, default_cmd)) conn_info.become_method = 'bad' self.assertRaises(AnsibleError, conn_info.make_become_cmd, cmd=default_cmd, executable="/bin/bash") From b89071e4858e5bf37846b347fab43d95b4785aef Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 01:30:03 -0400 Subject: [PATCH 0889/3617] now detects incorrect password with sudo and su (at least 
in english) --- lib/ansible/constants.py | 1 + lib/ansible/plugins/connections/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 7417eb73e4872b..8f9c5bf5103ff6 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -151,6 +151,7 @@ def shell_expand_path(path): DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True) # Become +BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''} #FIXME: deal with i18n BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas'] DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower() DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 45a07a9c307e4f..c38dd3bec43ed0 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -129,6 +129,6 @@ def check_password_prompt(self, output, prompt): return prompt(output) def check_incorrect_password(self, output, prompt): - incorrect_password = gettext.dgettext(self._connection_info.become_method, "Sorry, try again.") + incorrect_password = gettext.dgettext(self._connection_info.become_method, C.BECOME_ERROR_STRINGS[self._connection_info.become_method]) return output.endswith(incorrect_password) From 1ce1c52f6f553f2b57eb0935c86f65b6cff1446d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 01:40:53 -0400 Subject: [PATCH 0890/3617] centralized bad password handling, fixed outputing of become method --- lib/ansible/plugins/connections/__init__.py | 3 ++- lib/ansible/plugins/connections/ssh.py | 9 +++------ 2 files changed, 5 insertions(+), 
7 deletions(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index c38dd3bec43ed0..20ed2a80e332d0 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -130,5 +130,6 @@ def check_password_prompt(self, output, prompt): def check_incorrect_password(self, output, prompt): incorrect_password = gettext.dgettext(self._connection_info.become_method, C.BECOME_ERROR_STRINGS[self._connection_info.become_method]) - return output.endswith(incorrect_password) + if output.endswith(incorrect_password): + raise AnsibleError('Incorrect %s password' % self._connection_info.become_method) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index b29418c9962414..6f37154380df82 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -174,9 +174,7 @@ def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): # fail early if the become password is wrong if self._connection_info.become and sudoable: if self._connection_info.become_pass: - if self.check_incorrect_password(stdout, prompt): - raise AnsibleError('Incorrect %s password', self._connection_info.become_method) - + self.check_incorrect_password(stdout, prompt) elif self.check_password_prompt(stdout, prompt): raise AnsibleError('Missing %s password', self._connection_info.become_method) @@ -324,7 +322,7 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): while True: if self.check_become_success(become_output, success_key) or \ - self.check_password_prompt(become_output, prompt ): + self.check_password_prompt(become_output, prompt): break rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._connection_info.timeout) if p.stderr in rfd: @@ -333,8 +331,7 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): raise AnsibleError('ssh connection closed waiting for 
privilege escalation password prompt') become_errput += chunk - if self.check_incorrect_password(become_errput, prompt): - raise AnsibleError('Incorrect %s password', self._connection_info.become_method) + self.check_incorrect_password(become_errput, prompt) if p.stdout in rfd: chunk = p.stdout.read() From f2d22c1373fe80b19a18a0e91eec7e892a4788da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Mon, 15 Jun 2015 10:02:54 +0200 Subject: [PATCH 0891/3617] Fixes error messages to mention ElastiCache --- plugins/inventory/ec2.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 2c6066fc6afc22..3f0b950986bc33 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -447,7 +447,7 @@ def get_elasticache_clusters_by_region(self, region): if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": - error = "Looks like AWS RDS is down:\n%s" % e.message + error = "Looks like AWS ElastiCache is down:\n%s" % e.message self.fail_with_error(error) try: @@ -481,7 +481,7 @@ def get_elasticache_replication_groups_by_region(self, region): if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": - error = "Looks like AWS RDS is down:\n%s" % e.message + error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message self.fail_with_error(error) try: @@ -491,7 +491,7 @@ def get_elasticache_replication_groups_by_region(self, region): replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] except KeyError as e: - error = "ElastiCache query to AWS failed (unexpected format)." + error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." 
self.fail_with_error(error) for replication_group in replication_groups: From 2acfbce64de08a623598443547e090e7ca987e3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Mon, 15 Jun 2015 11:35:25 +0200 Subject: [PATCH 0892/3617] Removes unnecessary commented code and replaces with useful information --- plugins/inventory/ec2.py | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 3f0b950986bc33..e07efac4c0cf4c 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -786,12 +786,8 @@ def add_elasticache_cluster(self, cluster, region): if self.nested_groups: self.push_group(self.inventory, 'types', type_name) - # Inventory: Group by VPC - # if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: - # vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) - # self.push(self.inventory, vpc_id_name, dest) - # if self.nested_groups: - # self.push_group(self.inventory, 'vpcs', vpc_id_name) + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group and not is_redis: @@ -878,12 +874,8 @@ def add_elasticache_node(self, node, cluster, region): if self.nested_groups: self.push_group(self.inventory, 'types', type_name) - # Inventory: Group by VPC - # if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: - # vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) - # self.push(self.inventory, vpc_id_name, dest) - # if self.nested_groups: - # self.push_group(self.inventory, 'vpcs', vpc_id_name) + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group: @@ -900,17 +892,9 @@ def add_elasticache_node(self, node, cluster, region): if 
self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) - # Inventory: Group by parameter group - # if self.group_by_elasticache_parameter_group: - # self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) - # if self.nested_groups: - # self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName'])) + # Inventory: Group by parameter group (done at cluster level) - # Inventory: Group by replication group - # if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: - # self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) - # if self.nested_groups: - # self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe("elasticache_" + cluster['ReplicationGroupId'])) + # Inventory: Group by replication group (done at cluster level) # Inventory: Group by ElastiCache Cluster if self.group_by_elasticache_cluster: From d164c9c7a0f0c2c2c2db6edf3092b41f0beccaa7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Mon, 15 Jun 2015 11:36:33 +0200 Subject: [PATCH 0893/3617] Adds explanation about all_elasticache_nodes and all_elastic_clusters settings --- plugins/inventory/ec2.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini index b6818e876c6665..c21e512c0da754 100644 --- a/plugins/inventory/ec2.ini +++ b/plugins/inventory/ec2.ini @@ -65,6 +65,11 @@ all_rds_instances = False # By default, only ElastiCache clusters and nodes in the 'available' state # are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' # to True return all ElastiCache clusters and nodes, regardless of state. 
+# +# Note that all_elasticache_nodes only applies to listed clusters. That means +# if you set all_elasticache_clusters to false, no node will be returned from +# unavailable clusters, regardless of the state and of what you set for +# all_elasticache_nodes. all_elasticache_replication_groups = False all_elasticache_clusters = False all_elasticache_nodes = False From 0d606b5705677539d9c0f17ea4a33744f8021ccc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 10:42:55 -0400 Subject: [PATCH 0894/3617] added cs_template to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b76d021d34eadc..17884e9dd6a650 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ New Modules: * cloudstack: cs_sshkeypair * cloudstack: cs_securitygroup * cloudstack: cs_securitygroup_rule + * cloudstack: cs_template * cloudstack: cs_vmsnapshot * datadog_monitor * expect From f576d29b6b8071f56498facc48c32f8b12bbcb73 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 11:02:51 -0400 Subject: [PATCH 0895/3617] allow for any non string iterable in listify --- lib/ansible/utils/listify.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py index d8ef025e0bb9b7..7bcf9ce802c71c 100644 --- a/lib/ansible/utils/listify.py +++ b/lib/ansible/utils/listify.py @@ -19,7 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type - +from collections import Iterable from ansible.template import Templar from ansible.template.safe_eval import safe_eval @@ -38,7 +38,7 @@ def listify_lookup_plugin_terms(terms, variables, loader): #TODO: check if this is needed as template should also return correct type already terms = safe_eval(terms) - if isinstance(terms, basestring) or not isinstance(terms, list) and not isinstance(terms, set): + if isinstance(terms, basestring) or not isinstance(terms, Iterable): terms = [ 
terms ] return terms From 8ae58f7ea3ee237b94e98f38be894e5618e535a0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 11:26:45 -0400 Subject: [PATCH 0896/3617] fixed executable, correctly this time --- lib/ansible/plugins/connections/local.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py index 5915569b0249c2..273bf1718f5d45 100644 --- a/lib/ansible/plugins/connections/local.py +++ b/lib/ansible/plugins/connections/local.py @@ -25,6 +25,8 @@ #import select #import fcntl +import ansible.constants as C + from ansible.errors import AnsibleError, AnsibleFileNotFound from ansible.plugins.connections import ConnectionBase @@ -46,7 +48,7 @@ def _connect(self, port=None): self._connected = True return self - def exec_command(self, cmd, tmp_path, in_data=None): + def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ''' run a command on the local host ''' super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data) @@ -59,7 +61,7 @@ def exec_command(self, cmd, tmp_path, in_data=None): if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") - executable = self._connection_info.executable.split()[0] if self._connection_info.executable else None + executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None self._display.vvv("{0} EXEC {1}".format(self._connection_info.remote_addr, cmd)) # FIXME: cwd= needs to be set to the basedir of the playbook From 670894e2bd951d8b79adbf1339cf131242fd4eb7 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 2 Jun 2015 14:16:39 -0500 Subject: [PATCH 0897/3617] Move building the play_ds into a method, that can be overridden --- lib/ansible/cli/adhoc.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 3607e3ee03d91b..9bc234507c9e86 100644 --- 
a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -65,6 +65,13 @@ def parse(self): return True + def _play_ds(self, pattern): + return dict( + name = "Ansible Ad-Hoc", + hosts = pattern, + gather_facts = 'no', + tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args))), ] + ) def run(self): ''' use Runner lib to do SSH things ''' @@ -117,13 +124,7 @@ def run(self): # results = runner.run() # create a pseudo-play to execute the specified module via a single task - play_ds = dict( - name = "Ansible Ad-Hoc", - hosts = pattern, - gather_facts = 'no', - tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args))), ] - ) - + play_ds = self._play_ds(pattern) play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) # now create a task queue manager to execute the play From 1d55e193c1041c907793aca91395eddc8a10a74c Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 15 Jun 2015 13:04:46 -0500 Subject: [PATCH 0898/3617] Expose the TaskQueueManager to self --- lib/ansible/cli/adhoc.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 9bc234507c9e86..e940a0224f6302 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -128,9 +128,9 @@ def run(self): play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) # now create a task queue manager to execute the play - tqm = None + self._tqm = None try: - tqm = TaskQueueManager( + self._tqm = TaskQueueManager( inventory=inventory, variable_manager=variable_manager, loader=loader, @@ -139,10 +139,10 @@ def run(self): passwords=passwords, stdout_callback='minimal', ) - result = tqm.run(play) + result = self._tqm.run(play) finally: - if tqm: - tqm.cleanup() + if self._tqm: + self._tqm.cleanup() return result From dcf81e3ffee84216696dba02e7b35a0d3cd3dd86 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: 
Mon, 15 Jun 2015 15:04:19 -0400 Subject: [PATCH 0899/3617] removed useless comments --- lib/ansible/plugins/connections/local.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py index 273bf1718f5d45..74df551f1369f7 100644 --- a/lib/ansible/plugins/connections/local.py +++ b/lib/ansible/plugins/connections/local.py @@ -114,7 +114,6 @@ def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) - #vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host) self._display.vvv("{0} PUT {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path)) if not os.path.exists(in_path): raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path)) @@ -132,7 +131,6 @@ def fetch_file(self, in_path, out_path): super(Connection, self).fetch_file(in_path, out_path) - #vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host) self._display.vvv("{0} FETCH {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path)) self.put_file(in_path, out_path) From dc31086a17dbef43b12600dce4a7377630611831 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 17:12:18 -0400 Subject: [PATCH 0900/3617] added with_dict test --- test/integration/roles/test_lookups/tasks/main.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index 44e8b18ccb4f75..89f9e3f886ba25 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -159,3 +159,13 @@ that: - "test_val == known_var_value.stdout" + +- name: set with_dict + shell: echo "{{ item.key + '=' + item.value }}" + register: keyval + with_dict: "{{ mydict }}" + +- name: compare dict return + assert: + that: + - "keyval.stdout == 'mykey=myval'" From 5ed2e440260e2d06d234634305f4d61e82413f6c Mon Sep 17 
00:00:00 2001 From: Brian Coca Date: Mon, 15 Jun 2015 17:42:40 -0400 Subject: [PATCH 0901/3617] adjusted with_dict test to now work --- test/integration/roles/test_lookups/tasks/main.yml | 6 ------ test/integration/roles/test_lookups/vars/main.yml | 3 +++ 2 files changed, 3 insertions(+), 6 deletions(-) create mode 100644 test/integration/roles/test_lookups/vars/main.yml diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index 89f9e3f886ba25..d5032083cf991a 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -162,10 +162,4 @@ - name: set with_dict shell: echo "{{ item.key + '=' + item.value }}" - register: keyval with_dict: "{{ mydict }}" - -- name: compare dict return - assert: - that: - - "keyval.stdout == 'mykey=myval'" diff --git a/test/integration/roles/test_lookups/vars/main.yml b/test/integration/roles/test_lookups/vars/main.yml new file mode 100644 index 00000000000000..5338487676d233 --- /dev/null +++ b/test/integration/roles/test_lookups/vars/main.yml @@ -0,0 +1,3 @@ +mydict: + mykey1: myval1 + mykey2: myval2 From 98f5534d9c08950ca60afecf4e1725459431d551 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 09:12:42 -0400 Subject: [PATCH 0902/3617] adaptaed to new exec signature should fix #11275 --- lib/ansible/plugins/connections/paramiko_ssh.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py index 5a5259c5fcc80d..457b1946d377ab 100644 --- a/lib/ansible/plugins/connections/paramiko_ssh.py +++ b/lib/ansible/plugins/connections/paramiko_ssh.py @@ -189,10 +189,10 @@ def _connect_uncached(self): return ssh - def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None): + def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ''' run a command on the remote host ''' - 
super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) + super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable) if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") From 9116ff1c2856da3c81f3d7c3878b0d98cb1e5964 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 09:19:37 -0400 Subject: [PATCH 0903/3617] replaced removed pager_print for print --- lib/ansible/cli/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 5be92683824e62..c6a4e75c47d7f7 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -415,16 +415,16 @@ def pager(text): ''' find reasonable way to display text ''' # this is a much simpler form of what is in pydoc.py if not sys.stdout.isatty(): - pager_print(text) + print(text) elif 'PAGER' in os.environ: if sys.platform == 'win32': - pager_print(text) + print(text) else: CLI.pager_pipe(text, os.environ['PAGER']) elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0: CLI.pager_pipe(text, 'less') else: - pager_print(text) + print(text) @staticmethod def pager_pipe(text, cmd): From b76dbb01ccf6e9cbd3a91b9a133f611cc7e38e99 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 09:20:15 -0400 Subject: [PATCH 0904/3617] generalized prereqs check added vaultfile class for action and lookup plugin usage --- lib/ansible/parsing/vault/__init__.py | 68 +++++++++++++++++++++------ 1 file changed, 54 insertions(+), 14 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 4cd7d2e80bbb6b..27780551f44441 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -86,6 +86,11 @@ def byte2int(bs): CIPHER_WHITELIST=['AES', 'AES256'] +def check_prereqs(): + + if not HAS_AES or not HAS_COUNTER or not 
HAS_PBKDF2 or not HAS_HASH: + raise errors.AnsibleError(CRYPTO_UPGRADE) + class VaultLib(object): def __init__(self, password): @@ -239,8 +244,7 @@ def _edit_file_helper(self, existing_data=None, cipher=None): def create_file(self): """ create a new encrypted file """ - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) + check_prereqs() if os.path.isfile(self.filename): raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) @@ -250,8 +254,7 @@ def create_file(self): def decrypt_file(self): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) + check_prereqs() if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) @@ -269,8 +272,7 @@ def decrypt_file(self): def edit_file(self): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) + check_prereqs() # decrypt to tmpfile tmpdata = self.read_data(self.filename) @@ -286,8 +288,7 @@ def edit_file(self): def view_file(self): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) + check_prereqs() # decrypt to tmpfile tmpdata = self.read_data(self.filename) @@ -302,8 +303,7 @@ def view_file(self): def encrypt_file(self): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) + check_prereqs() if not os.path.isfile(self.filename): raise errors.AnsibleError("%s does not exist" % self.filename) @@ -319,8 +319,7 @@ def encrypt_file(self): def rekey_file(self, new_password): - if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) + check_prereqs() # decrypt tmpdata = self.read_data(self.filename) @@ -370,6 +369,48 @@ def _pager_shell_command(self, filename): return pager +class VaultFile(object): + 
+ def __init__(self, password, filename): + self.password = password + + self.filename = filename + if not os.path.isfile(self.filename): + raise errors.AnsibleError("%s does not exist" % self.filename) + try: + self.filehandle = open(filename, "rb") + except Exception, e: + raise errors.AnsibleError("Could not open %s: %s" % (self.filename, str(e))) + + _, self.tmpfile = tempfile.mkstemp() + + def __del__(self): + self.filehandle.close() + os.unlink(self.tmplfile) + + def is_encrypted(self): + peak = self.filehandler.readline() + if peak.startswith(HEADER): + return True + else: + return False + + def get_decrypted(self): + + check_prereqs() + + if self.is_encrypted(): + tmpdata = self.filehandle.read() + this_vault = VaultLib(self.password) + dec_data = this_vault.decrypt(tmpdata) + if dec_data is None: + raise errors.AnsibleError("Decryption failed") + else: + self.tempfile.write(dec_data) + return self.tmpfile + else: + return self.filename + ######################################## # CIPHERS # ######################################## @@ -503,8 +544,7 @@ class VaultAES256(object): def __init__(self): - if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) + check_prereqs() def gen_key_initctr(self, password, salt): # 16 for AES 128, 32 for AES256 From c87591f76fb53619c6055071f8d3c6212d2cd437 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 09:28:27 -0400 Subject: [PATCH 0905/3617] updated to new exec_command signature --- lib/ansible/plugins/connections/winrm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py index 4da04b549a56d5..3fe769617e1c43 100644 --- a/lib/ansible/plugins/connections/winrm.py +++ b/lib/ansible/plugins/connections/winrm.py @@ -153,8 +153,8 @@ def _connect(self): self.protocol = self._winrm_connect() return self - def exec_command(self, cmd, tmp_path, executable='/bin/sh', 
in_data=None): - super(Connection, self).exec_command(cmd, tmp_path, executable=executable, in_data=in_data) + def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): + super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable) cmd = to_bytes(cmd) cmd_parts = shlex.split(cmd, posix=False) From b1574ecfced35050a0e9f7d184aef8ab4e01cb8b Mon Sep 17 00:00:00 2001 From: Vebryn Date: Tue, 16 Jun 2015 16:13:01 +0200 Subject: [PATCH 0906/3617] Update syslog_json.py localhost is better than locahost ;) --- plugins/callbacks/syslog_json.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/callbacks/syslog_json.py b/plugins/callbacks/syslog_json.py index 5ab764acfe754b..8e0b3e40916364 100644 --- a/plugins/callbacks/syslog_json.py +++ b/plugins/callbacks/syslog_json.py @@ -22,7 +22,7 @@ def __init__(self): self.logger.setLevel(logging.DEBUG) self.handler = logging.handlers.SysLogHandler( - address = (os.getenv('SYSLOG_SERVER','locahost'), + address = (os.getenv('SYSLOG_SERVER','localhost'), os.getenv('SYSLOG_PORT',514)), facility=logging.handlers.SysLogHandler.LOG_USER ) From daee298cb662f1d3e6b88b20b351302ab36cb8f9 Mon Sep 17 00:00:00 2001 From: Trond Hindenes Date: Tue, 16 Jun 2015 14:20:34 +0000 Subject: [PATCH 0907/3617] Bugfix: win_checksum.ps1 --- v1/ansible/module_utils/powershell.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/v1/ansible/module_utils/powershell.ps1 b/v1/ansible/module_utils/powershell.ps1 index 9606f47783b66c..a11e316989c6f0 100644 --- a/v1/ansible/module_utils/powershell.ps1 +++ b/v1/ansible/module_utils/powershell.ps1 @@ -151,7 +151,7 @@ Function Get-FileChecksum($path) { $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); - [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); + $hash = 
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); $fp.Dispose(); } ElseIf (Test-Path -PathType Container $path) From 423f1233c8dfe7c39852c66f8d982b841b679e9c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 10:26:00 -0400 Subject: [PATCH 0908/3617] removed typo file --- lib/ansible/executor/task_queue_manager.py: | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 lib/ansible/executor/task_queue_manager.py: diff --git a/lib/ansible/executor/task_queue_manager.py: b/lib/ansible/executor/task_queue_manager.py: deleted file mode 100644 index e69de29bb2d1d6..00000000000000 From d913f169a82a00c5291ee436d540ced5d24d44d5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 5 Jun 2015 07:25:37 -0400 Subject: [PATCH 0909/3617] Update failed_when integration test to be more thorough --- .../roles/test_failed_when/tasks/main.yml | 55 ++++++++++++++++--- 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/test/integration/roles/test_failed_when/tasks/main.yml b/test/integration/roles/test_failed_when/tasks/main.yml index 3492422e4389ee..a69cef74cf005f 100644 --- a/test/integration/roles/test_failed_when/tasks/main.yml +++ b/test/integration/roles/test_failed_when/tasks/main.yml @@ -16,13 +16,54 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -- name: Test failed_when behavior but catch it. - command: /bin/true - failed_when: 2 != 3 - register: failed +- name: command rc 0 failed_when_result undef + shell: exit 0 ignore_errors: True + register: result -- name: Assert that failed_when is true. 
- assert: +- assert: that: - - "failed.failed_when_result == True" \ No newline at end of file + - "'failed' not in result" + +- name: command rc 0 failed_when_result False + shell: exit 0 + failed_when: false + ignore_errors: true + register: result + +- assert: + that: + - "'failed' in result and not result.failed" + - "'failed_when_result' in result and not result.failed_when_result" + +- name: command rc 1 failed_when_result True + shell: exit 1 + failed_when: true + ignore_errors: true + register: result + +- assert: + that: + - "'failed' in result and result.failed" + - "'failed_when_result' in result and result.failed_when_result" + +- name: command rc 1 failed_when_result undef + shell: exit 1 + ignore_errors: true + register: result + +- assert: + that: + - "'failed' not in result" + +- name: command rc 1 failed_when_result False + shell: exit 1 + failed_when: false + ignore_errors: true + register: result + +- assert: + that: + - "'failed' in result and not result.failed" + - "'failed_when_result' in result and not result.failed_when_result" + From 4705a79a98bc5d9b63fe2358853a11580555a311 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 16 Jun 2015 11:00:03 -0400 Subject: [PATCH 0910/3617] Updating docs banners --- docsite/_themes/srtd/layout.html | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index b9d9d065c7bd5d..158f45008e9e74 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -200,8 +200,8 @@ - - + +
 

 
From 336f45f5b3dfa96437bcc947c4b2932f4d7e5919 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Tue, 16 Jun 2015 08:20:33 -0700 Subject: [PATCH 0911/3617] Add serf inventory plugin Add inventory plugin for [Serf](https://serfdom.io/). Requires [`serfclient` Python module](https://pypi.python.org/pypi/serfclient). --- plugins/inventory/serf.py | 89 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100755 plugins/inventory/serf.py diff --git a/plugins/inventory/serf.py b/plugins/inventory/serf.py new file mode 100755 index 00000000000000..7b91b508529bb2 --- /dev/null +++ b/plugins/inventory/serf.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python + +# (c) 2015, Marc Abramowitz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use nodes discovered by Serf +# (https://serfdom.io/). 
+# +# Requires host to be a member of a Serf cluster and the `serfclient` Python +# module from https://pypi.python.org/pypi/serfclient + +import argparse +import sys + +# https://pypi.python.org/pypi/serfclient +from serfclient.client import SerfClient + +try: + import json +except ImportError: + import simplejson as json + +_key = 'serf' + + +def get_serf_members_data(): + serf = SerfClient() + return serf.members().body['Members'] + + +def get_nodes(data): + return [node['Name'] for node in data] + + +def get_meta(data): + meta = {'hostvars': {}} + for node in data: + meta['hostvars'][node['Name']] = node['Tags'] + return meta + + +def print_list(): + data = get_serf_members_data() + nodes = get_nodes(data) + meta = get_meta(data) + print(json.dumps({_key: nodes, '_meta': meta})) + + +def print_host(host): + data = get_serf_members_data() + meta = get_meta(data) + print(json.dumps(meta['hostvars'][host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script reading from serf cluster') + mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from serf cluster' + mutex_group.add_argument('--list', action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) From 30c1a2d86192fedc706b43a76c26c6e4c31a6fe0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 16 Jun 2015 11:55:26 -0400 Subject: [PATCH 0912/3617] Have group/host var file loading check for YAML extensions too Fixes #11132 --- lib/ansible/inventory/__init__.py | 4 ++-- lib/ansible/vars/__init__.py | 28 +++++++++++++++++++++------- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git 
a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 3cd5d8c264f265..9f97e5256d2963 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -661,11 +661,11 @@ def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False): if group and host is None: # load vars in dir/group_vars/name_of_group base_path = os.path.join(basedir, "group_vars/%s" % group.name) - self._variable_manager.add_group_vars_file(base_path, self._loader) + results = self._variable_manager.add_group_vars_file(base_path, self._loader) elif host and group is None: # same for hostvars in dir/host_vars/name_of_host base_path = os.path.join(basedir, "host_vars/%s" % host.name) - self._variable_manager.add_host_vars_file(base_path, self._loader) + results = self._variable_manager.add_host_vars_file(base_path, self._loader) # all done, results is a dictionary of variables for this particular host. return results diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 5a576daba7cc58..64ad9e3a1435a7 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -272,9 +272,17 @@ def _load_inventory_file(self, path, loader): data = self._combine_vars(data, results) else: - data = loader.load_from_file(path) - if data is None: - data = dict() + file_name, ext = os.path.splitext(path) + data = None + if not ext: + for ext in ('', '.yml', '.yaml'): + new_path = path + ext + if loader.path_exists(new_path): + data = loader.load_from_file(new_path) + break + else: + if loader.path_exists(path): + data = loader.load_from_file(path) name = self._get_inventory_basename(path) return (name, data) @@ -286,9 +294,12 @@ def add_host_vars_file(self, path, loader): the extension, for matching against a given inventory host name ''' - if loader.path_exists(path): - (name, data) = self._load_inventory_file(path, loader) + (name, data) = self._load_inventory_file(path, loader) + if data: self._host_vars_files[name] 
= data + return data + else: + return dict() def add_group_vars_file(self, path, loader): ''' @@ -297,9 +308,12 @@ def add_group_vars_file(self, path, loader): the extension, for matching against a given inventory host name ''' - if loader.path_exists(path): - (name, data) = self._load_inventory_file(path, loader) + (name, data) = self._load_inventory_file(path, loader) + if data: self._group_vars_files[name] = data + return data + else: + return dict() def set_host_facts(self, host, facts): ''' From 605ddad37ebf1576664829e91fbebb2442fddf64 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 15 Jun 2015 16:41:57 -0700 Subject: [PATCH 0913/3617] Add test that url lookup checks tls certificates --- .../roles/test_lookups/tasks/main.yml | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index d5032083cf991a..5ca29e27c1e618 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -163,3 +163,34 @@ - name: set with_dict shell: echo "{{ item.key + '=' + item.value }}" with_dict: "{{ mydict }}" + +# URL Lookups + +- name: Test that retrieving a url works + set_fact: + web_data: "{{ lookup('url', 'https://gist.githubusercontent.com/abadger/9858c22712f62a8effff/raw/43dd47ea691c90a5fa7827892c70241913351963/test') }}" + +- name: Assert that the url was retrieved + assert: + that: + - "'one' in web_data" + +- name: Test that retrieving a url with invalid cert fails + set_fact: + web_data: "{{ lookup('url', 'https://kennethreitz.org/') }}" + ignore_errors: True + register: url_invalid_cert + +- assert: + that: + - "url_invalid_cert.failed" + - "'Error validating the server' in url_invalid_cert.msg" + +- name: Test that retrieving a url with invalid cert with validate_certs=False works + set_fact: + web_data: "{{ lookup('url', 'https://kennethreitz.org/', validate_certs=False) }}" + 
register: url_no_validate_cert + +- assert: + that: + - "'kennethreitz.org' in web_data" From 4b28a51f25226a1c6a86892b774a8bcea5a63883 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 16 Jun 2015 13:55:05 -0400 Subject: [PATCH 0914/3617] Don't fail outright when a play has an empty hosts list --- lib/ansible/executor/playbook_executor.py | 1 - lib/ansible/plugins/strategies/linear.py | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 0c18ad3c893ade..4e77838559c8d2 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -120,7 +120,6 @@ def run(self): if len(batch) == 0: self._tqm.send_callback('v2_playbook_on_play_start', new_play) self._tqm.send_callback('v2_playbook_on_no_hosts_matched') - result = 1 break # restrict the inventory to the hosts in the serialized batch self._inventory.restrict_to_hosts(batch) diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index e92f10eb374e0b..b60a922f83438a 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -122,9 +122,8 @@ def run(self, iterator, connection_info): moving on to the next task ''' - result = True - # iteratate over each task, while there is one left to run + result = True work_to_do = True while work_to_do and not self._tqm._terminated: From f300be0f3891fa33839b04558966d240db5b1d3c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 11:05:06 -0400 Subject: [PATCH 0915/3617] added ec2_eni_facts --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17884e9dd6a650..2674a9b9a6cf04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ Deprecated Modules (new ones in parens): New Modules: * amazon: ec2_ami_find + * amazon: ec2_eni_facts * amazon: elasticache_subnet_group * amazon: 
ec2_win_password * amazon: iam From 42e2724fa57ff3aca919c54759b297d314c92ba8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 11:51:36 -0400 Subject: [PATCH 0916/3617] added serf inventory plugin --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2674a9b9a6cf04..ca25530733d62f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,7 @@ New Modules: New Inventory scripts: * cloudstack * fleetctl + * serf Other Notable Changes: From bb7d33adbcc0f1888c9c5fa6dfb87bb6d80efba1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 15:46:11 -0400 Subject: [PATCH 0917/3617] moved become password handlingn to base class --- lib/ansible/plugins/connections/__init__.py | 81 +++++++++++++++++++-- 1 file changed, 73 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 20ed2a80e332d0..c861f03778ccf8 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -20,7 +20,10 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import fcntl import gettext +import select +import os from abc import ABCMeta, abstractmethod, abstractproperty from functools import wraps @@ -34,6 +37,9 @@ # which may want to output display/logs too from ansible.utils.display import Display +from ansible.utils.debug import debug + + __all__ = ['ConnectionBase', 'ensure_connect'] @@ -64,6 +70,9 @@ def __init__(self, connection_info, new_stdin, *args, **kwargs): if not hasattr(self, '_connected'): self._connected = False + self.success_key = None + self.prompt = None + def _become_method_supported(self): ''' Checks if the current class supports this privilege escalation method ''' @@ -119,17 +128,73 @@ def close(self): """Terminate the connection""" pass - def check_become_success(self, output, success_key): - return success_key in output + def 
check_become_success(self, output): + return self.success_key in output - def check_password_prompt(self, output, prompt): - if isinstance(prompt, basestring): - return output.endswith(prompt) + def check_password_prompt(self, output): + if isinstance(self.prompt, basestring): + return output.endswith(self.prompt) else: - return prompt(output) + return self.prompt(output) - def check_incorrect_password(self, output, prompt): + def check_incorrect_password(self, output): incorrect_password = gettext.dgettext(self._connection_info.become_method, C.BECOME_ERROR_STRINGS[self._connection_info.become_method]) - if output.endswith(incorrect_password): + if output.strip().endswith(incorrect_password): raise AnsibleError('Incorrect %s password' % self._connection_info.become_method) + def handle_become_password(self, p, stdin): + ''' + Several cases are handled for privileges with password + * NOPASSWD (tty & no-tty): detect success_key on stdout + * without NOPASSWD: + * detect prompt on stdout (tty) + * detect prompt on stderr (no-tty) + ''' + + out = '' + err = '' + + debug("Handling privilege escalation password prompt.") + + if self._connection_info.become and self._connection_info.become_pass: + + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) + + become_output = '' + become_errput = '' + while True: + debug('Waiting for Privilege Escalation input') + if self.check_become_success(become_output) or \ + self.check_password_prompt(become_output): + break + + rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._connection_info.timeout) + if p.stderr in rfd: + chunk = p.stderr.read() + if not chunk: + raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output) + become_errput += chunk + + self.check_incorrect_password(become_errput) + + if p.stdout in rfd: + chunk = 
p.stdout.read() + if not chunk: + raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output) + become_output += chunk + + if not rfd: + # timeout. wrap up process communication + stdout, stderr = p.communicate() + raise AnsibleError('Connection error waiting for privilege escalation password prompt: %s' % become_output) + + if not self.check_become_success(become_output): + debug("Sending privilege escalation password.") + stdin.write(self._connection_info.become_pass + '\n') + else: + out += become_output + err += become_errput + + return out, err + From 3b1b95b916e8cb2f788b48a4995c24c04d632dc8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 15:47:33 -0400 Subject: [PATCH 0918/3617] moved ipv6 handling to init fixed become password handling --- lib/ansible/plugins/connections/ssh.py | 96 ++++++-------------------- 1 file changed, 22 insertions(+), 74 deletions(-) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 6f37154380df82..7c117fee902ebd 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -48,9 +48,6 @@ def __init__(self, *args, **kwargs): self.HASHED_KEY_MAGIC = "|1|" self._has_pipelining = True - # FIXME: make this work, should be set from connection info - self._ipv6 = False - # FIXME: move the lockfile locations to ActionBase? 
#fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX) #self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700) @@ -59,6 +56,12 @@ def __init__(self, *args, **kwargs): super(Connection, self).__init__(*args, **kwargs) + # FIXME: make this work, should be set from connection info + self._ipv6 = False + self.host = self._connection_info.remote_addr + if self._ipv6: + self.host = '[%s]' % self.host + @property def transport(self): ''' used to identify this connection object from other classes ''' @@ -154,7 +157,7 @@ def _send_password(self): os.write(self.wfd, "{0}\n".format(self._connection_info.password)) os.close(self.wfd) - def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): + def _communicate(self, p, stdin, indata, sudoable=True): fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) # We can't use p.communicate here because the ControlMaster may have stdout open as well @@ -174,8 +177,8 @@ def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None): # fail early if the become password is wrong if self._connection_info.become and sudoable: if self._connection_info.become_pass: - self.check_incorrect_password(stdout, prompt) - elif self.check_password_prompt(stdout, prompt): + self.check_incorrect_password(stdout) + elif self.check_password_prompt(stdout): raise AnsibleError('Missing %s password', self._connection_info.become_method) if p.stdout in rfd: @@ -263,8 +266,6 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable) - host = self._connection_info.remote_addr - ssh_cmd = self._password_cmd() ssh_cmd += ("ssh", "-C") if not in_data: @@ -280,17 +281,15 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): if self._ipv6: ssh_cmd += ['-6'] - 
ssh_cmd.append(host) + ssh_cmd.append(self.host) - prompt = None - success_key = '' if sudoable: - cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd) + cmd, self.prompt, self.success_key = self._connection_info.make_become_cmd(cmd) ssh_cmd.append(cmd) - self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=host) + self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=self.host) - not_in_host_file = self.not_in_host_file(host) + not_in_host_file = self.not_in_host_file(self.host) # FIXME: move the locations of these lock files, same as init above #if C.HOST_KEY_CHECKING and not_in_host_file: @@ -307,51 +306,10 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): no_prompt_out = '' no_prompt_err = '' - if self._connection_info.become and sudoable and self._connection_info.become_pass: - # several cases are handled for sudo privileges with password - # * NOPASSWD (tty & no-tty): detect success_key on stdout - # * without NOPASSWD: - # * detect prompt on stdout (tty) - # * detect prompt on stderr (no-tty) - fcntl.fcntl(p.stdout, fcntl.F_SETFL, - fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) - fcntl.fcntl(p.stderr, fcntl.F_SETFL, - fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) - become_output = '' - become_errput = '' - - while True: - if self.check_become_success(become_output, success_key) or \ - self.check_password_prompt(become_output, prompt): - break - rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._connection_info.timeout) - if p.stderr in rfd: - chunk = p.stderr.read() - if not chunk: - raise AnsibleError('ssh connection closed waiting for privilege escalation password prompt') - become_errput += chunk - - self.check_incorrect_password(become_errput, prompt) - - if p.stdout in rfd: - chunk = p.stdout.read() - if not chunk: - raise AnsibleError('ssh connection closed waiting for sudo or su password prompt') - become_output += chunk - - if not rfd: - # timeout. 
wrap up process communication - stdout = p.communicate() - raise AnsibleError('ssh connection error waiting for sudo or su password prompt') - - if not self.check_become_success(become_output, success_key): - if sudoable: - stdin.write(self._connection_info.become_pass + '\n') - else: - no_prompt_out += become_output - no_prompt_err += become_errput + if self.prompt: + no_prompt_out, no_prompt_err = self.handle_become_password(p, stdin) - (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt) + (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable) #if C.HOST_KEY_CHECKING and not_in_host_file: # # lock around the initial SSH connectivity so the user prompt about whether to add @@ -378,12 +336,7 @@ def put_file(self, in_path, out_path): super(Connection, self).put_file(in_path, out_path) - # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH - host = self._connection_info.remote_addr - if self._ipv6: - host = '[%s]' % host - - self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=host) + self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host) if not os.path.exists(in_path): raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path)) cmd = self._password_cmd() @@ -391,12 +344,12 @@ def put_file(self, in_path, out_path): if C.DEFAULT_SCP_IF_SSH: cmd.append('scp') cmd.extend(self._common_args) - cmd.extend([in_path, '{0}:{1}'.format(host, pipes.quote(out_path))]) + cmd.extend([in_path, '{0}:{1}'.format(self.host, pipes.quote(out_path))]) indata = None else: cmd.append('sftp') cmd.extend(self._common_args) - cmd.append(host) + cmd.append(self.host) indata = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path)) (p, stdin) = self._run(cmd, indata) @@ -413,24 +366,19 @@ def fetch_file(self, in_path, out_path): super(Connection, self).fetch_file(in_path, out_path) - # FIXME: make a function, used in all 3 methods 
EXEC/PUT/FETCH - host = self._connection_info.remote_addr - if self._ipv6: - host = '[%s]' % host - - self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=host) + self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host) cmd = self._password_cmd() if C.DEFAULT_SCP_IF_SSH: cmd.append('scp') cmd.extend(self._common_args) - cmd.extend(['{0}:{1}'.format(host, in_path), out_path]) + cmd.extend(['{0}:{1}'.format(self.host, in_path), out_path]) indata = None else: cmd.append('sftp') cmd.extend(self._common_args) - cmd.append(host) + cmd.append(self.host) indata = "get {0} {1}\n".format(in_path, out_path) p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) From 935da01068e1e48e0a5796b4b561f8422876ad3b Mon Sep 17 00:00:00 2001 From: Kirk Strauser Date: Tue, 16 Jun 2015 14:35:36 -0700 Subject: [PATCH 0919/3617] Fixes for FreeBSD get_memory_facts - swapinfo on FreeBSD 6 (maybe 7 too?) doesn't support the "-m" flag for fetching amounts in megabytes. This patch fetches amounts in kilobytes and divides by 1024 (and also returns the result as an int instead of a string). - When no swap is configured, swapinfo prints a header line and nothing else: $ swapinfo Device 1K-blocks Used Avail Capacity The old version unexpectedly parsed that header line and emitted nonsense values like: "ansible_swapfree_mb": "Avail" "ansible_swaptotal_mb": "1K-blocks" This version emits those items altogether. 
--- lib/ansible/module_utils/facts.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 06da6d53e32607..c1b05ce8d134fc 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1264,13 +1264,14 @@ def get_memory_facts(self): # Device 1M-blocks Used Avail Capacity # /dev/ada0p3 314368 0 314368 0% # - rc, out, err = module.run_command("/usr/sbin/swapinfo -m") + rc, out, err = module.run_command("/usr/sbin/swapinfo -k") lines = out.split('\n') if len(lines[-1]) == 0: lines.pop() data = lines[-1].split() - self.facts['swaptotal_mb'] = data[1] - self.facts['swapfree_mb'] = data[3] + if data[0] != 'Device': + self.facts['swaptotal_mb'] = int(data[1]) / 1024 + self.facts['swapfree_mb'] = int(data[3]) / 1024 @timeout(10) def get_mount_facts(self): From eb820837ac83cdfdf4602a9c5b46681b3a488447 Mon Sep 17 00:00:00 2001 From: Kirk Strauser Date: Tue, 16 Jun 2015 15:17:52 -0700 Subject: [PATCH 0920/3617] Don't panic if AIX's uname doesn't support -W The current code expects "uname -W" on AIX to always succeed. The AIX 5 instance I have doesn't support the -W flag and facts gathering always crashes on it. This skips some WPAR handling code if "uname -W" doesn't work. 
--- lib/ansible/module_utils/facts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 06da6d53e32607..87c9814ce852ff 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2217,7 +2217,7 @@ def get_interfaces_info(self, ifconfig_path): rc, out, err = module.run_command([uname_path, '-W']) # don't bother with wpars it does not work # zero means not in wpar - if out.split()[0] == '0': + if not rc and out.split()[0] == '0': if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']): entstat_path = module.get_bin_path('entstat') if entstat_path: From a0e8b9ef98d63dc8a262976e50d9c36e300c4713 Mon Sep 17 00:00:00 2001 From: Marc Tamsky Date: Tue, 16 Jun 2015 19:28:53 -0700 Subject: [PATCH 0921/3617] for tags with empty value, do not append separator --- plugins/inventory/ec2.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 16ac93f5ee4827..112f5c29e86c0e 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -520,7 +520,10 @@ def add_instance(self, instance, region): # Inventory: Group by tag keys if self.group_by_tag_keys: for k, v in instance.tags.items(): - key = self.to_safe("tag_" + k + "=" + v) + if v: + key = self.to_safe("tag_" + k + "=" + v) + else: + key = self.to_safe("tag_" + k) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) From ff998b602291acf55bbda498ca0361383c440a48 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Jun 2015 00:09:04 -0400 Subject: [PATCH 0922/3617] Make sure the templar is using the right vars when evaluating conditionals --- lib/ansible/playbook/conditional.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index 
707233aaa0e991..ff00a01de27c45 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -73,6 +73,9 @@ def _check_conditional(self, conditional, templar, all_vars): if conditional in all_vars and '-' not in unicode(all_vars[conditional]): conditional = all_vars[conditional] + # make sure the templar is using the variables specifed to this method + templar.set_available_variables(variables=all_vars) + conditional = templar.template(conditional) if not isinstance(conditional, basestring) or conditional == "": return conditional From ce42c66e27c47595031ca4fcdf9facfaf6d6fd74 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Tue, 16 Jun 2015 21:11:36 -0700 Subject: [PATCH 0923/3617] plugins/inventory/serf.py: Use SERF_RPC_* env vars This makes the Serf inventory plugin use the `SERF_RPC_ADDR` and `SERF_RPC_AUTH` environment variables that the `serf` command-line tool already uses. These can be used to get Serf data from a remote node instead of requiring the ansible control host to be running a serf agent and to be a member of the serf cluster. --- plugins/inventory/serf.py | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/plugins/inventory/serf.py b/plugins/inventory/serf.py index 7b91b508529bb2..3c4cf365c64b3b 100755 --- a/plugins/inventory/serf.py +++ b/plugins/inventory/serf.py @@ -20,10 +20,18 @@ # Dynamic inventory script which lets you use nodes discovered by Serf # (https://serfdom.io/). 
# -# Requires host to be a member of a Serf cluster and the `serfclient` Python -# module from https://pypi.python.org/pypi/serfclient +# Requires the `serfclient` Python module from +# https://pypi.python.org/pypi/serfclient +# +# Environment variables +# --------------------- +# - `SERF_RPC_ADDR` +# - `SERF_RPC_AUTH` +# +# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr import argparse +import os import sys # https://pypi.python.org/pypi/serfclient @@ -37,9 +45,22 @@ _key = 'serf' +def _serf_client(): + kwargs = {} + + rpc_addr = os.getenv('SERF_RPC_ADDR') + if rpc_addr: + kwargs['host'], kwargs['port'] = rpc_addr.split(':') + + rpc_auth = os.getenv('SERF_RPC_AUTH') + if rpc_auth: + kwargs['rpc_auth'] = rpc_auth + + return SerfClient(**kwargs) + + def get_serf_members_data(): - serf = SerfClient() - return serf.members().body['Members'] + return _serf_client().members().body['Members'] def get_nodes(data): From 0d5b7ae669ec568257f0415d8bee8dadfb85795a Mon Sep 17 00:00:00 2001 From: Benno Joy Date: Wed, 17 Jun 2015 19:18:19 +0530 Subject: [PATCH 0924/3617] fixes 11296 where the groups does not have all the groups --- lib/ansible/vars/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 64ad9e3a1435a7..239d77ca658bc4 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -219,6 +219,7 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): if self._inventory is not None: hostvars = HostVars(vars_manager=self, inventory=self._inventory, loader=loader) all_vars['hostvars'] = hostvars + all_vars['groups'] = self._inventory.groups_list() if task: if task._role: From dc63bbf0b9686db297de8d0bb801cba0418f88f2 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Wed, 17 Jun 2015 08:18:58 -0700 Subject: [PATCH 0925/3617] Simplify serf inventory plugin using newly added `EnvironmentConfig` class in 
`serfclient`. See https://github.com/KushalP/serfclient-py/pull/17 --- plugins/inventory/serf.py | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/plugins/inventory/serf.py b/plugins/inventory/serf.py index 3c4cf365c64b3b..dfda4dd855db91 100755 --- a/plugins/inventory/serf.py +++ b/plugins/inventory/serf.py @@ -35,7 +35,7 @@ import sys # https://pypi.python.org/pypi/serfclient -from serfclient.client import SerfClient +from serfclient import SerfClient, EnvironmentConfig try: import json @@ -46,17 +46,8 @@ def _serf_client(): - kwargs = {} - - rpc_addr = os.getenv('SERF_RPC_ADDR') - if rpc_addr: - kwargs['host'], kwargs['port'] = rpc_addr.split(':') - - rpc_auth = os.getenv('SERF_RPC_AUTH') - if rpc_auth: - kwargs['rpc_auth'] = rpc_auth - - return SerfClient(**kwargs) + env = EnvironmentConfig() + return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key) def get_serf_members_data(): From 16f66a39a6ec8ce5c041c8f08ed2b017b409885d Mon Sep 17 00:00:00 2001 From: rncry Date: Wed, 17 Jun 2015 17:22:28 +0100 Subject: [PATCH 0926/3617] support instances with no public ip default to private ip if the instance doesn't have a public ip assigned. 
(causes list index out of range error otherwise) --- plugins/inventory/gce.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/gce.py b/plugins/inventory/gce.py index 76e14f2301278d..5fe3db93f8e35e 100755 --- a/plugins/inventory/gce.py +++ b/plugins/inventory/gce.py @@ -221,7 +221,7 @@ def node_to_dict(self, inst): 'gce_image': inst.image, 'gce_machine_type': inst.size, 'gce_private_ip': inst.private_ips[0], - 'gce_public_ip': inst.public_ips[0], + 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, 'gce_name': inst.name, 'gce_description': inst.extra['description'], 'gce_status': inst.extra['status'], @@ -230,7 +230,7 @@ def node_to_dict(self, inst): 'gce_metadata': md, 'gce_network': net, # Hosts don't have a public name, so we add an IP - 'ansible_ssh_host': inst.public_ips[0] + 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] } def get_instance(self, instance_name): From daa319881f584948e27f943d12c2dbed28467d98 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Jun 2015 12:42:47 -0400 Subject: [PATCH 0927/3617] Make sure registered variable message is sent before other messages Avoids a race condition where previously the registered variable message was being sent after the 'host_task_ok' message, meaning the next task may be started before the var is registered, leading to an undefined variable error --- lib/ansible/executor/process/result.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index f0416db852d3b6..352b532cd48cc3 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -105,7 +105,9 @@ def run(self): time.sleep(0.1) continue - host_name = result._host.get_name() + # if this task is registering a result, do it now + if result._task.register: + self._send_result(('set_host_var', 
result._host, result._task.register, result._result)) # send callbacks, execute other options based on the result status # FIXME: this should all be cleaned up and probably moved to a sub-function. @@ -160,10 +162,6 @@ def run(self): # finally, send the ok for this task self._send_result(('host_task_ok', result)) - # if this task is registering a result, do it now - if result._task.register: - self._send_result(('set_host_var', result._host, result._task.register, result._result)) - except queue.Empty: pass except (KeyboardInterrupt, IOError, EOFError): From 410285ecd6fd4201b78061d73dc29e58ca641663 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Wed, 17 Jun 2015 18:41:54 +0100 Subject: [PATCH 0928/3617] add simple prefix filtering to vmware inventory Significantly speeds up inventory collection on systems with many excluded machines. --- plugins/inventory/vmware.ini | 4 ++++ plugins/inventory/vmware.py | 10 +++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/vmware.ini b/plugins/inventory/vmware.ini index 964be18c14e203..5097735fd0e18d 100644 --- a/plugins/inventory/vmware.ini +++ b/plugins/inventory/vmware.ini @@ -23,6 +23,10 @@ guests_only = True # caching will be disabled. #cache_dir = ~/.cache/ansible +# Specify a prefix filter. Any VMs with names beginning with this string will +# not be returned. +# prefix_filter = test_ + [auth] # Specify hostname or IP address of vCenter/ESXi server. 
A port may be diff --git a/plugins/inventory/vmware.py b/plugins/inventory/vmware.py index 92030d66e56925..27330b8bcdef8f 100755 --- a/plugins/inventory/vmware.py +++ b/plugins/inventory/vmware.py @@ -55,7 +55,7 @@ def emit(self, record): class VMwareInventory(object): - + def __init__(self, guests_only=None): self.config = ConfigParser.SafeConfigParser() if os.environ.get('VMWARE_INI', ''): @@ -305,6 +305,11 @@ def get_inventory(self, meta_hostvars=True): else: vm_group = default_group + '_vm' + if self.config.has_option('defaults', 'prefix_filter'): + prefix_filter = self.config.get('defaults', 'prefix_filter') + else: + prefix_filter = None + # Loop through physical hosts: for host in HostSystem.all(self.client): @@ -318,6 +323,9 @@ def get_inventory(self, meta_hostvars=True): # Loop through all VMs on physical host. for vm in host.vm: + if prefix_filter: + if vm.name.startswith( prefix_filter ): + continue self._add_host(inv, 'all', vm.name) self._add_host(inv, vm_group, vm.name) vm_info = self._get_vm_info(vm) From a38574442652008a0a3274caeccf2578b1302e2f Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Wed, 17 Jun 2015 10:58:13 -0700 Subject: [PATCH 0929/3617] Add inventory file to "Unable to find" error msg E.g.: $ ansible gabriel -m ping -i ssh_config.py ERROR! Unable to find an inventory file (ssh_config.py), specify one with -i ? 
--- lib/ansible/inventory/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 9f97e5256d2963..a6e93b565592c9 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -144,7 +144,8 @@ def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST): vars_loader.add_directory(self.basedir(), with_subdir=True) else: - raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?") + raise errors.AnsibleError("Unable to find an inventory file (%s), " + "specify one with -i ?" % host_list) self._vars_plugins = [ x for x in vars_loader.all(self) ] From c3c398cffe202146df9c73b8ed6e478c054dd207 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Jun 2015 15:38:52 -0400 Subject: [PATCH 0930/3617] Cleaning up some task failure detection problems * fixed a bug in which failures from a with_* loop were not being caught correctly, leading to tasks continuing when they should stop * when ignore_errors is enabled, the failure will no longer count towards the number of failed tasks --- lib/ansible/executor/task_result.py | 3 ++- lib/ansible/plugins/strategies/__init__.py | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py index 2b760bac003d6d..99ac06c8eb3715 100644 --- a/lib/ansible/executor/task_result.py +++ b/lib/ansible/executor/task_result.py @@ -43,7 +43,7 @@ def is_skipped(self): return self._check_key('skipped') def is_failed(self): - if 'failed_when_result' in self._result: + if 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]: return self._check_key('failed_when_result') else: return self._check_key('failed') or self._result.get('rc', 0) != 0 @@ -57,5 +57,6 @@ def _check_key(self, key): for res in self._result.get('results', []): if isinstance(res, 
dict): flag |= res.get(key, False) + return flag else: return self._result.get(key, False) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 57630f4f21e224..e9cdd7d35ce93f 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -154,7 +154,9 @@ def _process_pending_results(self, iterator): debug("marking %s as failed" % host.name) iterator.mark_host_failed(host) self._tqm._failed_hosts[host.name] = True - self._tqm._stats.increment('failures', host.name) + self._tqm._stats.increment('failures', host.name) + else: + self._tqm._stats.increment('ok', host.name) self._tqm.send_callback('v2_runner_on_failed', task_result) elif result[0] == 'host_unreachable': self._tqm._unreachable_hosts[host.name] = True From 90445ee67dad1e0a9d069e21780a4dc27fc304bf Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Jun 2015 16:03:19 -0400 Subject: [PATCH 0931/3617] Add ::1 where we see 127.0.0.1, for better ipv6 support Fixes #5764 --- lib/ansible/inventory/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index a6e93b565592c9..de25c2ac32fba9 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -372,7 +372,7 @@ def __append_host_to_results(host): for host in matching_hosts: __append_host_to_results(host) - if pattern in ["localhost", "127.0.0.1"] and len(results) == 0: + if pattern in ["localhost", "127.0.0.1", "::1"] and len(results) == 0: new_host = self._create_implicit_localhost(pattern) results.append(new_host) return results @@ -408,9 +408,9 @@ def get_host(self, hostname): return self._hosts_cache[hostname] def _get_host(self, hostname): - if hostname in ['localhost','127.0.0.1']: + if hostname in ['localhost', '127.0.0.1', '::1']: for host in self.get_group('all').get_hosts(): - if host.name in ['localhost', 
'127.0.0.1']: + if host.name in ['localhost', '127.0.0.1', '::1']: return host return self._create_implicit_localhost(hostname) else: @@ -512,7 +512,7 @@ def list_hosts(self, pattern="all"): """ return a list of hostnames for a pattern """ result = [ h for h in self.get_hosts(pattern) ] - if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]: + if len(result) == 0 and pattern in ["localhost", "127.0.0.1", "::1"]: result = [pattern] return result From 87ca4757049ff47621d5a9b9d7641be1ed9b178b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 17 Jun 2015 16:25:58 -0400 Subject: [PATCH 0932/3617] Exclude the all/ungrouped groups from pattern matching results Fixes #5375 --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index de25c2ac32fba9..26e9e617875c52 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -364,7 +364,7 @@ def __append_host_to_results(host): for host in group.get_hosts(): __append_host_to_results(host) else: - if self._match(group.name, pattern): + if self._match(group.name, pattern) and group.name not in ('all', 'ungrouped'): for host in group.get_hosts(): __append_host_to_results(host) else: From a0f1d81ada8757a0993735f6e0cde420de84d7cb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 18:25:57 -0400 Subject: [PATCH 0933/3617] added several openstack modules to changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca25530733d62f..473b8d6d2b416d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,9 +45,13 @@ New Modules: * expect * find * maven_artifact + * openstack: os_ironic + * openstack: os_ironic_node * openstack: os_client_config * openstack: os_image * openstack: os_network + * openstack: os_object + * openstack: os_security_group * openstack: os_server * openstack: os_server_actions * 
openstack: os_server_facts From b27d762081ab196276d0470b90ffce3eef00062c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 19:19:55 -0400 Subject: [PATCH 0934/3617] added osx_defaults to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 473b8d6d2b416d..3910cfbcc725b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ New Modules: * openstack: os_server_volume * openstack: os_subnet * openstack: os_volume + * osx_defaults * pear * proxmox * proxmox_template From faed1b2d0544a9f1941532d542ca13b4bc36cc5b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 19:20:25 -0400 Subject: [PATCH 0935/3617] better error reporting when doc parsing fails --- lib/ansible/cli/doc.py | 63 ++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index 797a59f0381c9e..09020b41ffeebe 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -81,43 +81,46 @@ def run(self): text = '' for module in self.args: - filename = module_loader.find_plugin(module) - if filename is None: - self.display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) - continue + try: + filename = module_loader.find_plugin(module) + if filename is None: + self.display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) + continue - if any(filename.endswith(x) for x in self.BLACKLIST_EXTS): - continue + if any(filename.endswith(x) for x in self.BLACKLIST_EXTS): + continue - try: - doc, plainexamples, returndocs = module_docs.get_docstring(filename) - except: - self.display.vvv(traceback.print_exc()) - self.display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module) - continue + try: + doc, plainexamples, returndocs = module_docs.get_docstring(filename) + except: + 
self.display.vvv(traceback.print_exc()) + self.display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module) + continue - if doc is not None: + if doc is not None: - all_keys = [] - for (k,v) in doc['options'].iteritems(): - all_keys.append(k) - all_keys = sorted(all_keys) - doc['option_keys'] = all_keys + all_keys = [] + for (k,v) in doc['options'].iteritems(): + all_keys.append(k) + all_keys = sorted(all_keys) + doc['option_keys'] = all_keys - doc['filename'] = filename - doc['docuri'] = doc['module'].replace('_', '-') - doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') - doc['plainexamples'] = plainexamples - doc['returndocs'] = returndocs + doc['filename'] = filename + doc['docuri'] = doc['module'].replace('_', '-') + doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') + doc['plainexamples'] = plainexamples + doc['returndocs'] = returndocs - if self.options.show_snippet: - text += DocCLI.get_snippet_text(doc) + if self.options.show_snippet: + text += DocCLI.get_snippet_text(doc) + else: + text += DocCLI.get_man_text(doc) else: - text += DocCLI.get_man_text(doc) - else: - # this typically means we couldn't even parse the docstring, not just that the YAML is busted, - # probably a quoting issue. - self.display.warning("module %s missing documentation (or could not parse documentation)\n" % module) + # this typically means we couldn't even parse the docstring, not just that the YAML is busted, + # probably a quoting issue. 
+ raise AnsibleError("Parsing produced an empty object.") + except Exception, e: + raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e))) CLI.pager(text) return 0 From 08f62b6e13f1bb856df3ce895e3136e3df0e623e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 09:38:54 -0400 Subject: [PATCH 0936/3617] added vsphere copy --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3910cfbcc725b1..bbbac4ec17a487 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -74,6 +74,7 @@ New Modules: * vertica_schema * vertica_user * vmware_datacenter + * vsphere_copy * webfaction_app * webfaction_db * webfaction_domain From f74f0e76f041e2c11620b3f80ce5f9d2fbf28158 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 11:50:02 -0400 Subject: [PATCH 0937/3617] added dpkg_selections to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bbbac4ec17a487..4fbf63d2bba573 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ New Modules: * cloudstack: cs_template * cloudstack: cs_vmsnapshot * datadog_monitor + * dpkg_selections * expect * find * maven_artifact From c7457967074a51829a4fcf0b1cb1111ae0a598b7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 12:46:45 -0400 Subject: [PATCH 0938/3617] added hall notification module to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4fbf63d2bba573..064612f5bdd3f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ New Modules: * dpkg_selections * expect * find + * hall * maven_artifact * openstack: os_ironic * openstack: os_ironic_node From 3fab516d3d1bb1fe81fecb8d7ef412317277a373 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 22:50:54 -0400 Subject: [PATCH 0939/3617] fixed detection of incorrect password --- lib/ansible/plugins/connections/__init__.py 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index c861f03778ccf8..01a3496b5c6185 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -139,7 +139,7 @@ def check_password_prompt(self, output): def check_incorrect_password(self, output): incorrect_password = gettext.dgettext(self._connection_info.become_method, C.BECOME_ERROR_STRINGS[self._connection_info.become_method]) - if output.strip().endswith(incorrect_password): + if incorrect_password in output: raise AnsibleError('Incorrect %s password' % self._connection_info.become_method) def handle_become_password(self, p, stdin): From 5cfd0f82a052e9cfb28e3f4e06da264fda22ab06 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 23:18:43 -0400 Subject: [PATCH 0940/3617] moved away from generic function for become --- lib/ansible/plugins/connections/__init__.py | 56 --------------------- 1 file changed, 56 deletions(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 01a3496b5c6185..629c90d8d7e20a 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -142,59 +142,3 @@ def check_incorrect_password(self, output): if incorrect_password in output: raise AnsibleError('Incorrect %s password' % self._connection_info.become_method) - def handle_become_password(self, p, stdin): - ''' - Several cases are handled for privileges with password - * NOPASSWD (tty & no-tty): detect success_key on stdout - * without NOPASSWD: - * detect prompt on stdout (tty) - * detect prompt on stderr (no-tty) - ''' - - out = '' - err = '' - - debug("Handling privilege escalation password prompt.") - - if self._connection_info.become and self._connection_info.become_pass: - - fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | 
os.O_NONBLOCK) - fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) - - become_output = '' - become_errput = '' - while True: - debug('Waiting for Privilege Escalation input') - if self.check_become_success(become_output) or \ - self.check_password_prompt(become_output): - break - - rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._connection_info.timeout) - if p.stderr in rfd: - chunk = p.stderr.read() - if not chunk: - raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output) - become_errput += chunk - - self.check_incorrect_password(become_errput) - - if p.stdout in rfd: - chunk = p.stdout.read() - if not chunk: - raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output) - become_output += chunk - - if not rfd: - # timeout. wrap up process communication - stdout, stderr = p.communicate() - raise AnsibleError('Connection error waiting for privilege escalation password prompt: %s' % become_output) - - if not self.check_become_success(become_output): - debug("Sending privilege escalation password.") - stdin.write(self._connection_info.become_pass + '\n') - else: - out += become_output - err += become_errput - - return out, err - From d6672ad285b5c4c65fc7126f139bb2a36bcb21a8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 23:23:09 -0400 Subject: [PATCH 0941/3617] removed unused import --- lib/ansible/plugins/connections/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 629c90d8d7e20a..8e4841225c63db 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -37,8 +37,6 @@ # which may want to output display/logs too from ansible.utils.display import Display -from ansible.utils.debug import debug - __all__ = ['ConnectionBase', 
'ensure_connect'] From 744ec2bbad5c1717028ecc14b35fa8cfcdb25fab Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 23:23:36 -0400 Subject: [PATCH 0942/3617] put hostkey locking into function (still needs fixing) implemented become handling here, cannot generalize well enough in base class --- lib/ansible/plugins/connections/ssh.py | 89 +++++++++++++++++++++----- 1 file changed, 74 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 7c117fee902ebd..7fb62e2263dd99 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -35,7 +35,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connections import ConnectionBase - +from ansible.utils.debug import debug class Connection(ConnectionBase): ''' ssh based connections ''' @@ -261,6 +261,21 @@ def not_in_host_file(self, host): self._display.vvv("EXEC previous known host file not found for {0}".format(host)) return True + def lock_host_keys(self, lock): + + if C.HOST_KEY_CHECKING and self.not_in_host_file(self.host): + if lock: + action = fcntl.LOCK_EX + else: + action = fcntl.LOCK_UN + + # lock around the initial SSH connectivity so the user prompt about whether to add + # the host to known hosts is not intermingled with multiprocess output.
+ # FIXME: move the locations of these lock files, same as init above, these came from runner, probably need to be in task_executor + # fcntl.lockf(self.process_lockfile, action) + # fcntl.lockf(self.output_lockfile, action) + + def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ''' run a command on the remote host ''' @@ -289,15 +304,8 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ssh_cmd.append(cmd) self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=self.host) - not_in_host_file = self.not_in_host_file(self.host) - - # FIXME: move the locations of these lock files, same as init above - #if C.HOST_KEY_CHECKING and not_in_host_file: - # # lock around the initial SSH connectivity so the user prompt about whether to add - # # the host to known hosts is not intermingled with multiprocess output. - # fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX) - # fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX) + self.lock_host_keys(True) # create process (p, stdin) = self._run(ssh_cmd, in_data) @@ -306,16 +314,67 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): no_prompt_out = '' no_prompt_err = '' + if self.prompt: - no_prompt_out, no_prompt_err = self.handle_become_password(p, stdin) + ''' + Several cases are handled for privileges with password + * NOPASSWD (tty & no-tty): detect success_key on stdout + * without NOPASSWD: + * detect prompt on stdout (tty) + * detect prompt on stderr (no-tty) + ''' + + out = '' + err = '' + + debug("Handling privilege escalation password prompt.") + + if self._connection_info.become and self._connection_info.become_pass: + + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) + + become_output = '' + become_errput = '' + while True: + debug('Waiting for Privilege Escalation input') + if 
self.check_become_success(become_output) or self.check_password_prompt(become_output): + break + + rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._connection_info.timeout) + if p.stderr in rfd: + chunk = p.stderr.read() + if not chunk: + raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output) + become_errput += chunk + + self.check_incorrect_password(become_errput) + + if p.stdout in rfd: + chunk = p.stdout.read() + if not chunk: + raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output) + become_output += chunk + + if not rfd: + # timeout. wrap up process communication + stdout, stderr = p.communicate() + raise AnsibleError('Connection error waiting for privilege escalation password prompt: %s' % become_output) + + if not self.check_become_success(become_output): + debug("Sending privilege escalation password.") + stdin.write(self._connection_info.become_pass + '\n') + else: + out += become_output + err += become_errput + + no_prompt_out = out + no_prompt_err = err (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable) - #if C.HOST_KEY_CHECKING and not_in_host_file: - # # lock around the initial SSH connectivity so the user prompt about whether to add - # # the host to known hosts is not intermingled with multiprocess output. 
- # fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN) - # fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN) + self.lock_host_keys(False) + controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr if C.HOST_KEY_CHECKING: From 671118ba71e472c0ebfb72d5a653cb25925977af Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 23:31:31 -0400 Subject: [PATCH 0943/3617] fixed become password handling with paramiko --- lib/ansible/plugins/connections/paramiko_ssh.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/connections/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py index 457b1946d377ab..e509108adf2184 100644 --- a/lib/ansible/plugins/connections/paramiko_ssh.py +++ b/lib/ansible/plugins/connections/paramiko_ssh.py @@ -43,6 +43,7 @@ from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connections import ConnectionBase from ansible.utils.path import makedirs_safe +from ansible.utils.debug import debug AUTHENTICITY_MSG=""" paramiko: The authenticity of host '%s' can't be established. 
@@ -216,17 +217,20 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): self._display.vvv("EXEC %s" % cmd, host=self._connection_info.remote_addr) + + if sudoable: + cmd, self.prompt, self.success_key = self._connection_info.make_become_cmd(cmd) + no_prompt_out = '' no_prompt_err = '' become_output = '' try: chan.exec_command(cmd) - if self._connection_info.become_pass: + if self.prompt: while True: - if success_key in become_output or \ - (prompt and become_output.endswith(prompt)) or \ - utils.su_prompts.check_su_prompt(become_output): + debug('Waiting for Privilege Escalation input') + if self.check_become_success(become_output) or self.check_password_prompt(become_output): break chunk = chan.recv(bufsize) if not chunk: @@ -237,7 +241,7 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): raise AnsibleError('ssh connection ' + 'closed waiting for password prompt') become_output += chunk - if success_key not in become_output: + if not self.check_become_success(become_output): if self._connection_info.become: chan.sendall(self._connection_info.become_pass + '\n') else: From 7c65f3ddd7150b6a2b8911c6319c9c53786f7ccc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 18 Jun 2015 01:46:43 -0400 Subject: [PATCH 0944/3617] partial become support for local connection plugin --- lib/ansible/plugins/connections/local.py | 60 +++++++++++------------- 1 file changed, 28 insertions(+), 32 deletions(-) diff --git a/lib/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py index 74df551f1369f7..3655cb5b6dfd1d 100644 --- a/lib/ansible/plugins/connections/local.py +++ b/lib/ansible/plugins/connections/local.py @@ -22,8 +22,8 @@ import os import shutil import subprocess -#import select -#import fcntl +import select +import fcntl import ansible.constants as C @@ -51,18 +51,17 @@ def _connect(self, port=None): def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ''' run a command on the local host ''' - 
super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data) + super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable) debug("in local.exec_command()") - # su requires to be run from a terminal, and therefore isn't supported here (yet?) - #if self._connection_info.su: - # raise AnsibleError("Internal Error: this module does not support running commands via su") if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") - executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None + if sudoable: + cmd, self.prompt, self.success_key = self._connection_info.make_become_cmd(cmd) + self._display.vvv("{0} EXEC {1}".format(self._connection_info.remote_addr, cmd)) # FIXME: cwd= needs to be set to the basedir of the playbook debug("opening command with Popen()") @@ -76,31 +75,28 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ) debug("done running command with Popen()") - # FIXME: more su/sudo stuff - #if self.runner.sudo and sudoable and self.runner.sudo_pass: - # fcntl.fcntl(p.stdout, fcntl.F_SETFL, - # fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) - # fcntl.fcntl(p.stderr, fcntl.F_SETFL, - # fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) - # sudo_output = '' - # while not sudo_output.endswith(prompt) and success_key not in sudo_output: - # rfd, wfd, efd = select.select([p.stdout, p.stderr], [], - # [p.stdout, p.stderr], self.runner.timeout) - # if p.stdout in rfd: - # chunk = p.stdout.read() - # elif p.stderr in rfd: - # chunk = p.stderr.read() - # else: - # stdout, stderr = p.communicate() - # raise AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output) - # if not chunk: - # stdout, stderr = p.communicate() - # raise AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output) - # sudo_output += chunk - # if success_key not in sudo_output: - # p.stdin.write(self.runner.sudo_pass + 
'\n') - # fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) - # fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) + if self.prompt: + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) + become_output = '' + while not self.check_become_success(become_output) and not self.check_password_prompt(become_output): + + rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout, p.stderr], self._connection_info.timeout) + if p.stdout in rfd: + chunk = p.stdout.read() + elif p.stderr in rfd: + chunk = p.stderr.read() + else: + stdout, stderr = p.communicate() + raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + become_output) + if not chunk: + stdout, stderr = p.communicate() + raise AnsibleError('privilege output closed while waiting for password prompt:\n' + become_output) + become_output += chunk + if not self.check_become_success(become_output): + p.stdin.write(self._connection_info.become_pass + '\n') + fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK) + fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK) debug("getting output with communicate()") stdout, stderr = p.communicate() From fdc06c134ab08f854d1c45f91644659971a98553 Mon Sep 17 00:00:00 2001 From: Rodolfo Carvalho Date: Thu, 18 Jun 2015 09:03:42 +0200 Subject: [PATCH 0945/3617] Fix docs typo --- docsite/rst/intro_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 6dc91c32bbcb73..0f13c561f71243 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -147,7 +147,7 @@ other than /etc/ansible/hosts: .. 
note:: - ANSIBLE_INVENTORY is available starting at 1.9 and subtitutes the deprecated ANSIBLE_HOSTS + ANSIBLE_INVENTORY is available starting at 1.9 and substitutes the deprecated ANSIBLE_HOSTS You can read more about the inventory file in later parts of the manual. From 4ca4d36ae6cb3386703c7be3c3b87bd7da2a106e Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Thu, 18 Jun 2015 11:00:10 +0200 Subject: [PATCH 0946/3617] Change syslog (priority) level from LOG_NOTICE to LOG_INFO If you look at the meaning of the different syslog levels, NOTICE means that the event may need someone to look at it. Whereas INFO is pure informational. Since module invocations are in fact requested (deliberate) actions, they shouldn't need any additional post-processing, and therefore should not be logged as NOTICE. This may seem like hairsplitting, but correctly categorizing system events helps weeding through the noise downhill. According to Wikipedia: https://en.wikipedia.org/wiki/Syslog 5 Notice notice Events that are unusual but not error conditions . 6 Informational info Normal operational messages -no action required. Example an application has started, paused or ended successfully. 
--- lib/ansible/module_utils/basic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index b521e73f15cefb..1888a7c501eb45 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1161,10 +1161,10 @@ def _log_invocation(self): except IOError, e: # fall back to syslog since logging to journal failed syslog.openlog(str(module), 0, syslog.LOG_USER) - syslog.syslog(syslog.LOG_NOTICE, msg) #1 + syslog.syslog(syslog.LOG_INFO, msg) #1 else: syslog.openlog(str(module), 0, syslog.LOG_USER) - syslog.syslog(syslog.LOG_NOTICE, msg) #2 + syslog.syslog(syslog.LOG_INFO, msg) #2 def _set_cwd(self): try: From aede9f08dba8c5f88a869dca2ed9b1bc7f5ae35e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 18 Jun 2015 10:05:23 -0400 Subject: [PATCH 0947/3617] fixed case in which prompt was None --- lib/ansible/plugins/connections/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 8e4841225c63db..6515f62dcc4ad9 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -130,7 +130,9 @@ def check_become_success(self, output): return self.success_key in output def check_password_prompt(self, output): - if isinstance(self.prompt, basestring): + if self.prompt in None: + return True + elif isinstance(self.prompt, basestring): return output.endswith(self.prompt) else: return self.prompt(output) From 87a0ccc354b20d252485362bb9ab2c4ea90b1ecb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 18 Jun 2015 10:12:04 -0400 Subject: [PATCH 0948/3617] fixed typo --- lib/ansible/plugins/connections/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 6515f62dcc4ad9..40c7b13e954e10 
100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -130,7 +130,7 @@ def check_become_success(self, output): return self.success_key in output def check_password_prompt(self, output): - if self.prompt in None: + if self.prompt is None: return True elif isinstance(self.prompt, basestring): return output.endswith(self.prompt) From 7bb2a7aa874d881fa688f0efe1f050d379d01dfa Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 18 Jun 2015 10:23:37 -0400 Subject: [PATCH 0949/3617] actually no password to handle, this should return false --- lib/ansible/plugins/connections/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index 40c7b13e954e10..e6abc911021600 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -131,7 +131,7 @@ def check_become_success(self, output): def check_password_prompt(self, output): if self.prompt is None: - return True + return False elif isinstance(self.prompt, basestring): return output.endswith(self.prompt) else: From 270eb4274c7993658374dbcebbcb06ee2590a2dc Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 18 Jun 2015 11:12:30 -0400 Subject: [PATCH 0950/3617] Make sure we safe_eval booleans too Fixes #5779 --- lib/ansible/template/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 0cbae466946305..a296da1959b0b6 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -162,7 +162,7 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=Fals result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, fail_on_undefined=fail_on_undefined, overrides=overrides) # if this looks like a dictionary or list, convert it to such using 
the safe_eval method - if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or result.startswith("["): + if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or result.startswith("[") or result in ("True", "False"): eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True) if eval_results[1] is None: result = eval_results[0] From 98fee172ee99432e7c8ddeec10fb73d6ed30f585 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 18 Jun 2015 13:49:12 -0400 Subject: [PATCH 0951/3617] Fix bug in async action plugin --- lib/ansible/plugins/action/async.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 7fedd544d67506..336457b0e5fd6d 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -57,7 +57,7 @@ def run(self, tmp=None, task_vars=dict()): async_jid = str(random.randint(0, 999999999999)) async_cmd = " ".join([str(x) for x in [async_module_path, async_jid, async_limit, remote_module_path, argsfile]]) - result = self._low_level_execute_command(cmd=async_cmd, task_vars=task_vars, tmp=None) + result = self._low_level_execute_command(cmd=async_cmd, tmp=None) # clean up after if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES: From b370f6efceeb8ca986a194ebaa2910dc24143161 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 19 May 2015 15:37:47 -0500 Subject: [PATCH 0952/3617] Add tests for rax_scaling_group --- test/integration/cleanup_rax.py | 20 + test/integration/rackspace.yml | 3 + .../roles/prepare_rax_tests/defaults/main.yml | 8 +- .../test_rax_scaling_group/files/test.txt | 1 + .../test_rax_scaling_group/meta/main.yml | 3 + .../test_rax_scaling_group/tasks/main.yml | 877 ++++++++++++++++++ 6 files changed, 911 insertions(+), 1 deletion(-) create mode 100644 
test/integration/roles/test_rax_scaling_group/files/test.txt create mode 100644 test/integration/roles/test_rax_scaling_group/meta/main.yml create mode 100644 test/integration/roles/test_rax_scaling_group/tasks/main.yml diff --git a/test/integration/cleanup_rax.py b/test/integration/cleanup_rax.py index 95f8ba2f0aecb3..f872e9458db114 100644 --- a/test/integration/cleanup_rax.py +++ b/test/integration/cleanup_rax.py @@ -138,6 +138,26 @@ def delete_rax_cdb(args): args.assumeyes) +def _force_delete_rax_scaling_group(manager): + def wrapped(uri): + manager.api.method_delete('%s?force=true' % uri) + return wrapped + + +def delete_rax_scaling_group(args): + """Function for deleting Autoscale Groups""" + print ("--- Cleaning Autoscale Groups matching '%s'" % args.match_re) + for region in pyrax.identity.services.autoscale.regions: + asg = pyrax.connect_to_autoscale(region=region) + for group in rax_list_iterator(asg): + if re.search(args.match_re, group.name): + group.manager._delete = \ + _force_delete_rax_scaling_group(group.manager) + prompt_and_delete(group, + 'Delete matching %s? 
[y/n]: ' % group, + args.assumeyes) + + def main(): if not HAS_PYRAX: raise SystemExit('The pyrax python module is required for this script') diff --git a/test/integration/rackspace.yml b/test/integration/rackspace.yml index 37f9b097b9c30e..0fd56dc300badb 100644 --- a/test/integration/rackspace.yml +++ b/test/integration/rackspace.yml @@ -40,3 +40,6 @@ - role: test_rax_cdb_database tags: test_rax_cdb_database + + - role: test_rax_scaling_group + tags: test_rax_scaling_group diff --git a/test/integration/roles/prepare_rax_tests/defaults/main.yml b/test/integration/roles/prepare_rax_tests/defaults/main.yml index ffa72294b8cde6..48eec978abb0c5 100644 --- a/test/integration/roles/prepare_rax_tests/defaults/main.yml +++ b/test/integration/roles/prepare_rax_tests/defaults/main.yml @@ -7,4 +7,10 @@ rackspace_flavor: "performance1-1" rackspace_keypair_pub: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDymofzvt86DUA6XSSxc7eDHwUNvcOSmUWjB76jFvhYc6PbS5QmTzBtCka1ORdaW0Z2i3EjfFvzA8WvuY3qP/FpIVDL25ZqZHgxSfGN5pbJ2tAeXK165kNPXBuuISrMhmdLFbRZNn6PwKHEmtrtfEQ3w6ay9+MhqlEr0OX2r6bCXLj+f50QnQXamU6Fm4IpkTsb60osvHNi569Dd8cADEv92oLZpNMa8/MPGnlipjauhzNtEDTUeZwtrAQUXe6CzJ0QmIlyKDglDZLuAKU/VRumo1FRsn4AwJnVsbP2CHBPkbNoYt6LhQiQqXypEIWGmIln0dlO6gZTr3dYC4BVGREl" -resource_prefix: ansible-testing +resource_prefix: "ansible-testing" + +rackspace_alt_image_id: "e5575e1a-a519-4e21-9a6b-41207833bd39" +rackspace_alt_image_name: "CentOS 6 (PVHVM)" +rackspace_alt_image_human_id: "centos-6-pvhvm" + +rackspace_alt_flavor: "general1-1" diff --git a/test/integration/roles/test_rax_scaling_group/files/test.txt b/test/integration/roles/test_rax_scaling_group/files/test.txt new file mode 100644 index 00000000000000..493021b1c9e0ec --- /dev/null +++ b/test/integration/roles/test_rax_scaling_group/files/test.txt @@ -0,0 +1 @@ +this is a test file diff --git a/test/integration/roles/test_rax_scaling_group/meta/main.yml b/test/integration/roles/test_rax_scaling_group/meta/main.yml new file mode 100644 index 
00000000000000..a3f85b642e366a --- /dev/null +++ b/test/integration/roles/test_rax_scaling_group/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - prepare_rax_tests diff --git a/test/integration/roles/test_rax_scaling_group/tasks/main.yml b/test/integration/roles/test_rax_scaling_group/tasks/main.yml new file mode 100644 index 00000000000000..f9189b5ba51451 --- /dev/null +++ b/test/integration/roles/test_rax_scaling_group/tasks/main.yml @@ -0,0 +1,877 @@ +# ============================================================ +- name: Test rax_scaling_group with no args + rax_scaling_group: + ignore_errors: true + register: rax_scaling_group + +- name: Validate results of rax_scaling_group with no args + assert: + that: + - rax_scaling_group|failed + - "rax_scaling_group.msg == 'missing required arguments: image,min_entities,flavor,max_entities,name,server_name'" +# ============================================================ + + + +# ============================================================ +- name: Test rax_scaling_group with image,min_entities,flavor,max_entities,name,server_name + rax_scaling_group: + name: "{{ resource_prefix }}-1" + image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-1" + ignore_errors: true + register: rax_scaling_group + +- name: Validate results of rax_scaling_group with image,min_entities,flavor,max_entities,name,server_name + assert: + that: + - rax_scaling_group|failed + - rax_scaling_group.msg == 'No credentials supplied!' 
+# ============================================================ + + + +# ============================================================ +- name: Test rax_scaling_group with creds and required args + rax_scaling_group: + name: "{{ resource_prefix }}-1" + image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + ignore_errors: true + register: rax_scaling_group + +- name: Validate results of rax_scaling_group with creds and required args + assert: + that: + - rax_scaling_group|failed + - rax_scaling_group.msg.startswith('None is not a valid region') +# ============================================================ + + + + + +# ============================================================ +- name: Test rax_scaling_group with creds, region and required args + rax_scaling_group: + name: "{{ resource_prefix }}-1" + image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Validate results of rax_scaling_group with creds, region and required args + assert: + that: + - rax_scaling_group|success + - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-1" + - rax_scaling_group.autoscale_group.min_entities == 1 + - rax_scaling_group.autoscale_group.max_entities == 1 + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.flavorRef == "{{ rackspace_flavor }}" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.imageRef == "{{ rackspace_image_id }}" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-1" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.personality == [] + - 
rax_scaling_group.autoscale_group.launchConfiguration.args.loadBalancers == [] + - rax_scaling_group.autoscale_group.metadata == {} + +- name: Test rax_scaling_group idempotency 1 + rax_scaling_group: + name: "{{ resource_prefix }}-1" + image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Validate idempotency 1 + assert: + that: + - not rax_scaling_group|changed + +- name: Remove servers 1 + rax_scaling_group: + name: "{{ resource_prefix }}-1" + image: "{{ rackspace_image_id }}" + min_entities: 0 + max_entities: 0 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Validate remove servers 1 + assert: + that: + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.min_entities == 0 + - rax_scaling_group.autoscale_group.max_entities == 0 + - rax_scaling_group.autoscale_group.state.desiredCapacity == 0 + +- name: Test delete integration 1 + rax_scaling_group: + name: "{{ resource_prefix }}-1" + image: "{{ rackspace_image_id }}" + min_entities: 0 + max_entities: 0 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + state: absent + register: rax_scaling_group + +- name: Validate delete integration 1 + assert: + that: + - rax_scaling_group|changed +# ============================================================ + + + +# ============================================================ +- name: Test rax_scaling_group server_name change 1 + rax_scaling_group: + name: "{{ resource_prefix }}-2" + image: "{{ 
rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-2" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Validate results of rax_scaling_group server_name change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-2" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-2" + +- name: Test rax_scaling_group server_name change 2 + rax_scaling_group: + name: "{{ resource_prefix }}-2" + image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-2a" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Validate results of rax_scaling_group server_name change 2 + assert: + that: + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-2" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-2a" + +- name: Remove servers 2 + rax_scaling_group: + name: "{{ resource_prefix }}-2" + image: "{{ rackspace_image_id }}" + min_entities: 0 + max_entities: 0 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-2a" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Validate remove servers 2 + assert: + that: + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.min_entities == 0 + - rax_scaling_group.autoscale_group.max_entities == 0 + - rax_scaling_group.autoscale_group.state.desiredCapacity == 0 + +- name: Test delete integration 2 + rax_scaling_group: + name: "{{ resource_prefix }}-2" + image: 
"{{ rackspace_image_id }}" + min_entities: 0 + max_entities: 0 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-2a" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + state: absent + register: rax_scaling_group + +- name: Validate delete integration 2 + assert: + that: + - rax_scaling_group|changed +# ============================================================ + + + + +# ============================================================ +- name: Test rax_scaling_group with invalid load balancers + rax_scaling_group: + name: "{{ resource_prefix }}-3" + image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-3" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + loadbalancers: + - id: "1234567890-0987654321" + port: 80 + register: rax_scaling_group + ignore_errors: true + +- name: Validate results of rax_scaling_group with load balancers + assert: + that: + - rax_scaling_group|failed + - rax_scaling_group.msg.startswith('Load balancer ID is not an integer') +# ============================================================ + + + + +# ============================================================ +- name: Build a CLB to test rax_scaling_group with + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-clb" + wait: true + register: rax_clb + +- name: Validate rax_clb creation + assert: + that: + - rax_clb|success + +- name: Set variable for CLB ID + set_fact: + rax_clb_id: "{{ rax_clb.balancer.id }}" +# ============================================================ + + + + +# ============================================================ +- name: Test rax_scaling_group with load balancers + rax_scaling_group: + name: "{{ resource_prefix }}-3" + image: "{{ 
rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-3" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + loadbalancers: + - id: "{{ rax_clb_id }}" + port: 80 + register: rax_scaling_group + +- name: Validate results of rax_scaling_group with load balancers + assert: + that: + - rax_scaling_group|success + - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-3" + - rax_scaling_group.autoscale_group.launchConfiguration.args.loadBalancers[0].loadBalancerId == rax_clb_id|int + +- name: Remove servers 3 + rax_scaling_group: + name: "{{ resource_prefix }}-3" + image: "{{ rackspace_image_id }}" + min_entities: 0 + max_entities: 0 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-3" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Test delete integration 3 + rax_scaling_group: + name: "{{ resource_prefix }}-3" + image: "{{ rackspace_image_id }}" + min_entities: 0 + max_entities: 0 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-3" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + state: absent + register: rax_scaling_group +# ============================================================ + + + + +# ============================================================ +- name: Test rax_scaling_group files change 1 + rax_scaling_group: + name: "{{ resource_prefix }}-4" + image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + files: + /tmp/test.txt: "{{ role_path }}/files/test.txt" + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-4" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: 
Validate results of rax_scaling_group files change 1 + assert: + that: + - rax_scaling_group|success + - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-4" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.personality|length == 1 + +- name: Test rax_scaling_group files change 2 + rax_scaling_group: + name: "{{ resource_prefix }}-4" + image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-4" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Validate results of rax_scaling_group files change 2 + assert: + that: + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-4" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.personality is not defined + +- name: Remove servers 4 + rax_scaling_group: + name: "{{ resource_prefix }}-4" + image: "{{ rackspace_image_id }}" + min_entities: 0 + max_entities: 0 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-4" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Test delete integration 4 + rax_scaling_group: + name: "{{ resource_prefix }}-4" + image: "{{ rackspace_image_id }}" + min_entities: 0 + max_entities: 0 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-4" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + state: absent + register: rax_scaling_group +# ============================================================ + + + +# ============================================================ +- name: Build scaling group to test argument changes + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_image_id }}" + 
min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-5" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + register: rax_scaling_group + +- name: Validate default create + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.name == "{{ resource_prefix }}-5" + - rax_scaling_group.autoscale_group.min_entities == 1 + - rax_scaling_group.autoscale_group.max_entities == 1 + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.flavorRef == "{{ rackspace_flavor }}" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.imageRef == "{{ rackspace_image_id }}" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-5" + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.personality == [] + - rax_scaling_group.autoscale_group.launchConfiguration.args.loadBalancers == [] + - rax_scaling_group.autoscale_group.metadata == {} +# ============================================================ + + + +# ============================================================ +- name: Change cooldown + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 1 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-5" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + register: rax_scaling_group + +- name: Validate cooldown change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.cooldown == 500 +# ============================================================ + + + + +# ============================================================ +- name: Change max_entities + rax_scaling_group: + name: "{{ resource_prefix }}-5" 
+ image: "{{ rackspace_image_id }}" + min_entities: 1 + max_entities: 2 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-5" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + register: rax_scaling_group + +- name: Validate max_entities change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.max_entities == 2 +# ============================================================ + + + + +# ============================================================ +- name: Change min_entities + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-5" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + register: rax_scaling_group + +- name: Validate min_entities change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.min_entities == 2 +# ============================================================ + + + + +# ============================================================ +- name: Change server_name + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + register: rax_scaling_group + +- name: Validate server_name change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.name == "{{ resource_prefix }}-5-1" +# ============================================================ + + + + 
+# ============================================================ +- name: Change image + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_alt_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_flavor }}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + register: rax_scaling_group + +- name: Validate image change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.imageRef == "{{ rackspace_alt_image_id }}" +# ============================================================ + + + + +# ============================================================ +- name: Change flavor + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_alt_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_alt_flavor }}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + register: rax_scaling_group + +- name: Validate flavor change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.flavorRef == "{{ rackspace_alt_flavor }}" +# ============================================================ + + + + +# ============================================================ +- name: Change disk_config + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_alt_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_alt_flavor }}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + disk_config: auto + register: rax_scaling_group + +- name: 
Validate flavor change + assert: + that: + - rax_scaling_group|success + - not rax_scaling_group|changed + - rax_scaling_group.autoscale_group.launchConfiguration.args.server['OS-DCF:diskConfig'] == 'AUTO' + +- name: Change disk_config 2 + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_alt_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_alt_flavor }}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + disk_config: manual + register: rax_scaling_group + +- name: Validate flavor change 2 + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.launchConfiguration.args.server['OS-DCF:diskConfig'] == 'MANUAL' +# ============================================================ + + + + +# ============================================================ +- name: Change networks + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_alt_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_alt_flavor }}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + disk_config: manual + networks: + - public + register: rax_scaling_group + +- name: Validate networks change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.networks.0.uuid == "00000000-0000-0000-0000-000000000000" +# ============================================================ + + + + +# ============================================================ +- name: Change load balancers + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_alt_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_alt_flavor 
}}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + disk_config: manual + networks: + - public + - private + loadbalancers: + - id: "{{ rax_clb_id }}" + port: 80 + register: rax_scaling_group + +- name: Validate networks change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.launchConfiguration.args.loadBalancers.0.loadBalancerId == rax_clb_id|int +# ============================================================ + + + + +# ============================================================ +- name: Create keypair to test with + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-keypair" + public_key: "{{ rackspace_keypair_pub }}" + register: rax_keypair + +- name: Validate rax_keypair creation + assert: + that: + - rax_keypair|success + - rax_keypair|changed + - rax_keypair.keypair.name == "{{ resource_prefix }}-keypair" + - rax_keypair.keypair.public_key == "{{ rackspace_keypair_pub }}" +# ============================================================ + + + + +# ============================================================ +- name: Change key_name + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_alt_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_alt_flavor }}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + disk_config: manual + networks: + - public + - private + loadbalancers: + - id: "{{ rax_clb_id }}" + port: 80 + key_name: "{{ resource_prefix }}-keypair" + register: rax_scaling_group + +- name: Validate key_name change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - 
rax_scaling_group.autoscale_group.launchConfiguration.args.server.key_name == "{{ resource_prefix }}-keypair" +# ============================================================ + + + + +# ============================================================ +- name: Change config_drive + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_alt_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_alt_flavor }}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + disk_config: manual + networks: + - public + - private + loadbalancers: + - id: "{{ rax_clb_id }}" + port: 80 + key_name: "{{ resource_prefix }}-keypair" + config_drive: true + register: rax_scaling_group + +- name: Validate config_drive change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.config_drive +# ============================================================ + + + +# ============================================================ +- name: Change config_drive + rax_scaling_group: + name: "{{ resource_prefix }}-5" + image: "{{ rackspace_alt_image_id }}" + min_entities: 2 + max_entities: 2 + flavor: "{{ rackspace_alt_flavor }}" + server_name: "{{ resource_prefix }}-5-1" + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + cooldown: 500 + disk_config: manual + networks: + - public + - private + loadbalancers: + - id: "{{ rax_clb_id }}" + port: 80 + key_name: "{{ resource_prefix }}-keypair" + config_drive: true + user_data: "foo" + register: rax_scaling_group + +- name: Validate config_drive change + assert: + that: + - rax_scaling_group|success + - rax_scaling_group|changed + - rax_scaling_group.autoscale_group.launchConfiguration.args.server.user_data == '{{ "foo"|b64encode }}' +# 
============================================================ + + + + +# ============================================================ +- name: Delete keypair + rax_keypair: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ resource_prefix }}-keypair" + public_key: "{{ rackspace_keypair_pub }}" + state: absent + register: rax_keypair + +- name: Validate rax_keypair creation + assert: + that: + - rax_keypair|success + - rax_keypair|changed +# ============================================================ + + + + +# ============================================================ +- name: Delete CLB + rax_clb: + username: "{{ rackspace_username }}" + api_key: "{{ rackspace_api_key }}" + region: "{{ rackspace_region }}" + name: "{{ rax_clb.balancer.name }}" + state: absent + wait: true + register: rax_clb + +- name: "Validate delete integration 3" + assert: + that: + - rax_clb|changed + - rax_clb.balancer.id == rax_clb_id|int +# ============================================================ From c0dfa8d5121ee3588efc4b036880b25488b6fbb8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 18 Jun 2015 14:27:20 -0400 Subject: [PATCH 0953/3617] Make sure task names are templated before callbacks are sent --- lib/ansible/playbook/base.py | 2 +- lib/ansible/plugins/strategies/linear.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index ecd217c1e8f26d..211fff3a3abb19 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -281,7 +281,7 @@ def post_validate(self, templar): except (TypeError, ValueError) as e: raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. 
Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds()) except UndefinedError as e: - if templar._fail_on_undefined_errors: + if templar._fail_on_undefined_errors and name != 'name': raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds()) def serialize(self): diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index b60a922f83438a..9b78c6e13e34ca 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -26,6 +26,7 @@ from ansible.playbook.task import Task from ansible.plugins import action_loader from ansible.plugins.strategies import StrategyBase +from ansible.template import Templar from ansible.utils.debug import debug class StrategyModule(StrategyBase): @@ -166,6 +167,7 @@ def run(self, iterator, connection_info): debug("getting variables") task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) + templar = Templar(loader=self._loader, variables=task_vars) debug("done getting variables") # check to see if this task should be skipped, due to it being a member of a @@ -190,7 +192,9 @@ def run(self, iterator, connection_info): raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds) else: if not callback_sent: - self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False) + temp_task = task.copy() + temp_task.name = templar.template(temp_task.get_name(), fail_on_undefined=False) + self._tqm.send_callback('v2_playbook_on_task_start', temp_task, is_conditional=False) callback_sent = True self._blocked_hosts[host.get_name()] = True From 18a9eff11f0a6e51b17405ce596bd9ff7e676320 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 18 Jun 2015 16:10:01 -0400 Subject: [PATCH 0954/3617] Properly use local variables from templates including other templates 
Fixes #6653 --- lib/ansible/template/__init__.py | 9 ++++++++- lib/ansible/template/vars.py | 14 ++++++++++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index a296da1959b0b6..1841560abbad6f 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -22,6 +22,7 @@ import re from jinja2 import Environment +from jinja2.loaders import FileSystemLoader from jinja2.exceptions import TemplateSyntaxError, UndefinedError from jinja2.utils import concat as j2_concat from jinja2.runtime import StrictUndefined @@ -71,7 +72,13 @@ def __init__(self, loader, shared_loader_obj=None, variables=dict()): self._fail_on_filter_errors = True self._fail_on_undefined_errors = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR - self.environment = Environment(trim_blocks=True, undefined=StrictUndefined, extensions=self._get_extensions(), finalize=self._finalize) + self.environment = Environment( + trim_blocks=True, + undefined=StrictUndefined, + extensions=self._get_extensions(), + finalize=self._finalize, + loader=FileSystemLoader('.'), + ) self.environment.template_class = AnsibleJ2Template self.SINGLE_VAR = re.compile(r"^%s\s*(\w*)\s*%s$" % (self.environment.variable_start_string, self.environment.variable_end_string)) diff --git a/lib/ansible/template/vars.py b/lib/ansible/template/vars.py index 3c0bb61ecb0405..16efe9bff543c3 100644 --- a/lib/ansible/template/vars.py +++ b/lib/ansible/template/vars.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from jinja2.utils import missing __all__ = ['AnsibleJ2Vars'] @@ -33,7 +34,7 @@ class AnsibleJ2Vars: To facilitate using builtin jinja2 things like range, globals are also handled here. 
''' - def __init__(self, templar, globals, *extras): + def __init__(self, templar, globals, locals=dict(), *extras): ''' Initializes this object with a valid Templar() object, as well as several dictionaries of variables representing @@ -43,10 +44,17 @@ def __init__(self, templar, globals, *extras): self._templar = templar self._globals = globals self._extras = extras + self._locals = dict() + if isinstance(locals, dict): + for key, val in locals.iteritems(): + if key[:2] == 'l_' and val is not missing: + self._locals[key[2:]] = val def __contains__(self, k): if k in self._templar._available_variables: return True + if k in self._locals: + return True for i in self._extras: if k in i: return True @@ -59,6 +67,8 @@ def __getitem__(self, varname): #from ansible.runner import HostVars if varname not in self._templar._available_variables: + if varname in self._locals: + return self._locals[varname] for i in self._extras: if varname in i: return i[varname] @@ -84,5 +94,5 @@ def add_locals(self, locals): ''' if locals is None: return self - return AnsibleJ2Vars(self._templar, self._globals, locals, *self._extras) + return AnsibleJ2Vars(self._templar, self._globals, locals=locals, *self._extras) From f0777d9c4ec90d968b2a56e411b75b419cd30876 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Jun 2015 09:08:57 -0700 Subject: [PATCH 0955/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9acc7c402f7297..cf273bbaeba32a 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9acc7c402f729748205e78f2b66b8f25b7552e37 +Subproject commit cf273bbaeba32a2e9ffab3616cbc2d1835bffc07 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 2f967a949f9a45..dd6e8f354aaeee 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 
@@ -Subproject commit 2f967a949f9a45657c31ae66c0c7e7c2672a87d8 +Subproject commit dd6e8f354aaeeeaccc1566ab14cfd368d6ec1f72 From ca2f2c4ebd7b5e097eab0a710f79c1f63badf95b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Jun 2015 09:41:48 -0700 Subject: [PATCH 0956/3617] Fix problem with jail and zone connection plugins and symlinks from within the jail/zone. --- lib/ansible/plugins/connections/jail.py | 77 +++++++++++++++---------- lib/ansible/plugins/connections/zone.py | 77 +++++++++++++++---------- 2 files changed, 93 insertions(+), 61 deletions(-) diff --git a/lib/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py index f7623b3938265b..08428229afca76 100644 --- a/lib/ansible/plugins/connections/jail.py +++ b/lib/ansible/plugins/connections/jail.py @@ -1,6 +1,7 @@ # Based on local.py (c) 2012, Michael DeHaan # and chroot.py (c) 2013, Maykel Moya # (c) 2013, Michael Scherer +# (c) 2015, Toshio Kuratomi # # This file is part of Ansible # @@ -22,14 +23,15 @@ import distutils.spawn import traceback import os -import shutil import subprocess from ansible import errors from ansible.callbacks import vvv import ansible.constants as C +BUFSIZE = 4096 + class Connection(object): - ''' Local chroot based connections ''' + ''' Local BSD Jail based connections ''' def _search_executable(self, executable): cmd = distutils.spawn.find_executable(executable) @@ -81,9 +83,9 @@ def __init__(self, runner, host, port, *args, **kwargs): self.port = port def connect(self, port=None): - ''' connect to the chroot; nothing to do here ''' + ''' connect to the jail; nothing to do here ''' - vvv("THIS IS A LOCAL CHROOT DIR", host=self.jail) + vvv("THIS IS A LOCAL JAIL DIR", host=self.jail) return self @@ -95,8 +97,14 @@ def _generate_cmd(self, executable, cmd): local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd) return local_cmd - def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): - ''' 
run a command on the chroot ''' + def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE): + ''' run a command on the jail. This is only needed for implementing + put_file() get_file() so that we don't have to read the whole file + into memory. + + compared to exec_command() it looses some niceties like being able to + return the process's exit code immediately. + ''' if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) @@ -110,45 +118,52 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab vvv("EXEC %s" % (local_cmd), host=self.jail) p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring), cwd=self.runner.basedir, - stdin=subprocess.PIPE, + stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return p + + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): + ''' run a command on the jail ''' + + p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data) + stdout, stderr = p.communicate() return (p.returncode, '', stdout, stderr) - def _normalize_path(self, path, prefix): - if not path.startswith(os.path.sep): - path = os.path.join(os.path.sep, path) - normpath = os.path.normpath(path) - return os.path.join(prefix, normpath[1:]) - - def _copy_file(self, in_path, out_path): - if not os.path.exists(in_path): - raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) - try: - shutil.copyfile(in_path, out_path) - except shutil.Error: - traceback.print_exc() - raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path)) - except IOError: - traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" 
% out_path) - def put_file(self, in_path, out_path): - ''' transfer a file from local to chroot ''' + ''' transfer a file from local to jail ''' - out_path = self._normalize_path(out_path, self.get_jail_path()) vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) - self._copy_file(in_path, out_path) + with open(in_path, 'rb') as in_file: + p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + try: + stdout, stderr = p.communicate() + except: + traceback.print_exc() + raise errors.AnsibleError("failed to transfer file to %s" % out_path) + if p.returncode != 0: + raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) def fetch_file(self, in_path, out_path): - ''' fetch a file from chroot to local ''' + ''' fetch a file from jail to local ''' - in_path = self._normalize_path(in_path, self.get_jail_path()) vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) - self._copy_file(in_path, out_path) + + p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None) + + with open(out_path, 'wb+') as out_file: + try: + for chunk in p.stdout.read(BUFSIZE): + out_file.write(chunk) + except: + traceback.print_exc() + raise errors.AnsibleError("failed to transfer file to %s" % out_path) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) def close(self): ''' terminate the connection; nothing to do here ''' diff --git a/lib/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connections/zone.py index f7e19c3bb4471f..aacb6f709eefe2 100644 --- a/lib/ansible/plugins/connections/zone.py +++ b/lib/ansible/plugins/connections/zone.py @@ -2,6 +2,7 @@ # and chroot.py (c) 2013, Maykel Moya # and jail.py (c) 2013, Michael Scherer # (c) 2015, Dagobert Michelsen +# (c) 2015, Toshio Kuratomi # # This file is part of Ansible # @@ -23,13 +24,13 @@ import distutils.spawn import traceback import 
os -import shutil import subprocess -from subprocess import Popen,PIPE from ansible import errors from ansible.callbacks import vvv import ansible.constants as C +BUFSIZE = 4096 + class Connection(object): ''' Local zone based connections ''' @@ -44,7 +45,7 @@ def list_zones(self): cwd=self.runner.basedir, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - #stdout, stderr = p.communicate() + zones = [] for l in pipe.stdout.readlines(): # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared @@ -97,13 +98,20 @@ def connect(self, port=None): # a modifier def _generate_cmd(self, executable, cmd): if executable: + ### TODO: Why was "-c" removed from here? (vs jail.py) local_cmd = [self.zlogin_cmd, self.zone, executable, cmd] else: local_cmd = '%s "%s" %s' % (self.zlogin_cmd, self.zone, cmd) return local_cmd - def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None): - ''' run a command on the zone ''' + def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None, stdin=subprocess.PIPE): + ''' run a command on the zone. This is only needed for implementing + put_file() get_file() so that we don't have to read the whole file + into memory. + + compared to exec_command() it looses some niceties like being able to + return the process's exit code immediately. 
+ ''' if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) @@ -112,52 +120,61 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") # We happily ignore privilege escalation - if executable == '/bin/sh': - executable = None local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.zone) p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring), cwd=self.runner.basedir, - stdin=subprocess.PIPE, + stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return p + + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None): + ''' run a command on the zone ''' + + ### TODO: Why all the precautions not to specify /bin/sh? 
(vs jail.py) + if executable == '/bin/sh': + executable = None + + p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data) + stdout, stderr = p.communicate() return (p.returncode, '', stdout, stderr) - def _normalize_path(self, path, prefix): - if not path.startswith(os.path.sep): - path = os.path.join(os.path.sep, path) - normpath = os.path.normpath(path) - return os.path.join(prefix, normpath[1:]) - - def _copy_file(self, in_path, out_path): - if not os.path.exists(in_path): - raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) - try: - shutil.copyfile(in_path, out_path) - except shutil.Error: - traceback.print_exc() - raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path)) - except IOError: - traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" % out_path) - def put_file(self, in_path, out_path): ''' transfer a file from local to zone ''' - out_path = self._normalize_path(out_path, self.get_zone_path()) vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) - self._copy_file(in_path, out_path) + with open(in_path, 'rb') as in_file: + p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + try: + stdout, stderr = p.communicate() + except: + traceback.print_exc() + raise errors.AnsibleError("failed to transfer file to %s" % out_path) + if p.returncode != 0: + raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) def fetch_file(self, in_path, out_path): ''' fetch a file from zone to local ''' - in_path = self._normalize_path(in_path, self.get_zone_path()) vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) - self._copy_file(in_path, out_path) + + p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None) + + with open(out_path, 'wb+') as out_file: + try: + for chunk in p.stdout.read(BUFSIZE): + out_file.write(chunk) + except: + traceback.print_exc() 
+ raise errors.AnsibleError("failed to transfer file to %s" % out_path) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) def close(self): ''' terminate the connection; nothing to do here ''' From 0777d025051bf5cf3092aa79a9e6b67cec7064dd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Jun 2015 11:09:48 -0700 Subject: [PATCH 0957/3617] Fix problem with jail and zone connection plugins and symlinks from within the jail/zone. --- lib/ansible/plugins/connections/jail.py | 14 +++++++++----- lib/ansible/plugins/connections/zone.py | 11 +++++++++-- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py index 08428229afca76..bbe1613f7e4d75 100644 --- a/lib/ansible/plugins/connections/jail.py +++ b/lib/ansible/plugins/connections/jail.py @@ -59,8 +59,6 @@ def get_jail_path(self): # remove \n return stdout[:-1] - - def __init__(self, runner, host, port, *args, **kwargs): self.jail = host self.runner = runner @@ -73,7 +71,7 @@ def __init__(self, runner, host, port, *args, **kwargs): self.jls_cmd = self._search_executable('jls') self.jexec_cmd = self._search_executable('jexec') - + if not self.jail in self.list_jails(): raise errors.AnsibleError("incorrect jail name %s" % self.jail) @@ -137,7 +135,10 @@ def put_file(self, in_path, out_path): vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) with open(in_path, 'rb') as in_file: - p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + try: + p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + except OSError: + raise errors.AnsibleError("jail connection requires dd command in the jail") try: stdout, stderr = p.communicate() except: @@ -152,7 +153,10 @@ def fetch_file(self, in_path, out_path): vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) - p = 
self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None) + try: + p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None) + except OSError: + raise errors.AnsibleError("jail connection requires dd command in the jail") with open(out_path, 'wb+') as out_file: try: diff --git a/lib/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connections/zone.py index aacb6f709eefe2..9aaeb5471e93ec 100644 --- a/lib/ansible/plugins/connections/zone.py +++ b/lib/ansible/plugins/connections/zone.py @@ -148,7 +148,10 @@ def put_file(self, in_path, out_path): vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) with open(in_path, 'rb') as in_file: - p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + try: + p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + except OSError: + raise errors.AnsibleError("zone connection requires dd command in the zone") try: stdout, stderr = p.communicate() except: @@ -163,7 +166,11 @@ def fetch_file(self, in_path, out_path): vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None) + try: + p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None) + except OSError: + raise errors.AnsibleError("zone connection requires dd command in the zone") + with open(out_path, 'wb+') as out_file: try: From a77b58e3514553cf1e44245b7cf95b48b883e171 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Jun 2015 11:52:06 -0700 Subject: [PATCH 0958/3617] Bumpt the BUFSIZE to 64k for better performance --- lib/ansible/plugins/connections/jail.py | 2 +- lib/ansible/plugins/connections/zone.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py index bbe1613f7e4d75..4a47d5101e35ba 100644 --- a/lib/ansible/plugins/connections/jail.py +++ 
b/lib/ansible/plugins/connections/jail.py @@ -28,7 +28,7 @@ from ansible.callbacks import vvv import ansible.constants as C -BUFSIZE = 4096 +BUFSIZE = 65536 class Connection(object): ''' Local BSD Jail based connections ''' diff --git a/lib/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connections/zone.py index 9aaeb5471e93ec..ffcabfca5fefcb 100644 --- a/lib/ansible/plugins/connections/zone.py +++ b/lib/ansible/plugins/connections/zone.py @@ -29,7 +29,7 @@ from ansible.callbacks import vvv import ansible.constants as C -BUFSIZE = 4096 +BUFSIZE = 65536 class Connection(object): ''' Local zone based connections ''' From 0d92599d18d47c165057be2a95ef1cddbb237300 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 19 Jun 2015 22:58:53 -0400 Subject: [PATCH 0959/3617] Make exception printing a bit smarter --- lib/ansible/plugins/action/__init__.py | 2 +- lib/ansible/plugins/callback/default.py | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index f941d1304cad97..d98c980e494beb 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -405,7 +405,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var # not valid json, lets try to capture error data = dict(failed=True, parsed=False) if 'stderr' in res and res['stderr'].startswith('Traceback'): - data['traceback'] = res['stderr'] + data['exception'] = res['stderr'] else: data['msg'] = res.get('stdout', '') if 'stderr' in res: diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 5b50b49cc89702..071cb8e48adeea 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -37,10 +37,24 @@ def v2_on_any(self, *args, **kwargs): pass def v2_runner_on_failed(self, result, ignore_errors=False): - if 'exception' in result._result and 
self._display.verbosity < 3: + if 'exception' in result._result: + if self._display.verbosity < 3: + # extract just the actual error message from the exception text + error = result._result['exception'].strip().split('\n')[-1] + msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error + else: + msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] + + self._display.display(msg, color='red') + + # finally, remove the exception from the result so it's not shown every time del result._result['exception'] + self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red') + if result._task.ignore_errors: + self._display.display("...ignoring") + def v2_runner_on_ok(self, result): if result._task.action == 'include': From fc5be30c2fc5ff56d8714a28ffbd7154b9c1372f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 19 Jun 2015 23:04:35 -0400 Subject: [PATCH 0960/3617] Change the use of a mutable arg for a default value for locals --- lib/ansible/template/vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/template/vars.py b/lib/ansible/template/vars.py index 16efe9bff543c3..96051f457410d3 100644 --- a/lib/ansible/template/vars.py +++ b/lib/ansible/template/vars.py @@ -34,7 +34,7 @@ class AnsibleJ2Vars: To facilitate using builtin jinja2 things like range, globals are also handled here. 
''' - def __init__(self, templar, globals, locals=dict(), *extras): + def __init__(self, templar, globals, locals=None, *extras): ''' Initializes this object with a valid Templar() object, as well as several dictionaries of variables representing From be81b650e80ca07fb3f669a13b4882919508c558 Mon Sep 17 00:00:00 2001 From: Benno Joy Date: Sat, 20 Jun 2015 14:10:41 +0530 Subject: [PATCH 0961/3617] fixes issue 11286 where role handlers are not run --- lib/ansible/executor/play_iterator.py | 3 +++ lib/ansible/executor/process/result.py | 3 +++ lib/ansible/playbook/play.py | 14 ++++++++++++++ lib/ansible/playbook/role/__init__.py | 2 +- 4 files changed, 21 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index d7c966148916d2..585c6556eb390f 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -100,6 +100,9 @@ def __init__(self, inventory, play, connection_info, all_vars): for host in inventory.get_hosts(self._play.hosts): self._host_states[host.name] = HostState(blocks=self._blocks) + # Extend the play handlers list to include the handlers defined in roles + self._play.handlers.extend(play.compile_roles_handlers()) + def get_host_state(self, host): try: return self._host_states[host.name].copy() diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 352b532cd48cc3..1b8f4f5d31d660 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -129,6 +129,9 @@ def run(self): # So, per the docs, we reassign the list so the proxy picks up and # notifies all other threads for notify in result._task.notify: + if result._task._role: + role_name = result._task._role.get_name() + notify = "%s : %s" %(role_name, notify) self._send_result(('notify_handler', result._host, notify)) if result._task.loop: diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 
49a986555cdc29..ffa526d0ff8f7f 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -206,6 +206,20 @@ def _compile_roles(self): return block_list + def compile_roles_handlers(self): + ''' + Handles the role handler compilation step, returning a flat list of Handlers + This is done for all roles in the Play. + ''' + + block_list = [] + + if len(self.roles) > 0: + for r in self.roles: + block_list.extend(r.get_handler_blocks()) + + return block_list + def compile(self): ''' Compiles and returns the task list for this play, compiled from the diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index bea61147ae86b6..b453d937405013 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -172,7 +172,7 @@ def _load_role_data(self, role_include, parent_role=None): handler_data = self._load_role_yaml('handlers') if handler_data: - self._handler_blocks = load_list_of_blocks(handler_data, play=None, role=self, loader=self._loader) + self._handler_blocks = load_list_of_blocks(handler_data, play=None, role=self, use_handlers=True, loader=self._loader) # vars and default vars are regular dictionaries self._role_vars = self._load_role_yaml('vars') From e4fcef21369d4cf33747acf2278c4455fa63d429 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 01:35:07 -0400 Subject: [PATCH 0962/3617] added ec2_eni to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 064612f5bdd3f6..20cd0517d3170c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ Deprecated Modules (new ones in parens): New Modules: * amazon: ec2_ami_find + * amazon: ec2_eni * amazon: ec2_eni_facts * amazon: elasticache_subnet_group * amazon: ec2_win_password From 2367fb8934905fa86d3b52c16cac0ae5dcf3b673 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 09:44:24 -0400 Subject: [PATCH 0963/3617] added cs_facts to changelog 
--- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 20cd0517d3170c..976d4718a8d1e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ New Modules: * cloudtrail * cloudstack: cs_account * cloudstack: cs_affinitygroup + * cloudstack: cs_facts * cloudstack: cs_firewall * cloudstack: cs_iso * cloudstack: cs_instance From 83350c4156293f4f0bac0b8a625a6641569e7475 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 11:00:17 -0400 Subject: [PATCH 0964/3617] added ec2_ami_copy to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 976d4718a8d1e6..ba15c2063fc4b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ Deprecated Modules (new ones in parens): * nova_compute (os_server) New Modules: + * amazon: ec2_ami_copy * amazon: ec2_ami_find * amazon: ec2_eni * amazon: ec2_eni_facts From 415c6bdc7537302dafe54e675afa91a5ca08a59b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 11:18:55 -0400 Subject: [PATCH 0965/3617] added sensu mdoules to changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ba15c2063fc4b7..88642b64197e65 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,8 @@ New Modules: * rabbitmq_binding * rabbitmq_exchange * rabbitmq_queue + * sensu_check + * sensu_subscription * vertica_configuration * vertica_facts * vertica_role From 3bad03d57afc69ae1db3ba76ce52132fd4ad3e52 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 16:30:20 -0400 Subject: [PATCH 0966/3617] cleaned up and optimized become handling paths --- lib/ansible/plugins/connections/ssh.py | 42 +++++++++++--------------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 7fb62e2263dd99..56cf996e80a56a 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ 
b/lib/ansible/plugins/connections/ssh.py @@ -179,18 +179,19 @@ def _communicate(self, p, stdin, indata, sudoable=True): if self._connection_info.become_pass: self.check_incorrect_password(stdout) elif self.check_password_prompt(stdout): - raise AnsibleError('Missing %s password', self._connection_info.become_method) + raise AnsibleError('Missing %s password' % self._connection_info.become_method) - if p.stdout in rfd: - dat = os.read(p.stdout.fileno(), 9000) - stdout += dat - if dat == '': - rpipes.remove(p.stdout) if p.stderr in rfd: dat = os.read(p.stderr.fileno(), 9000) stderr += dat if dat == '': rpipes.remove(p.stderr) + elif p.stdout in rfd: + dat = os.read(p.stdout.fileno(), 9000) + stdout += dat + if dat == '': + rpipes.remove(p.stdout) + # only break out if no pipes are left to read or # the pipes are completely read and # the process is terminated @@ -324,9 +325,6 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): * detect prompt on stderr (no-tty) ''' - out = '' - err = '' - debug("Handling privilege escalation password prompt.") if self._connection_info.become and self._connection_info.become_pass: @@ -342,34 +340,30 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): break rfd, wfd, efd = select.select([p.stdout, p.stderr], [], [p.stdout], self._connection_info.timeout) - if p.stderr in rfd: + if not rfd: + # timeout. 
wrap up process communication + stdout, stderr = p.communicate() + raise AnsibleError('Connection error waiting for privilege escalation password prompt: %s' % become_output) + + elif p.stderr in rfd: chunk = p.stderr.read() - if not chunk: - raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output) become_errput += chunk - self.check_incorrect_password(become_errput) - if p.stdout in rfd: + elif p.stdout in rfd: chunk = p.stdout.read() - if not chunk: - raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output) become_output += chunk - if not rfd: - # timeout. wrap up process communication - stdout, stderr = p.communicate() - raise AnsibleError('Connection error waiting for privilege escalation password prompt: %s' % become_output) + if not chunk: + raise AnsibleError('Connection closed waiting for privilege escalation password prompt: %s ' % become_output) if not self.check_become_success(become_output): debug("Sending privilege escalation password.") stdin.write(self._connection_info.become_pass + '\n') else: - out += become_output - err += become_errput + no_prompt_out = become_output + no_prompt_err = become_errput - no_prompt_out = out - no_prompt_err = err (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable) From 102de96ebf43d6efad43ff66f9a1ce73f071e237 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 21 Jun 2015 01:24:35 -0400 Subject: [PATCH 0967/3617] avoid password handling when no password is supplied --- lib/ansible/plugins/connections/local.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py index 3655cb5b6dfd1d..e046dc6c393d47 100644 --- a/lib/ansible/plugins/connections/local.py +++ b/lib/ansible/plugins/connections/local.py @@ -75,7 +75,7 @@ def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ) 
debug("done running command with Popen()") - if self.prompt: + if self.prompt and self._connection_info.become_pass: fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) become_output = '' From 68325dbfe24adc6ae07eee95b66d580109ffe7f5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 21 Jun 2015 01:43:35 -0400 Subject: [PATCH 0968/3617] fixed remote tmp creation when becoem user is not root and '~/' instead of $HOME is the default --- lib/ansible/plugins/shell/sh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index f7ba06d93188c8..3385d9fb04cce0 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -62,7 +62,7 @@ def mkdtemp(self, basefile=None, system=False, mode=None): if not basefile: basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile) - if system and basetmp.startswith('$HOME'): + if system and basetmp.startswith('$HOME') or basetmp.startswith('~/'): basetmp = self.join_path('/tmp', basefile) cmd = 'mkdir -p %s' % basetmp if mode: From b34b606fcf73d2a1c46f9b4cc5972d105aeada63 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 21 Jun 2015 01:51:28 -0400 Subject: [PATCH 0969/3617] fixed and/or grouping --- lib/ansible/plugins/shell/sh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index 3385d9fb04cce0..cdf67f4fa25235 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -62,7 +62,7 @@ def mkdtemp(self, basefile=None, system=False, mode=None): if not basefile: basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48)) basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile) - if system and 
basetmp.startswith('$HOME') or basetmp.startswith('~/'): + if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')): basetmp = self.join_path('/tmp', basefile) cmd = 'mkdir -p %s' % basetmp if mode: From 2aba3b4172d4f4ca7dd4cdb0033492beaf246d32 Mon Sep 17 00:00:00 2001 From: Peter Parente Date: Sun, 21 Jun 2015 15:39:22 -0400 Subject: [PATCH 0970/3617] Fix typo: "name" to "role" --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index b0e2e223cdc9ae..7bf006cf75d0e6 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -782,7 +782,7 @@ Parameterized roles are useful. If you are using a role and want to override a default, pass it as a parameter to the role like so:: roles: - - { name: apache, http_port: 8080 } + - { role: apache, http_port: 8080 } This makes it clear to the playbook reader that you've made a conscious choice to override some default in the role, or pass in some configuration that the role can't assume by itself. 
It also allows you to pass something site-specific that isn't really part of the From f17bdc4d616dbbe62d17721cd7aca806cb9530e0 Mon Sep 17 00:00:00 2001 From: Benno Joy Date: Mon, 22 Jun 2015 00:37:44 -0400 Subject: [PATCH 0971/3617] Set the ansible_ssh_port variable instead of saving it internally for Host Fixes #11330 --- lib/ansible/inventory/host.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index 29d6afd991208a..ffdbc6f9c3a7cb 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -49,7 +49,6 @@ def serialize(self): vars=self.vars.copy(), ipv4_address=self.ipv4_address, ipv6_address=self.ipv6_address, - port=self.port, gathered_facts=self._gathered_facts, groups=groups, ) @@ -61,7 +60,6 @@ def deserialize(self, data): self.vars = data.get('vars', dict()) self.ipv4_address = data.get('ipv4_address', '') self.ipv6_address = data.get('ipv6_address', '') - self.port = data.get('port') groups = data.get('groups', []) for group_data in groups: @@ -79,9 +77,9 @@ def __init__(self, name=None, port=None): self.ipv6_address = name if port and port != C.DEFAULT_REMOTE_PORT: - self.port = int(port) + self.set_variable('ansible_ssh_port', int(port)) else: - self.port = C.DEFAULT_REMOTE_PORT + self.set_variable('ansible_ssh_port', C.DEFAULT_REMOTE_PORT) self._gathered_facts = False From 97954ff658554a3a2292c09a8fd63132d18ee11b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 22 Jun 2015 00:53:34 -0400 Subject: [PATCH 0972/3617] Minor tweak to potentially speed the linear strategy up * Don't fetch vars for the task unless we're going to use them --- lib/ansible/plugins/strategies/linear.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index 9b78c6e13e34ca..efa96a35a7f744 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ 
b/lib/ansible/plugins/strategies/linear.py @@ -165,11 +165,6 @@ def run(self, iterator, connection_info): # corresponding action plugin pass - debug("getting variables") - task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) - templar = Templar(loader=self._loader, variables=task_vars) - debug("done getting variables") - # check to see if this task should be skipped, due to it being a member of a # role which has already run (and whether that role allows duplicate execution) if task._role and task._role.has_run(): @@ -191,6 +186,11 @@ def run(self, iterator, connection_info): else: raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds) else: + debug("getting variables") + task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) + templar = Templar(loader=self._loader, variables=task_vars) + debug("done getting variables") + if not callback_sent: temp_task = task.copy() temp_task.name = templar.template(temp_task.get_name(), fail_on_undefined=False) From ff251a0dcc69249b4da1f0770bb1356b9f8391c2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 22 Jun 2015 02:06:07 -0400 Subject: [PATCH 0973/3617] Catch runtime errors due to recursion when calculating group depth Fixes #7708 --- lib/ansible/inventory/group.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py index 17f3ff744faee1..8dbda6315608ad 100644 --- a/lib/ansible/inventory/group.py +++ b/lib/ansible/inventory/group.py @@ -17,6 +17,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible.errors import AnsibleError from ansible.utils.debug import debug class Group: @@ -99,9 +100,12 @@ def add_child_group(self, group): def _check_children_depth(self): - for group in self.child_groups: - group.depth = max([self.depth+1, group.depth]) - 
group._check_children_depth() + try: + for group in self.child_groups: + group.depth = max([self.depth+1, group.depth]) + group._check_children_depth() + except RuntimeError: + raise AnsibleError("The group named '%s' has a recursive dependency loop." % self.name) def add_host(self, host): From cb5f630f33c7635baa2072ce944f07b780512662 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 22 Jun 2015 11:23:23 -0400 Subject: [PATCH 0974/3617] Don't post_validate vars and vars_files on Play objects Fixes #11343 --- lib/ansible/playbook/base.py | 14 ++++++++------ lib/ansible/playbook/play.py | 14 ++++++++++++++ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index 211fff3a3abb19..2d931748ebbbc9 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -254,15 +254,17 @@ def post_validate(self, templar): raise AnsibleParserError("the field '%s' is required but was not set" % name) try: - # if the attribute contains a variable, template it now - value = templar.template(getattr(self, name)) - - # run the post-validator if present + # Run the post-validator if present. These methods are responsible for + # using the given templar to template the values, if required. 
method = getattr(self, '_post_validate_%s' % name, None) if method: - value = method(attribute, value, all_vars, templar._fail_on_undefined_errors) + value = method(attribute, getattr(self, name), templar) else: - # otherwise, just make sure the attribute is of the type it should be + # if the attribute contains a variable, template it now + value = templar.template(getattr(self, name)) + + # and make sure the attribute is of the type it should be + if value is not None: if attribute.isa == 'string': value = unicode(value) elif attribute.isa == 'int': diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ffa526d0ff8f7f..093a4e1d4722b8 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -187,6 +187,20 @@ def _load_roles(self, attr, ds): roles.append(Role.load(ri)) return roles + def _post_validate_vars(self, attr, value, templar): + ''' + Override post validation of vars on the play, as we don't want to + template these too early. + ''' + return value + + def _post_validate_vars_files(self, attr, value, templar): + ''' + Override post validation of vars_files on the play, as we don't want to + template these too early. 
+ ''' + return value + # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set def _compile_roles(self): From 7490044bbe28029afa9e3099d86eae9fda5f88b7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 22 Jun 2015 21:03:55 -0400 Subject: [PATCH 0975/3617] Implement play_hosts magic variable (and ansible_current_hosts) Fixes #8073 --- lib/ansible/plugins/strategies/__init__.py | 12 ++++++++++++ lib/ansible/plugins/strategies/linear.py | 1 + lib/ansible/vars/__init__.py | 9 +++++++++ 3 files changed, 22 insertions(+) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index e9cdd7d35ce93f..83e045bfe398c2 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -104,6 +104,17 @@ def get_hosts_remaining(self, play): def get_failed_hosts(self, play): return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts] + def add_tqm_variables(self, vars, play): + ''' + Base class method to add extra variables/information to the list of task + vars sent through the executor engine regarding the task queue manager state. 
+ ''' + + new_vars = vars.copy() + new_vars['ansible_current_hosts'] = self.get_hosts_remaining(play) + new_vars['ansible_failed_hosts'] = self.get_failed_hosts(play) + return new_vars + def _queue_task(self, host, task, task_vars, connection_info): ''' handles queueing the task up to be sent to a worker ''' @@ -374,6 +385,7 @@ def run_handlers(self, iterator, connection_info): for host in self._notified_handlers[handler_name]: if not handler.has_triggered(host): task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler) + task_vars = self.add_tqm_variables(task_vars, play=iterator._play) self._queue_task(host, handler, task_vars, connection_info) handler.flag_for_host(host) self._process_pending_results(iterator) diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index efa96a35a7f744..1ce9677f8f9b10 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -188,6 +188,7 @@ def run(self, iterator, connection_info): else: debug("getting variables") task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) + task_vars = self.add_tqm_variables(task_vars, play=iterator._play) templar = Templar(loader=self._loader, variables=task_vars) debug("done getting variables") diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 239d77ca658bc4..2d1168543901ff 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -227,6 +227,15 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): if self._inventory is not None: all_vars['inventory_dir'] = self._inventory.basedir() + if play: + # add the list of hosts in the play, as adjusted for limit/filters + # FIXME: play_hosts should be deprecated in favor of ansible_play_hosts, + # however this would take work in the templating engine, so for now + # we'll add both so we can 
give users something transitional to use + host_list = [x.name for x in self._inventory.get_hosts()] + all_vars['play_hosts'] = host_list + all_vars['ansible_play_hosts'] = host_list + # the 'omit' value alows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token From 61e367f549053ca7bfb8a0f969debc0957e3cbfb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Jun 2015 10:14:04 -0700 Subject: [PATCH 0976/3617] Better error messages when the file to be transferred does not exist. --- lib/ansible/plugins/connections/jail.py | 32 +++++++++++++------------ lib/ansible/plugins/connections/zone.py | 31 +++++++++++++----------- 2 files changed, 34 insertions(+), 29 deletions(-) diff --git a/lib/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py index 4a47d5101e35ba..0c8c9def2796a5 100644 --- a/lib/ansible/plugins/connections/jail.py +++ b/lib/ansible/plugins/connections/jail.py @@ -134,25 +134,27 @@ def put_file(self, in_path, out_path): vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) - with open(in_path, 'rb') as in_file: - try: - p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) - except OSError: - raise errors.AnsibleError("jail connection requires dd command in the jail") - try: - stdout, stderr = p.communicate() - except: - traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" % out_path) - if p.returncode != 0: - raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) + try: + with open(in_path, 'rb') as in_file: + try: + p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + except OSError: + raise errors.AnsibleError("jail connection requires dd command in the jail") + try: + stdout, stderr = p.communicate() + except: + traceback.print_exc() + raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + if p.returncode != 0: + 
raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + except IOError: + raise errors.AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): ''' fetch a file from jail to local ''' vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) - try: p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None) except OSError: @@ -164,10 +166,10 @@ def fetch_file(self, in_path, out_path): out_file.write(chunk) except: traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" % out_path) + raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) stdout, stderr = p.communicate() if p.returncode != 0: - raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) + raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) def close(self): ''' terminate the connection; nothing to do here ''' diff --git a/lib/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connections/zone.py index ffcabfca5fefcb..7e6fa5fe6021b7 100644 --- a/lib/ansible/plugins/connections/zone.py +++ b/lib/ansible/plugins/connections/zone.py @@ -147,18 +147,21 @@ def put_file(self, in_path, out_path): vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) - with open(in_path, 'rb') as in_file: - try: - p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) - except OSError: - raise errors.AnsibleError("zone connection requires dd command in the zone") - try: - stdout, stderr = p.communicate() - except: - traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" % out_path) - if p.returncode != 0: - raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) + try: + with open(in_path, 'rb') as in_file: + try: + p = self._buffered_exec_command('dd of=%s' % 
out_path, None, stdin=in_file) + except OSError: + raise errors.AnsibleError("jail connection requires dd command in the jail") + try: + stdout, stderr = p.communicate() + except: + traceback.print_exc() + raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + if p.returncode != 0: + raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + except IOError: + raise errors.AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): ''' fetch a file from zone to local ''' @@ -178,10 +181,10 @@ def fetch_file(self, in_path, out_path): out_file.write(chunk) except: traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" % out_path) + raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) stdout, stderr = p.communicate() if p.returncode != 0: - raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr)) + raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) def close(self): ''' terminate the connection; nothing to do here ''' From 952166f48eb0f5797b75b160fd156bbe1e8fc647 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Jun 2015 20:07:29 -0700 Subject: [PATCH 0977/3617] Fix problem with chroot connection plugins and symlinks from within the chroot. 
--- lib/ansible/plugins/connections/chroot.py | 92 ++++++++++++++--------- 1 file changed, 56 insertions(+), 36 deletions(-) diff --git a/lib/ansible/plugins/connections/chroot.py b/lib/ansible/plugins/connections/chroot.py index 3ecc0f70301aa8..7e3cbe33532115 100644 --- a/lib/ansible/plugins/connections/chroot.py +++ b/lib/ansible/plugins/connections/chroot.py @@ -1,5 +1,6 @@ # Based on local.py (c) 2012, Michael DeHaan # (c) 2013, Maykel Moya +# (c) 2015, Toshio Kuratomi # # This file is part of Ansible # @@ -21,13 +22,14 @@ import distutils.spawn import traceback import os -import shutil import subprocess from ansible import errors from ansible import utils from ansible.callbacks import vvv import ansible.constants as C +BUFSIZE = 65536 + class Connection(object): ''' Local chroot based connections ''' @@ -64,8 +66,21 @@ def connect(self, port=None): return self - def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): - ''' run a command on the chroot ''' + def _generate_cmd(self, executable, cmd): + if executable: + local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] + else: + local_cmd = '%s "%s" %s' % (self.chroot_cmd, self.chroot, cmd) + return local_cmd + + def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE): + ''' run a command on the chroot. This is only needed for implementing + put_file() get_file() so that we don't have to read the whole file + into memory. + + compared to exec_command() it looses some niceties like being able to + return the process's exit code immediately. 
+ ''' if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported: raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method) @@ -74,60 +89,65 @@ def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executab raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") # We enter chroot as root so we ignore privlege escalation? - - if executable: - local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] - else: - local_cmd = '%s "%s" %s' % (self.chroot_cmd, self.chroot, cmd) + local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.chroot) p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring), cwd=self.runner.basedir, - stdin=subprocess.PIPE, + stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return p + + def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None): + ''' run a command on the chroot ''' + + p = self._buffered_exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data) + stdout, stderr = p.communicate() return (p.returncode, '', stdout, stderr) def put_file(self, in_path, out_path): ''' transfer a file from local to chroot ''' - if not out_path.startswith(os.path.sep): - out_path = os.path.join(os.path.sep, out_path) - normpath = os.path.normpath(out_path) - out_path = os.path.join(self.chroot, normpath[1:]) - vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot) - if not os.path.exists(in_path): - raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) + try: - shutil.copyfile(in_path, out_path) - except shutil.Error: - traceback.print_exc() - raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path)) + with open(in_path, 'rb') as in_file: + try: + p = self._buffered_exec_command('dd of=%s' % 
out_path, None, stdin=in_file) + except OSError: + raise errors.AnsibleError("chroot connection requires dd command in the chroot") + try: + stdout, stderr = p.communicate() + except: + traceback.print_exc() + raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + if p.returncode != 0: + raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) except IOError: - traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" % out_path) + raise errors.AnsibleError("file or module does not exist at: %s" % in_path) def fetch_file(self, in_path, out_path): ''' fetch a file from chroot to local ''' - if not in_path.startswith(os.path.sep): - in_path = os.path.join(os.path.sep, in_path) - normpath = os.path.normpath(in_path) - in_path = os.path.join(self.chroot, normpath[1:]) - vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot) - if not os.path.exists(in_path): - raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path) + try: - shutil.copyfile(in_path, out_path) - except shutil.Error: - traceback.print_exc() - raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path)) - except IOError: - traceback.print_exc() - raise errors.AnsibleError("failed to transfer file to %s" % out_path) + p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None) + except OSError: + raise errors.AnsibleError("chroot connection requires dd command in the jail") + + with open(out_path, 'wb+') as out_file: + try: + for chunk in p.stdout.read(BUFSIZE): + out_file.write(chunk) + except: + traceback.print_exc() + raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise errors.AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) def close(self): ''' terminate the connection; nothing to 
do here ''' From aa53212a9b252151c9c34038864d8c93d8002117 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Jun 2015 10:19:31 -0400 Subject: [PATCH 0978/3617] Don't use all task params for vars, just the module args --- lib/ansible/playbook/task.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 58788df65b4cda..44f76c1e13446d 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -197,7 +197,8 @@ def get_vars(self): if self._task_include: all_vars.update(self._task_include.get_vars()) - all_vars.update(self.serialize()) + if isinstance(self.args, dict): + all_vars.update(self.args) if 'tags' in all_vars: del all_vars['tags'] From 24d2202591f8d9976a2719f3400b4cd116ce6515 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Jun 2015 10:19:50 -0400 Subject: [PATCH 0979/3617] Make sure role parsing can handle a few more types in includes/defs --- lib/ansible/playbook/role/definition.py | 6 +++++- lib/ansible/playbook/role/include.py | 4 +++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py index 0cb1e45760dfa8..d46bca6b2e9d13 100644 --- a/lib/ansible/playbook/role/definition.py +++ b/lib/ansible/playbook/role/definition.py @@ -55,8 +55,12 @@ def load(data, variable_manager=None, loader=None): raise AnsibleError("not implemented") def preprocess_data(self, ds): + # role names that are simply numbers can be parsed by PyYAML + # as integers even when quoted, so turn it into a string type + if isinstance(ds, int): + ds = "%s" % ds - assert isinstance(ds, dict) or isinstance(ds, string_types) + assert isinstance(ds, dict) or isinstance(ds, string_types) or isinstance(ds, AnsibleBaseYAMLObject) if isinstance(ds, dict): ds = super(RoleDefinition, self).preprocess_data(ds) diff --git a/lib/ansible/playbook/role/include.py 
b/lib/ansible/playbook/role/include.py index b063aecc350eaf..93cf0e21794e21 100644 --- a/lib/ansible/playbook/role/include.py +++ b/lib/ansible/playbook/role/include.py @@ -24,6 +24,7 @@ import os from ansible.errors import AnsibleError, AnsibleParserError +from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject from ansible.playbook.attribute import Attribute, FieldAttribute from ansible.playbook.role.definition import RoleDefinition @@ -42,7 +43,8 @@ def __init__(self, role_basedir=None): @staticmethod def load(data, current_role_path=None, parent_role=None, variable_manager=None, loader=None): - assert isinstance(data, string_types) or isinstance(data, dict) + + assert isinstance(data, string_types) or isinstance(data, dict) or isinstance(data, AnsibleBaseYAMLObject) ri = RoleInclude(role_basedir=current_role_path) return ri.load_data(data, variable_manager=variable_manager, loader=loader) From 72d4b40a26f670c16843e18e359b023916780893 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Jun 2015 10:39:49 -0400 Subject: [PATCH 0980/3617] Don't allow empty (none) loop values Fixes #8593 --- lib/ansible/playbook/task.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 44f76c1e13446d..1570173f420759 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -136,7 +136,9 @@ def _preprocess_loop(self, ds, new_ds, k, v): loop_name = k.replace("with_", "") if new_ds.get('loop') is not None: - raise AnsibleError("duplicate loop in task: %s" % loop_name) + raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds) + if v is None: + raise AnsibleError("you must specify a value when using %s" % k, obj=ds) new_ds['loop'] = loop_name new_ds['loop_args'] = v From 125e6f49a19efdfa854fdab6d5bd0fdfa17d0a5b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Jun 2015 08:49:01 -0700 Subject: [PATCH 0981/3617] Update submodule refs --- 
lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index cf273bbaeba32a..5f6128a3003fb2 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit cf273bbaeba32a2e9ffab3616cbc2d1835bffc07 +Subproject commit 5f6128a3003fb22889f593942fc430bb1f1e92a3 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index dd6e8f354aaeee..44eb758dc7a52e 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit dd6e8f354aaeeeaccc1566ab14cfd368d6ec1f72 +Subproject commit 44eb758dc7a52ee315398c036b30082db73a0c0a From d19700944dd3b844e0024a10c1acd16274809677 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Jun 2015 08:52:57 -0700 Subject: [PATCH 0982/3617] URL has changed --- docsite/rst/quickstart.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/quickstart.rst b/docsite/rst/quickstart.rst index 161748d9f02784..055e4aecabb16a 100644 --- a/docsite/rst/quickstart.rst +++ b/docsite/rst/quickstart.rst @@ -3,7 +3,7 @@ Quickstart Video We've recorded a short video that shows how to get started with Ansible that you may like to use alongside the documentation. -The `quickstart video `_ is about 30 minutes long and will show you some of the basics about your +The `quickstart video `_ is about 30 minutes long and will show you some of the basics about your first steps with Ansible. Enjoy, and be sure to visit the rest of the documentation to learn more. 
From 6aae500a2c74d0ade0625ee085f0c08632fc98f8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Jun 2015 08:58:26 -0700 Subject: [PATCH 0983/3617] Documentation fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 5f6128a3003fb2..a1538b490ed71f 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 5f6128a3003fb22889f593942fc430bb1f1e92a3 +Subproject commit a1538b490ed71fc291035daa4aaf184369e3fa86 From 006391eb832801f72e47062a817b76daf39329ac Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Jun 2015 12:12:21 -0400 Subject: [PATCH 0984/3617] Add the improved exception reporting to the minimal callback plugin --- lib/ansible/plugins/callback/minimal.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index c6b2282e62fd76..d0c314e1b9018c 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -38,8 +38,19 @@ def v2_on_any(self, *args, **kwargs): pass def v2_runner_on_failed(self, result, ignore_errors=False): - if 'exception' in result._result and self._display.verbosity < 3: + if 'exception' in result._result: + if self._display.verbosity < 3: + # extract just the actual error message from the exception text + error = result._result['exception'].strip().split('\n')[-1] + msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error + else: + msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] + + self._display.display(msg, color='red') + + # finally, remove the exception from the result so it's not shown every time del result._result['exception'] + self._display.display("%s | FAILED! 
=> %s" % (result._host.get_name(), result._result), color='red') def v2_runner_on_ok(self, result): From 42467777593e3a4897c86362d3ec9fb09f517862 Mon Sep 17 00:00:00 2001 From: Hugh Saunders Date: Tue, 23 Jun 2015 12:12:38 -0400 Subject: [PATCH 0985/3617] Re-implement the ssh connection retry, originally added in 2df690 --- lib/ansible/plugins/connections/ssh.py | 64 ++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 9 deletions(-) diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py index 56cf996e80a56a..f0c2db6bf99465 100644 --- a/lib/ansible/plugins/connections/ssh.py +++ b/lib/ansible/plugins/connections/ssh.py @@ -18,18 +18,20 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import gettext +import fcntl +import hmac import os -import re -import subprocess -import shlex import pipes +import pty +import pwd import random +import re import select -import fcntl -import hmac -import pwd -import gettext -import pty +import shlex +import subprocess +import time + from hashlib import sha1 from ansible import constants as C @@ -276,8 +278,52 @@ def lock_host_keys(self, lock): # fcntl.lockf(self.process_lockfile, action) # fcntl.lockf(self.output_lockfile, action) + def exec_command(self, *args, **kwargs): + """ + Wrapper around _exec_command to retry in the case of an ssh failure + + Will retry if: + * an exception is caught + * ssh returns 255 + Will not retry if + * remaining_tries is <2 + * retries limit reached + """ + + remaining_tries = int(C.ANSIBLE_SSH_RETRIES) + 1 + cmd_summary = "%s..." 
% args[0] + for attempt in xrange(remaining_tries): + try: + return_tuple = self._exec_command(*args, **kwargs) + # 0 = success + # 1-254 = remote command return code + # 255 = failure from the ssh command itself + if return_tuple[0] != 255 or attempt == (remaining_tries - 1): + break + else: + raise AnsibleConnectionFailure("Failed to connect to the host via ssh.") + except (AnsibleConnectionFailure, Exception) as e: + if attempt == remaining_tries - 1: + raise e + else: + pause = 2 ** attempt - 1 + if pause > 30: + pause = 30 + + if isinstance(e, AnsibleConnectionFailure): + msg = "ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt, cmd_summary, pause) + else: + msg = "ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), pausing for %d seconds" % (attempt, e, cmd_summary, pause) + + self._display.vv(msg) + + time.sleep(pause) + continue + + + return return_tuple - def exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): + def _exec_command(self, cmd, tmp_path, in_data=None, sudoable=True): ''' run a command on the remote host ''' super(Connection, self).exec_command(cmd, tmp_path, in_data=in_data, sudoable=sudoable) From 4c6adcf14378fc05358535c67b2b2a18c75a60f0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Jun 2015 13:32:50 -0400 Subject: [PATCH 0986/3617] Submodule pointer update --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index a1538b490ed71f..a1181b490b7e00 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit a1538b490ed71fc291035daa4aaf184369e3fa86 +Subproject commit a1181b490b7e00953a954878f3694a32378deca4 From 0b16580567c3a796487c9e848ff2623363ab6380 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Jun 2015 14:29:39 -0400 Subject: [PATCH 0987/3617] Add in playbook_dir magic variable --- lib/ansible/vars/__init__.py | 2 ++ 1 
file changed, 2 insertions(+) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 2d1168543901ff..8c098b30f10b92 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -213,6 +213,8 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): # FIXME: make sure all special vars are here # Finally, we create special vars + all_vars['playbook_dir'] = loader.get_basedir() + if host: all_vars['groups'] = [group.name for group in host.get_groups()] From d0d9be30d5c9c3b282e6a10914b12d7fb4847687 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Jun 2015 15:48:48 -0400 Subject: [PATCH 0988/3617] Correctly compile handler blocks for dependent roles --- lib/ansible/playbook/role/__init__.py | 7 ++++++- lib/ansible/plugins/strategies/__init__.py | 4 ---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index b453d937405013..c24e6499d7ff21 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -288,7 +288,12 @@ def get_task_blocks(self): return self._task_blocks[:] def get_handler_blocks(self): - return self._handler_blocks[:] + block_list = [] + for dep in self.get_direct_dependencies(): + dep_blocks = dep.get_handler_blocks() + block_list.extend(dep_blocks) + block_list.extend(self._handler_blocks) + return block_list def has_run(self): ''' diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 83e045bfe398c2..180cf3245d1f31 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -366,10 +366,6 @@ def run_handlers(self, iterator, connection_info): result = True - # FIXME: getting the handlers from the iterators play should be - # a method on the iterator, which may also filter the list - # of handlers based on the notified list - for handler_block in 
iterator._play.handlers: # FIXME: handlers need to support the rescue/always portions of blocks too, # but this may take some work in the iterator and gets tricky when From e461241d7b585e36ad47470ac7c913a6cd189660 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Jun 2015 14:44:07 -0700 Subject: [PATCH 0989/3617] Fix fetch_file() method --- lib/ansible/plugins/connections/chroot.py | 4 +++- lib/ansible/plugins/connections/jail.py | 4 +++- lib/ansible/plugins/connections/zone.py | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/connections/chroot.py b/lib/ansible/plugins/connections/chroot.py index 7e3cbe33532115..f7b2cb962c24c4 100644 --- a/lib/ansible/plugins/connections/chroot.py +++ b/lib/ansible/plugins/connections/chroot.py @@ -140,8 +140,10 @@ def fetch_file(self, in_path, out_path): with open(out_path, 'wb+') as out_file: try: - for chunk in p.stdout.read(BUFSIZE): + chunk = p.stdout.read(BUFSIZE) + while chunk: out_file.write(chunk) + chunk = p.stdout.read(BUFSIZE) except: traceback.print_exc() raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) diff --git a/lib/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py index 0c8c9def2796a5..480a8441515c4a 100644 --- a/lib/ansible/plugins/connections/jail.py +++ b/lib/ansible/plugins/connections/jail.py @@ -162,8 +162,10 @@ def fetch_file(self, in_path, out_path): with open(out_path, 'wb+') as out_file: try: - for chunk in p.stdout.read(BUFSIZE): + chunk = p.stdout.read(BUFSIZE) + while chunk: out_file.write(chunk) + chunk = p.stdout.read(BUFSIZE) except: traceback.print_exc() raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) diff --git a/lib/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connections/zone.py index 7e6fa5fe6021b7..e4dfedc9e4bedb 100644 --- a/lib/ansible/plugins/connections/zone.py +++ b/lib/ansible/plugins/connections/zone.py @@ -177,8 +177,10 @@ 
def fetch_file(self, in_path, out_path): with open(out_path, 'wb+') as out_file: try: - for chunk in p.stdout.read(BUFSIZE): + chunk = p.stdout.read(BUFSIZE) + while chunk: out_file.write(chunk) + chunk = p.stdout.read(BUFSIZE) except: traceback.print_exc() raise errors.AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) From 7b4ff28b8780bca35669d98b2480e5a549741ddf Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Jun 2015 14:44:58 -0700 Subject: [PATCH 0990/3617] Creating modules: use if __name__ --- docsite/rst/developing_modules.rst | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 9e784c6418e688..f08cda8e68dcc8 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -238,7 +238,8 @@ The 'group' and 'user' modules are reasonably non-trivial and showcase what this Key parts include always ending the module file with:: from ansible.module_utils.basic import * - main() + if __name__ == '__main__': + main() And instantiating the module class like:: @@ -483,6 +484,12 @@ Module checklist * The return structure should be consistent, even if NA/None are used for keys normally returned under other options. * Are module actions idempotent? If not document in the descriptions or the notes * Import module snippets `from ansible.module_utils.basic import *` at the bottom, conserves line numbers for debugging. +* Call your :func:`main` from a condtional so that it would be possible to + test them in the future example:: + + if __name__ == '__main__': + main() + * Try to normalize parameters with other modules, you can have aliases for when user is more familiar with underlying API name for the option * Being pep8 compliant is nice, but not a requirement. 
Specifically, the 80 column limit now hinders readability more that it improves it * Avoid '`action`/`command`', they are imperative and not declarative, there are other ways to express the same thing From a1a7d6c46247f313a8a9c2a1878e034324894c4b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Jun 2015 15:17:26 -0700 Subject: [PATCH 0991/3617] Fix forwarding the user-given params from fetch_url() to open_url() --- lib/ansible/module_utils/urls.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 2725980fcb53d9..54bdd8d2d67b60 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -504,8 +504,8 @@ def fetch_url(module, url, data=None, headers=None, method=None, r = None info = dict(url=url) try: - r = open_url(url, data=None, headers=None, method=None, - use_proxy=True, force=False, last_mod_time=None, timeout=10, + r = open_url(url, data=data, headers=headers, method=method, + use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=username, url_password=password, http_agent=http_agent) info.update(r.info()) From 874df00f748a43806610cf15e668ac076b6d71fe Mon Sep 17 00:00:00 2001 From: danasmera Date: Tue, 23 Jun 2015 20:44:17 -0400 Subject: [PATCH 0992/3617] Add double-quote to a variable precedening color --- docsite/rst/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index ba3ae1264ffd3d..c691cd2af8d5a8 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -11,7 +11,7 @@ How can I set the PATH or any other environment variable for a task or entire pl Setting environment variables can be done with the `environment` keyword. 
It can be used at task or playbook level:: environment: - PATH: {{ ansible_env.PATH }}:/thingy/bin + PATH: "{{ ansible_env.PATH }}":/thingy/bin SOME: value From b8434db3cc2c1a872615c74e2e3a817442002c7e Mon Sep 17 00:00:00 2001 From: danasmera Date: Tue, 23 Jun 2015 20:48:13 -0400 Subject: [PATCH 0993/3617] fix: Add double-quote to a variable precedening color --- docsite/rst/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index c691cd2af8d5a8..faac872fad7a3b 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -11,7 +11,7 @@ How can I set the PATH or any other environment variable for a task or entire pl Setting environment variables can be done with the `environment` keyword. It can be used at task or playbook level:: environment: - PATH: "{{ ansible_env.PATH }}":/thingy/bin + PATH: "{{ ansible_env.PATH }}:/thingy/bin" SOME: value From 270be6a6f5852c5563976f060c80eff64decc89c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Jun 2015 22:27:45 -0700 Subject: [PATCH 0994/3617] Fix exec_command to not use a shell --- lib/ansible/plugins/connections/chroot.py | 14 ++++++++++---- lib/ansible/plugins/connections/jail.py | 12 +++++++++--- lib/ansible/plugins/connections/zone.py | 12 +++++++++--- 3 files changed, 28 insertions(+), 10 deletions(-) diff --git a/lib/ansible/plugins/connections/chroot.py b/lib/ansible/plugins/connections/chroot.py index f7b2cb962c24c4..7cc1afa718b48a 100644 --- a/lib/ansible/plugins/connections/chroot.py +++ b/lib/ansible/plugins/connections/chroot.py @@ -22,9 +22,11 @@ import distutils.spawn import traceback import os +import shlex import subprocess from ansible import errors from ansible import utils +from ansible.utils.unicode import to_bytes from ansible.callbacks import vvv import ansible.constants as C @@ -70,7 +72,11 @@ def _generate_cmd(self, executable, cmd): if executable: local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] else: - 
local_cmd = '%s "%s" %s' % (self.chroot_cmd, self.chroot, cmd) + # Prev to python2.7.3, shlex couldn't handle unicode type strings + cmd = to_bytes(cmd) + cmd = shlex.split(cmd) + local_cmd = [self.chroot_cmd, self.chroot] + local_cmd += cmd return local_cmd def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE): @@ -88,11 +94,11 @@ def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We enter chroot as root so we ignore privlege escalation? + # We enter zone as root so we ignore privilege escalation (probably need to fix in case we have to become a specific used [ex: postgres admin])? local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.chroot) - p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring), + p = subprocess.Popen(local_cmd, shell=False, cwd=self.runner.basedir, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -136,7 +142,7 @@ def fetch_file(self, in_path, out_path): try: p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), None) except OSError: - raise errors.AnsibleError("chroot connection requires dd command in the jail") + raise errors.AnsibleError("chroot connection requires dd command in the chroot") with open(out_path, 'wb+') as out_file: try: diff --git a/lib/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py index 480a8441515c4a..1e1f5b9307eea8 100644 --- a/lib/ansible/plugins/connections/jail.py +++ b/lib/ansible/plugins/connections/jail.py @@ -23,8 +23,10 @@ import distutils.spawn import traceback import os +import shlex import subprocess from ansible import errors +from ansible.utils.unicode import to_bytes from ansible.callbacks import vvv import ansible.constants as C @@ -92,7 +94,11 @@ def 
_generate_cmd(self, executable, cmd): if executable: local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd] else: - local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd) + # Prev to python2.7.3, shlex couldn't handle unicode type strings + cmd = to_bytes(cmd) + cmd = shlex.split(cmd) + local_cmd = [self.jexec_cmd, self.jail] + local_cmd += cmd return local_cmd def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None, stdin=subprocess.PIPE): @@ -110,11 +116,11 @@ def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # Ignores privilege escalation + # We enter zone as root so we ignore privilege escalation (probably need to fix in case we have to become a specific used [ex: postgres admin])? local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.jail) - p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring), + p = subprocess.Popen(local_cmd, shell=False, cwd=self.runner.basedir, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connections/zone.py index e4dfedc9e4bedb..019cfb9a91c779 100644 --- a/lib/ansible/plugins/connections/zone.py +++ b/lib/ansible/plugins/connections/zone.py @@ -24,8 +24,10 @@ import distutils.spawn import traceback import os +import shlex import subprocess from ansible import errors +from ansible.utils.unicode import to_bytes from ansible.callbacks import vvv import ansible.constants as C @@ -101,7 +103,11 @@ def _generate_cmd(self, executable, cmd): ### TODO: Why was "-c" removed from here? 
(vs jail.py) local_cmd = [self.zlogin_cmd, self.zone, executable, cmd] else: - local_cmd = '%s "%s" %s' % (self.zlogin_cmd, self.zone, cmd) + # Prev to python2.7.3, shlex couldn't handle unicode type strings + cmd = to_bytes(cmd) + cmd = shlex.split(cmd) + local_cmd = [self.zlogin_cmd, self.zone] + local_cmd += cmd return local_cmd def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None, stdin=subprocess.PIPE): @@ -119,11 +125,11 @@ def _buffered_exec_command(self, cmd, tmp_path, become_user=None, sudoable=False if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - # We happily ignore privilege escalation + # We enter zone as root so we ignore privilege escalation (probably need to fix in case we have to become a specific used [ex: postgres admin])? local_cmd = self._generate_cmd(executable, cmd) vvv("EXEC %s" % (local_cmd), host=self.zone) - p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring), + p = subprocess.Popen(local_cmd, shell=False, cwd=self.runner.basedir, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) From 548a7288a90c49e9b50ccf197da307eae525b899 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 01:00:22 -0700 Subject: [PATCH 0995/3617] Use BUFSIZE when putting file as well as fetching file. 
--- lib/ansible/plugins/connections/chroot.py | 2 +- lib/ansible/plugins/connections/jail.py | 2 +- lib/ansible/plugins/connections/zone.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/connections/chroot.py b/lib/ansible/plugins/connections/chroot.py index 7cc1afa718b48a..cc5cee7803d912 100644 --- a/lib/ansible/plugins/connections/chroot.py +++ b/lib/ansible/plugins/connections/chroot.py @@ -121,7 +121,7 @@ def put_file(self, in_path, out_path): try: with open(in_path, 'rb') as in_file: try: - p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file) except OSError: raise errors.AnsibleError("chroot connection requires dd command in the chroot") try: diff --git a/lib/ansible/plugins/connections/jail.py b/lib/ansible/plugins/connections/jail.py index 1e1f5b9307eea8..d12318391cebe4 100644 --- a/lib/ansible/plugins/connections/jail.py +++ b/lib/ansible/plugins/connections/jail.py @@ -143,7 +143,7 @@ def put_file(self, in_path, out_path): try: with open(in_path, 'rb') as in_file: try: - p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file) except OSError: raise errors.AnsibleError("jail connection requires dd command in the jail") try: diff --git a/lib/ansible/plugins/connections/zone.py b/lib/ansible/plugins/connections/zone.py index 019cfb9a91c779..82256742a14beb 100644 --- a/lib/ansible/plugins/connections/zone.py +++ b/lib/ansible/plugins/connections/zone.py @@ -156,7 +156,7 @@ def put_file(self, in_path, out_path): try: with open(in_path, 'rb') as in_file: try: - p = self._buffered_exec_command('dd of=%s' % out_path, None, stdin=in_file) + p = self._buffered_exec_command('dd of=%s bs=%s' % (out_path, BUFSIZE), None, stdin=in_file) except OSError: raise errors.AnsibleError("jail connection 
requires dd command in the jail") try: From 4fbd4ae18b39883152f790bf2e59fdfdff973bc7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 24 Jun 2015 11:27:22 -0400 Subject: [PATCH 0996/3617] Update VariableManager test for additional magic variable playbook_dir --- test/units/vars/test_variable_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py index 273f9238edbba0..4371008bb9bf05 100644 --- a/test/units/vars/test_variable_manager.py +++ b/test/units/vars/test_variable_manager.py @@ -42,7 +42,7 @@ def test_basic_manager(self): if 'omit' in vars: del vars['omit'] - self.assertEqual(vars, dict()) + self.assertEqual(vars, dict(playbook_dir='.')) self.assertEqual( v._merge_dicts( From 4942f181007e8ac861d84f8151ee23973f1aa35c Mon Sep 17 00:00:00 2001 From: Gerard Lynch Date: Wed, 24 Jun 2015 16:50:14 +0100 Subject: [PATCH 0997/3617] added role_path to magic var section --- docsite/rst/playbooks_variables.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 7bf006cf75d0e6..905ef10e2ba78f 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -614,6 +614,8 @@ Don't worry about any of this unless you think you need it. You'll know when yo Also available, *inventory_dir* is the pathname of the directory holding Ansible's inventory host file, *inventory_file* is the pathname and the filename pointing to the Ansible's inventory host file. +And finally, *role_path* will return the current role's pathname (since 1.8). This will only work inside a role. + .. 
_variable_file_separation_details: Variable File Separation From ed07a90289991152392b7baa8287afb6521e30b5 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 24 Jun 2015 11:40:59 -0700 Subject: [PATCH 0998/3617] added six to install-from-source docs --- docsite/rst/intro_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 0f13c561f71243..53abad4fc1e385 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -126,7 +126,7 @@ If you don't have pip installed in your version of Python, install pip:: Ansible also uses the following Python modules that need to be installed:: - $ sudo pip install paramiko PyYAML Jinja2 httplib2 + $ sudo pip install paramiko PyYAML Jinja2 httplib2 six Note when updating ansible, be sure to not only update the source tree, but also the "submodules" in git which point at Ansible's own modules (not the same kind of modules, alas). 
From 256a323de56d8259c9cd65ae4c55ab761d432b85 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 24 Jun 2015 15:03:34 -0400 Subject: [PATCH 0999/3617] Submodule update --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index a1181b490b7e00..725ce906f69ab5 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit a1181b490b7e00953a954878f3694a32378deca4 +Subproject commit 725ce906f69ab543ca05e9850797a0c384b12b25 From 332ca927d96cdae40110454a16ba041b008de6c8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 22 Jun 2015 01:17:09 -0400 Subject: [PATCH 1000/3617] Fix parent attribute lookup Using 'value is None' instead of 'not value', in order to account for boolean values which may be false Fixes #11232 --- lib/ansible/playbook/block.py | 11 ++++++----- lib/ansible/playbook/task.py | 4 ++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index a82aae1e67b545..57a22c8cc1d692 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -260,19 +260,19 @@ def _get_parent_attribute(self, attr, extend=False): ''' value = self._attributes[attr] - if self._parent_block and (not value or extend): + if self._parent_block and (value is None or extend): parent_value = getattr(self._parent_block, attr) if extend: value = self._extend_value(value, parent_value) else: value = parent_value - if self._task_include and (not value or extend): + if self._task_include and (value is None or extend): parent_value = getattr(self._task_include, attr) if extend: value = self._extend_value(value, parent_value) else: value = parent_value - if self._role and (not value or extend): + if self._role and (value is None or extend): parent_value = getattr(self._role, attr) if extend: value = self._extend_value(value, parent_value) @@ -289,9 +289,10 @@ def 
_get_parent_attribute(self, attr, extend=False): else: value = dep_value - if value and not extend: + if value is not None and not extend: break - if self._play and (not value or extend): + + if self._play and (value is None or extend): parent_value = getattr(self._play, attr) if extend: value = self._extend_value(value, parent_value) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 1570173f420759..f0a7350954e404 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -297,13 +297,13 @@ def _get_parent_attribute(self, attr, extend=False): Generic logic to get the attribute or parent attribute for a task value. ''' value = self._attributes[attr] - if self._block and (not value or extend): + if self._block and (value is None or extend): parent_value = getattr(self._block, attr) if extend: value = self._extend_value(value, parent_value) else: value = parent_value - if self._task_include and (not value or extend): + if self._task_include and (value is None or extend): parent_value = getattr(self._task_include, attr) if extend: value = self._extend_value(value, parent_value) From 160e71e2cf3977f578644fec5487d4b02c013b4d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 10:22:37 -0700 Subject: [PATCH 1001/3617] Some flake8 cleanup --- lib/ansible/module_utils/basic.py | 35 +++++++++++++++---------------- 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 1888a7c501eb45..ffd159601d62a5 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -66,7 +66,6 @@ import pwd import platform import errno -import tempfile from itertools import imap, repeat try: @@ -113,7 +112,6 @@ from systemd import journal has_journal = True except ImportError: - import syslog has_journal = False try: @@ -121,10 +119,10 @@ except ImportError: # a replacement for literal_eval that works with python 2.4. 
from: # https://mail.python.org/pipermail/python-list/2009-September/551880.html - # which is essentially a cut/past from an earlier (2.6) version of python's + # which is essentially a cut/paste from an earlier (2.6) version of python's # ast.py - from compiler import parse - from compiler.ast import * + from compiler import ast, parse + def _literal_eval(node_or_string): """ Safely evaluate an expression node or a string containing a Python @@ -135,21 +133,22 @@ def _literal_eval(node_or_string): _safe_names = {'None': None, 'True': True, 'False': False} if isinstance(node_or_string, basestring): node_or_string = parse(node_or_string, mode='eval') - if isinstance(node_or_string, Expression): + if isinstance(node_or_string, ast.Expression): node_or_string = node_or_string.node + def _convert(node): - if isinstance(node, Const) and isinstance(node.value, (basestring, int, float, long, complex)): - return node.value - elif isinstance(node, Tuple): + if isinstance(node, ast.Const) and isinstance(node.value, (basestring, int, float, long, complex)): + return node.value + elif isinstance(node, ast.Tuple): return tuple(map(_convert, node.nodes)) - elif isinstance(node, List): + elif isinstance(node, ast.List): return list(map(_convert, node.nodes)) - elif isinstance(node, Dict): + elif isinstance(node, ast.Dict): return dict((_convert(k), _convert(v)) for k, v in node.items) - elif isinstance(node, Name): + elif isinstance(node, ast.Name): if node.name in _safe_names: return _safe_names[node.name] - elif isinstance(node, UnarySub): + elif isinstance(node, ast.UnarySub): return -_convert(node.expr) raise ValueError('malformed string') return _convert(node_or_string) @@ -680,7 +679,6 @@ def set_mode_if_different(self, path, mode, changed): new_underlying_stat = os.stat(path) if underlying_stat.st_mode != new_underlying_stat.st_mode: os.chmod(path, stat.S_IMODE(underlying_stat.st_mode)) - q_stat = os.stat(path) except OSError, e: if os.path.islink(path) and e.errno == 
errno.EPERM: # Can't set mode on symbolic links pass @@ -709,7 +707,8 @@ def _symbolic_mode_to_octal(self, path_stat, symbolic_mode): operator = match.group('operator') perms = match.group('perms') - if users == 'a': users = 'ugo' + if users == 'a': + users = 'ugo' for user in users: mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms) @@ -1086,7 +1085,7 @@ def _check_argument_types(self): if is_invalid: self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted)) - except ValueError, e: + except ValueError: self.fail_json(msg="value of argument %s is not of type %s and we were unable to automatically convert" % (k, wanted)) def _set_defaults(self, pre=True): @@ -1158,7 +1157,7 @@ def _log_invocation(self): journal_args.append((arg.upper(), str(log_args[arg]))) try: journal.send("%s %s" % (module, msg), **dict(journal_args)) - except IOError, e: + except IOError: # fall back to syslog since logging to journal failed syslog.openlog(str(module), 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_INFO, msg) #1 @@ -1568,7 +1567,7 @@ def run_command(self, args, check_rc=False, close_fds=True, executable=None, dat # if we're checking for prompts, do it now if prompt_re: if prompt_re.search(stdout) and not data: - return (257, stdout, "A prompt was encountered while running a command, but no input data was specified") + return (257, stdout, "A prompt was encountered while running a command, but no input data was specified") # only break out if no pipes are left to read or # the pipes are completely read and # the process is terminated From 00aed57295f01699c6f52419b0c715191abf4762 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 07:13:46 -0700 Subject: [PATCH 1002/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 
725ce906f69ab5..50912c9092eb56 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 725ce906f69ab543ca05e9850797a0c384b12b25 +Subproject commit 50912c9092eb567c5dc61c47eecd2ccc585ae364 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 44eb758dc7a52e..dec7d95d514ca8 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 44eb758dc7a52ee315398c036b30082db73a0c0a +Subproject commit dec7d95d514ca89c2784b63d836dd6fb872bdd9c From 9911a947ed7b23bbd47ab776c8c356d6de3be4eb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 08:17:58 -0700 Subject: [PATCH 1003/3617] Vendorize match_hostname code so that ansible can push it out to clients along with the code that uses it. --- lib/ansible/module_utils/urls.py | 169 +++++++++++++++++++++++++++++++ 1 file changed, 169 insertions(+) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 54bdd8d2d67b60..27b10742f7c42a 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -5,6 +5,7 @@ # to the complete work. # # Copyright (c), Michael DeHaan , 2012-2013 +# Copyright (c), Toshio Kuratomi , 2015 # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, @@ -25,6 +26,60 @@ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# The match_hostname function and supporting code is under the terms and +# conditions of the Python Software Foundation License. They were taken from +# the Python3 standard library and adapted for use in Python2. See comments in the +# source for which code precisely is under this License. 
PSF License text +# follows: +# +# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +# -------------------------------------------- +# +# 1. This LICENSE AGREEMENT is between the Python Software Foundation +# ("PSF"), and the Individual or Organization ("Licensee") accessing and +# otherwise using this software ("Python") in source or binary form and +# its associated documentation. +# +# 2. Subject to the terms and conditions of this License Agreement, PSF hereby +# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +# analyze, test, perform and/or display publicly, prepare derivative works, +# distribute, and otherwise use Python alone or in any derivative version, +# provided, however, that PSF's License Agreement and PSF's notice of copyright, +# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are +# retained in Python alone or in any derivative version prepared by Licensee. +# +# 3. In the event Licensee prepares a derivative work that is based on +# or incorporates Python or any part thereof, and wants to make +# the derivative work available to others as provided herein, then +# Licensee hereby agrees to include in any such work a brief summary of +# the changes made to Python. +# +# 4. PSF is making Python available to Licensee on an "AS IS" +# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +# INFRINGE ANY THIRD PARTY RIGHTS. +# +# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. 
+# +# 6. This License Agreement will automatically terminate upon a material +# breach of its terms and conditions. +# +# 7. Nothing in this License Agreement shall be deemed to create any +# relationship of agency, partnership, or joint venture between PSF and +# Licensee. This License Agreement does not grant permission to use PSF +# trademarks or trade name in a trademark sense to endorse or promote +# products or services of Licensee, or any third party. +# +# 8. By copying, installing or otherwise using Python, Licensee +# agrees to be bound by the terms and conditions of this License +# Agreement. try: import urllib2 @@ -53,6 +108,120 @@ except ImportError: HAS_MATCH_HOSTNAME = False +if not HAS_MATCH_HOSTNAME: + ### + ### The following block of code is under the terms and conditions of the + ### Python Software Foundation License + ### + + """The match_hostname() function from Python 3.4, essential when using SSL.""" + + import re + + class CertificateError(ValueError): + pass + + + def _dnsname_match(dn, hostname, max_wildcards=1): + """Matching according to RFC 6125, section 6.4.3 + + http://tools.ietf.org/html/rfc6125#section-6.4.3 + """ + pats = [] + if not dn: + return False + + # Ported from python3-syntax: + # leftmost, *remainder = dn.split(r'.') + parts = dn.split(r'.') + leftmost = parts[0] + remainder = parts[1:] + + wildcards = leftmost.count('*') + if wildcards > max_wildcards: + # Issue #17980: avoid denials of service by refusing more + # than one wildcard per fragment. A survey of established + # policy among SSL implementations showed it to be a + # reasonable choice. + raise CertificateError( + "too many wildcards in certificate DNS name: " + repr(dn)) + + # speed up common case w/o wildcards + if not wildcards: + return dn.lower() == hostname.lower() + + # RFC 6125, section 6.4.3, subitem 1. 
+ # The client SHOULD NOT attempt to match a presented identifier in which + # the wildcard character comprises a label other than the left-most label. + if leftmost == '*': + # When '*' is a fragment by itself, it matches a non-empty dotless + # fragment. + pats.append('[^.]+') + elif leftmost.startswith('xn--') or hostname.startswith('xn--'): + # RFC 6125, section 6.4.3, subitem 3. + # The client SHOULD NOT attempt to match a presented identifier + # where the wildcard character is embedded within an A-label or + # U-label of an internationalized domain name. + pats.append(re.escape(leftmost)) + else: + # Otherwise, '*' matches any dotless string, e.g. www* + pats.append(re.escape(leftmost).replace(r'\*', '[^.]*')) + + # add the remaining fragments, ignore any wildcards + for frag in remainder: + pats.append(re.escape(frag)) + + pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) + return pat.match(hostname) + + + def match_hostname(cert, hostname): + """Verify that *cert* (in decoded format as returned by + SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 + rules are followed, but IP addresses are not accepted for *hostname*. + + CertificateError is raised on failure. On success, the function + returns nothing. + """ + if not cert: + raise ValueError("empty or no certificate") + dnsnames = [] + san = cert.get('subjectAltName', ()) + for key, value in san: + if key == 'DNS': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if not dnsnames: + # The subject is only checked when there is no dNSName entry + # in subjectAltName + for sub in cert.get('subject', ()): + for key, value in sub: + # XXX according to RFC 2818, the most specific Common Name + # must be used. 
+ if key == 'commonName': + if _dnsname_match(value, hostname): + return + dnsnames.append(value) + if len(dnsnames) > 1: + raise CertificateError("hostname %r " + "doesn't match either of %s" + % (hostname, ', '.join(map(repr, dnsnames)))) + elif len(dnsnames) == 1: + raise CertificateError("hostname %r " + "doesn't match %r" + % (hostname, dnsnames[0])) + else: + raise CertificateError("no appropriate commonName or " + "subjectAltName fields were found") + + ### + ### End of Python Software Foundation Licensed code + ### + + HAS_MATCH_HOSTNAME = True + + import httplib import os import re From 784b18cb24ad307ac3d4373f0381466684452269 Mon Sep 17 00:00:00 2001 From: Silvio Tomatis Date: Thu, 25 Jun 2015 19:50:17 +0200 Subject: [PATCH 1004/3617] Update link to github --- docsite/rst/developing_plugins.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_plugins.rst b/docsite/rst/developing_plugins.rst index a54e8830f21e23..c2349ed676f614 100644 --- a/docsite/rst/developing_plugins.rst +++ b/docsite/rst/developing_plugins.rst @@ -54,7 +54,7 @@ Filter Plugins If you want more Jinja2 filters available in a Jinja2 template (filters like to_yaml and to_json are provided by default), they can be extended by writing a filter plugin. Most of the time, when someone comes up with an idea for a new filter they would like to make available in a playbook, we'll just include them in 'core.py' instead. -Jump into `lib/ansible/runner/filter_plugins/ `_ for details. +Jump into `lib/ansible/runner/filter_plugins/ `_ for details. .. _developing_callbacks: From cf7744f2f131708acd67c1312f622a3d4e639455 Mon Sep 17 00:00:00 2001 From: Dustin Lundquist Date: Thu, 25 Jun 2015 11:54:54 -0700 Subject: [PATCH 1005/3617] IPv6 SLAAC address computation filter Jinja2 filter to compute SLAAC address. 
Usage: {{ '2db8::/64' | slaac(ansible_eth0.macaddress) }} --- lib/ansible/plugins/filter/ipaddr.py | 33 ++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/lib/ansible/plugins/filter/ipaddr.py b/lib/ansible/plugins/filter/ipaddr.py index 5d9d6e3136728d..a1de6aaedd4782 100644 --- a/lib/ansible/plugins/filter/ipaddr.py +++ b/lib/ansible/plugins/filter/ipaddr.py @@ -587,6 +587,38 @@ def nthhost(value, query=''): return False +# Returns the SLAAC address within a network for a given HW/MAC address. +# Usage: +# +# - prefix | slaac(mac) +def slaac(value, query = ''): + ''' Get the SLAAC address within given network ''' + try: + vtype = ipaddr(value, 'type') + if vtype == 'address': + v = ipaddr(value, 'cidr') + elif vtype == 'network': + v = ipaddr(value, 'subnet') + + if v.version != 6: + return False + + value = netaddr.IPNetwork(v) + except: + return False + + if not query: + return False + + try: + mac = hwaddr(query, alias = 'slaac') + + eui = netaddr.EUI(mac) + except: + return False + + return eui.ipv6(value.network) + # ---- HWaddr / MAC address filters ---- @@ -645,6 +677,7 @@ class FilterModule(object): 'ipv6': ipv6, 'ipsubnet': ipsubnet, 'nthhost': nthhost, + 'slaac': slaac, # MAC / HW addresses 'hwaddr': hwaddr, From b9b1e294d7151aa2b0dbeeb597a7a2e3c80ecbed Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Wed, 10 Jun 2015 21:22:57 +0200 Subject: [PATCH 1006/3617] cloudstack: add get_or_failback() --- lib/ansible/module_utils/cloudstack.py | 8 ++++++++ v1/ansible/module_utils/cloudstack.py | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 13d4c59a0149c9..5b67c745c4bae7 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -77,6 +77,14 @@ def _connect(self): else: self.cs = CloudStack(**read_config()) + + def get_or_fallback(self, key=None, fallback_key=None): + value = self.module.params.get(key) + 
if not value: + value = self.module.params.get(fallback_key) + return value + + # TODO: for backward compatibility only, remove if not used anymore def _has_changed(self, want_dict, current_dict, only_keys=None): return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys) diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py index 13d4c59a0149c9..5b67c745c4bae7 100644 --- a/v1/ansible/module_utils/cloudstack.py +++ b/v1/ansible/module_utils/cloudstack.py @@ -77,6 +77,14 @@ def _connect(self): else: self.cs = CloudStack(**read_config()) + + def get_or_fallback(self, key=None, fallback_key=None): + value = self.module.params.get(key) + if not value: + value = self.module.params.get(fallback_key) + return value + + # TODO: for backward compatibility only, remove if not used anymore def _has_changed(self, want_dict, current_dict, only_keys=None): return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys) From 79527235307935c3867cd8c8120d86df2c7d801f Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Fri, 26 Jun 2015 09:24:02 +0200 Subject: [PATCH 1007/3617] cloudstack: fix domain name is not unique, use full path --- lib/ansible/module_utils/cloudstack.py | 7 ++++--- v1/ansible/module_utils/cloudstack.py | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py index 5b67c745c4bae7..752defec2b6214 100644 --- a/lib/ansible/module_utils/cloudstack.py +++ b/lib/ansible/module_utils/cloudstack.py @@ -274,12 +274,13 @@ def get_domain(self, key=None): return None args = {} - args['name'] = domain args['listall'] = True domains = self.cs.listDomains(**args) if domains: - self.domain = domains['domain'][0] - return self._get_by_key(key, self.domain) + for d in domains['domain']: + if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]: + 
self.domain = d + return self._get_by_key(key, self.domain) self.module.fail_json(msg="Domain '%s' not found" % domain) diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py index 5b67c745c4bae7..752defec2b6214 100644 --- a/v1/ansible/module_utils/cloudstack.py +++ b/v1/ansible/module_utils/cloudstack.py @@ -274,12 +274,13 @@ def get_domain(self, key=None): return None args = {} - args['name'] = domain args['listall'] = True domains = self.cs.listDomains(**args) if domains: - self.domain = domains['domain'][0] - return self._get_by_key(key, self.domain) + for d in domains['domain']: + if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]: + self.domain = d + return self._get_by_key(key, self.domain) self.module.fail_json(msg="Domain '%s' not found" % domain) From b723f9a09a91b125b684343815dc23dbd88f52ed Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 26 Jun 2015 10:54:38 -0400 Subject: [PATCH 1008/3617] Allow squashed loop items to use name=foo-{{item}} Fixes #9235 Fixes #11184 --- lib/ansible/executor/task_executor.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index ddd557f9998f93..8405389593b01b 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -186,8 +186,14 @@ def _squash_items(self, items, variables): variables['item'] = item templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) if self._task.evaluate_conditional(templar, variables): - final_items.append(item) - return [",".join(final_items)] + if templar._contains_vars(self._task.args['name']): + new_item = templar.template(self._task.args['name']) + final_items.append(new_item) + else: + final_items.append(item) + joined_items = ",".join(final_items) + self._task.args['name'] = joined_items + return [joined_items] 
else: return items From a6a86a5bdbcfef8d41dc0cd62cfde3c3e1a14d47 Mon Sep 17 00:00:00 2001 From: Gerard Lynch Date: Fri, 26 Jun 2015 21:49:04 +0100 Subject: [PATCH 1009/3617] added missing filters, changed since to new in version --- docsite/rst/playbooks_filters.rst | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst index 0cb42213b44665..4e35cee522ec88 100644 --- a/docsite/rst/playbooks_filters.rst +++ b/docsite/rst/playbooks_filters.rst @@ -17,9 +17,27 @@ Filters For Formatting Data The following filters will take a data structure in a template and render it in a slightly different format. These are occasionally useful for debugging:: + {{ some_variable | to_json }} + {{ some_variable | to_yaml }} + +For human readable output, you can use:: + {{ some_variable | to_nice_json }} {{ some_variable | to_nice_yaml }} +Alternatively, you may be reading in some already formatted data:: + + {{ some_variable | from_json }} + {{ some_variable | from_yaml }} + +for example:: + + tasks: + - shell: cat /some/path/to/file.json + register: result + + - set_fact: myvar="{{ result.stdout | from_json }}" + .. 
_filters_used_with_conditionals: Filters Often Used With Conditionals @@ -300,7 +318,11 @@ Hash types available depend on the master system running ansible, Other Useful Filters -------------------- -To use one value on true and another on false (since 1.9):: +To add quotes for shell usage:: + + - shell: echo={{ string_value | quote }} + +To use one value on true and another on false (new in version 1.9):: {{ (name == "John") | ternary('Mr','Ms') }} @@ -324,6 +346,10 @@ To get the real path of a link (new in version 1.8):: {{ path | realpath }} +To get the relative path of a link, from a start point (new in version 1.7):: + + {{ path | relpath('/etc') }} + To work with Base64 encoded strings:: {{ encoded | b64decode }} From 25fc0c7e1b087e872188da0f7858d331ac7c1574 Mon Sep 17 00:00:00 2001 From: Uli Martens Date: Fri, 26 Jun 2015 16:54:13 -0400 Subject: [PATCH 1010/3617] Fixing bug in failed_when results introduced by c3c398c --- lib/ansible/executor/task_result.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py index 99ac06c8eb3715..ad209a036cd998 100644 --- a/lib/ansible/executor/task_result.py +++ b/lib/ansible/executor/task_result.py @@ -43,7 +43,8 @@ def is_skipped(self): return self._check_key('skipped') def is_failed(self): - if 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]: + if 'failed_when_result' in self._result or \ + 'results' in self._result and True in [True for x in self._result['results'] if 'failed_when_result' in x]: return self._check_key('failed_when_result') else: return self._check_key('failed') or self._result.get('rc', 0) != 0 From 072955480343c188e91e72f4f1272884b5b165d8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 26 Jun 2015 16:00:11 -0400 Subject: [PATCH 1011/3617] added win_scheduled_task plugin to changelog --- CHANGELOG.md | 5 ++--- 1 file changed, 2 insertions(+), 3 
deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 88642b64197e65..d4c4205b79cb96 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,9 +66,7 @@ New Modules: * osx_defaults * pear * proxmox - * proxmox_template - * puppet - * pushover + * proxmox_template * puppet * pushover * pushbullet * rabbitmq_binding * rabbitmq_exchange @@ -88,6 +86,7 @@ New Modules: * webfaction_mailbox * webfaction_site * win_environment + * win_scheduled_task * zabbix_host * zabbix_hostmacro * zabbix_screen From 123d665acbd9349163b39d895f5f98b7e7e019c3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 26 Jun 2015 16:15:57 -0400 Subject: [PATCH 1012/3617] added ec2_vpc_net new module to changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d4c4205b79cb96..916d1914ebcf95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,8 +19,9 @@ New Modules: * amazon: ec2_ami_find * amazon: ec2_eni * amazon: ec2_eni_facts - * amazon: elasticache_subnet_group + * amazon: ec2_vpc_net * amazon: ec2_win_password + * amazon: elasticache_subnet_group * amazon: iam * amazon: iam_policy * circonus_annotation From 4fbf26a4784ce5f6bae0824e69a2496c9e1d936a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 26 Jun 2015 16:18:51 -0400 Subject: [PATCH 1013/3617] added rax_mon_* mnodules to changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 916d1914ebcf95..eae3ec1034060f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -69,6 +69,10 @@ New Modules: * proxmox * proxmox_template * puppet * pushover * pushbullet + * rax: rax_mon_alarm + * rax: rax_mon_check + * rax: rax_mon_entity + * rax: rax_mon_notification * rabbitmq_binding * rabbitmq_exchange * rabbitmq_queue From 9ff0645fa2a0b7a72a9726d0755ec7f343116dfa Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 26 Jun 2015 16:21:38 -0400 Subject: [PATCH 1014/3617] add3ed missing rax mon module --- CHANGELOG.md | 1 + 1 file 
changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eae3ec1034060f..6e4e085b5d06c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,7 @@ New Modules: * rax: rax_mon_check * rax: rax_mon_entity * rax: rax_mon_notification + * rax: rax_mon_notification_plan * rabbitmq_binding * rabbitmq_exchange * rabbitmq_queue From a11b65814c2086d83255b5fd940535e6f5601abc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 26 Jun 2015 16:58:14 -0400 Subject: [PATCH 1015/3617] added win_iss modules, corrected bad line join in prev commit --- CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e4e085b5d06c9..64faebfa60ca5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,7 +67,9 @@ New Modules: * osx_defaults * pear * proxmox - * proxmox_template * puppet * pushover + * proxmox_template + * puppet + * pushover * pushbullet * rax: rax_mon_alarm * rax: rax_mon_check @@ -93,6 +95,11 @@ New Modules: * webfaction_site * win_environment * win_scheduled_task + * win_iis_virtualdirectory + * win_iis_webapplication + * win_iis_webapppool + * win_iis_webbinding + * win_iis_website * zabbix_host * zabbix_hostmacro * zabbix_screen From e153f76c9551ed461f377f66c1a51d83dc65bb12 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 27 Jun 2015 00:02:08 -0400 Subject: [PATCH 1016/3617] now validate that we do get a vault password --- lib/ansible/cli/vault.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index 05a48065771c2e..edd054f434d3bc 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -76,6 +76,9 @@ def run(self): elif self.options.ask_vault_pass: self.vault_pass, _= self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False) + if not self.vault_pass: + raise AnsibleOptionsError("A password is required to use Ansible's Vault") + self.execute() def execute_create(self): From 
f68223b9ed8e4405abfcdc53f8ace2cba441c017 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 27 Jun 2015 00:58:03 -0400 Subject: [PATCH 1017/3617] Don't add module args into variables at all Getting recursive errors otherwise, so this is probably not something we want to do. This most likely only worked in v1 due to the fact that module args were templated earlier than the point in Runner() when they were fed into the templating engine. --- lib/ansible/playbook/task.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index f0a7350954e404..012cd4695a00d9 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -199,8 +199,8 @@ def get_vars(self): if self._task_include: all_vars.update(self._task_include.get_vars()) - if isinstance(self.args, dict): - all_vars.update(self.args) + #if isinstance(self.args, dict): + # all_vars.update(self.args) if 'tags' in all_vars: del all_vars['tags'] From bb8d87ceb6d41a3e9d268ee14b8e91088cfa8219 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 27 Jun 2015 01:01:08 -0400 Subject: [PATCH 1018/3617] Allow field attributes which are lists to validate the type of the list items Starting to apply this for tags too, however it is not correcting things as would be expected. 
--- lib/ansible/playbook/attribute.py | 3 ++- lib/ansible/playbook/base.py | 4 ++++ lib/ansible/playbook/taggable.py | 4 +++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index 8a727a01930de3..b2e89c7733eb90 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -21,12 +21,13 @@ class Attribute: - def __init__(self, isa=None, private=False, default=None, required=False): + def __init__(self, isa=None, private=False, default=None, required=False, listof=None): self.isa = isa self.private = private self.default = default self.required = required + self.listof = listof class FieldAttribute(Attribute): pass diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index 2d931748ebbbc9..e33bedf3c86ea9 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -274,6 +274,10 @@ def post_validate(self, templar): elif attribute.isa == 'list': if not isinstance(value, list): value = [ value ] + if attribute.listof is not None: + for item in value: + if not isinstance(item, attribute.listof): + raise AnsibleParserError("the field '%s' should be a list of %s, but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds()) elif attribute.isa == 'dict' and not isinstance(value, dict): raise TypeError() diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index 40e05d1817a9e7..6ddd4b7439af64 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from six import string_types + from ansible.errors import AnsibleError from ansible.playbook.attribute import FieldAttribute from ansible.template import Templar @@ -26,7 +28,7 @@ class Taggable: untagged = set(['untagged']) - _tags = FieldAttribute(isa='list', default=[]) + _tags = 
FieldAttribute(isa='list', default=[], listof=(string_types,int)) def __init__(self): super(Taggable, self).__init__() From 94011160b3870191b7a13af39275a3591fb42fc7 Mon Sep 17 00:00:00 2001 From: Erik Weathers Date: Fri, 26 Jun 2015 23:30:13 -0700 Subject: [PATCH 1019/3617] fix typo in module-development comment: by -> but --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index f08cda8e68dcc8..74daba60d44aa9 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -292,7 +292,7 @@ will evaluate to True when check mode is enabled. For example:: ) if module.check_mode: - # Check if any changes would be made by don't actually make those changes + # Check if any changes would be made but don't actually make those changes module.exit_json(changed=check_if_system_state_would_be_changed()) Remember that, as module developer, you are responsible for ensuring that no From cbae9253078c2ca72d512a0330f275398403af3d Mon Sep 17 00:00:00 2001 From: Sharif Nassar Date: Tue, 23 Jun 2015 13:00:32 -0700 Subject: [PATCH 1020/3617] Clarify that setting ssh_args trumps control_path --- docsite/rst/intro_configuration.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index ca5d581779660c..f8671fb5f1f253 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -680,7 +680,7 @@ If set, this will pass a specific set of options to Ansible rather than Ansible' ssh_args = -o ControlMaster=auto -o ControlPersist=60s In particular, users may wish to raise the ControlPersist time to encourage performance. A value of 30 minutes may -be appropriate. +be appropriate. If `ssh_args` is set, the default ``control_path`` setting is not used. .. 
_control_path: @@ -700,7 +700,7 @@ may wish to shorten the string to something like the below:: Ansible 1.4 and later will instruct users to run with "-vvvv" in situations where it hits this problem and if so it is easy to tell there is too long of a Control Path filename. This may be frequently -encountered on EC2. +encountered on EC2. This setting is ignored if ``ssh_args`` is set. .. _scp_if_ssh: From fde99d809548d5e04d0f81967c71080a5b000630 Mon Sep 17 00:00:00 2001 From: Erik Weathers Date: Fri, 26 Jun 2015 23:38:06 -0700 Subject: [PATCH 1021/3617] change 'stage' to 'staging', as it a much more common term for a pre-production environment, and there are already many references to 'staging' appearing in the ansible code and docs, so let's be consistent --- docsite/rst/playbooks_best_practices.rst | 14 +++++++------- docsite/rst/test_strategies.rst | 10 +++++----- plugins/inventory/ec2.ini | 12 ++++++------ 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index 43c642d583ca3e..adb8d5ca7c2d9a 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -28,7 +28,7 @@ Directory Layout The top level of the directory would contain files and directories like so:: production # inventory file for production servers - stage # inventory file for stage environment + staging # inventory file for staging environment group_vars/ group1 # here we assign variables to particular groups @@ -78,9 +78,9 @@ If you are using a cloud provider, you should not be managing your inventory in This does not just apply to clouds -- If you have another system maintaining a canonical list of systems in your infrastructure, usage of dynamic inventory is a great idea in general. -.. _stage_vs_prod: +.. 
_staging_vs_prod: -How to Differentiate Stage vs Production +How to Differentiate Staging vs Production ````````````````````````````````````````` If managing static inventory, it is frequently asked how to differentiate different types of environments. The following example @@ -285,14 +285,14 @@ all the time -- you can have situational plays that you use at different times a Ansible allows you to deploy and configure using the same tool, so you would likely reuse groups and just keep the OS configuration in separate playbooks from the app deployment. -.. _stage_vs_production: +.. _staging_vs_production: -Stage vs Production +Staging vs Production +++++++++++++++++++ -As also mentioned above, a good way to keep your stage (or testing) and production environments separate is to use a separate inventory file for stage and production. This way you pick with -i what you are targeting. Keeping them all in one file can lead to surprises! +As also mentioned above, a good way to keep your staging (or testing) and production environments separate is to use a separate inventory file for staging and production. This way you pick with -i what you are targeting. Keeping them all in one file can lead to surprises! -Testing things in a stage environment before trying in production is always a great idea. Your environments need not be the same +Testing things in a staging environment before trying in production is always a great idea. Your environments need not be the same size and you can use group variables to control the differences between those environments. .. _rolling_update: diff --git a/docsite/rst/test_strategies.rst b/docsite/rst/test_strategies.rst index a3abf160906bef..03792c3f9946b1 100644 --- a/docsite/rst/test_strategies.rst +++ b/docsite/rst/test_strategies.rst @@ -114,14 +114,14 @@ Testing Lifecycle If writing some degree of basic validation of your application into your playbooks, they will run every time you deploy. 
-As such, deploying into a local development VM and a stage environment will both validate that things are according to plan +As such, deploying into a local development VM and a staging environment will both validate that things are according to plan ahead of your production deploy. Your workflow may be something like this:: - Use the same playbook all the time with embedded tests in development - - Use the playbook to deploy to a stage environment (with the same playbooks) that simulates production - - Run an integration test battery written by your QA team against stage + - Use the playbook to deploy to a staging environment (with the same playbooks) that simulates production + - Run an integration test battery written by your QA team against staging - Deploy to production, with the same integrated tests. Something like an integration test battery should be written by your QA team if you are a production webservice. This would include @@ -213,7 +213,7 @@ If desired, the above techniques may be extended to enable continuous deployment The workflow may look like this:: - Write and use automation to deploy local development VMs - - Have a CI system like Jenkins deploy to a stage environment on every code change + - Have a CI system like Jenkins deploy to a staging environment on every code change - The deploy job calls testing scripts to pass/fail a build on every deploy - If the deploy job succeeds, it runs the same deploy playbook against production inventory @@ -241,7 +241,7 @@ as part of a Continuous Integration/Continuous Delivery pipeline, as is covered The focus should not be on infrastructure testing, but on application testing, so we strongly encourage getting together with your QA team and ask what sort of tests would make sense to run every time you deploy development VMs, and which sort of tests they would like -to run against the stage environment on every deploy. Obviously at the development stage, unit tests are great too. 
But don't unit +to run against the staging environment on every deploy. Obviously at the development stage, unit tests are great too. But don't unit test your playbook. Ansible describes states of resources declaratively, so you don't have to. If there are cases where you want to be sure of something though, that's great, and things like stat/assert are great go-to modules for that purpose. diff --git a/plugins/inventory/ec2.ini b/plugins/inventory/ec2.ini index 6583160f0f7b7d..1d7428b2edad2b 100644 --- a/plugins/inventory/ec2.ini +++ b/plugins/inventory/ec2.ini @@ -91,10 +91,10 @@ group_by_rds_engine = True group_by_rds_parameter_group = True # If you only want to include hosts that match a certain regular expression -# pattern_include = stage-* +# pattern_include = staging-* # If you want to exclude any hosts that match a certain regular expression -# pattern_exclude = stage-* +# pattern_exclude = staging-* # Instance filters can be used to control which instances are retrieved for # inventory. For the full list of possible filters, please read the EC2 API @@ -102,14 +102,14 @@ group_by_rds_parameter_group = True # Filters are key/value pairs separated by '=', to list multiple filters use # a list separated by commas. See examples below. -# Retrieve only instances with (key=value) env=stage tag -# instance_filters = tag:env=stage +# Retrieve only instances with (key=value) env=staging tag +# instance_filters = tag:env=staging # Retrieve only instances with role=webservers OR role=dbservers tag # instance_filters = tag:role=webservers,tag:role=dbservers -# Retrieve only t1.micro instances OR instances with tag env=stage -# instance_filters = instance-type=t1.micro,tag:env=stage +# Retrieve only t1.micro instances OR instances with tag env=staging +# instance_filters = instance-type=t1.micro,tag:env=staging # You can use wildcards in filter values also. 
Below will list instances which # tag Name value matches webservers1* From de4d4bcc80b78b7f03f58649e10035c6f7996ad2 Mon Sep 17 00:00:00 2001 From: Anuvrat Parashar Date: Sat, 27 Jun 2015 12:30:45 +0530 Subject: [PATCH 1022/3617] grammatical rearrangements. --- docsite/rst/playbooks_vault.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst index 25dae8f5f3b90a..241e418d311abc 100644 --- a/docsite/rst/playbooks_vault.rst +++ b/docsite/rst/playbooks_vault.rst @@ -14,7 +14,7 @@ What Can Be Encrypted With Vault The vault feature can encrypt any structured data file used by Ansible. This can include "group_vars/" or "host_vars/" inventory variables, variables loaded by "include_vars" or "vars_files", or variable files passed on the ansible-playbook command line with "-e @file.yml" or "-e @file.json". Role variables and defaults are also included! -Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault. If you'd like to not betray what variables you are even using, you can go as far to keep an individual task file entirely encrypted. However, that might be a little much and could annoy your coworkers :) +Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault. If you'd not like to betray even the variables you are using, you can go as far as keeping individual task files entirely encrypted. However, that might be a little too much and could annoy your coworkers :) .. _creating_files: From c17d8b943900ec2b58e11206ba997d6400140c19 Mon Sep 17 00:00:00 2001 From: Anuvrat Parashar Date: Sat, 27 Jun 2015 12:34:12 +0530 Subject: [PATCH 1023/3617] [grammar nazi] rearrangment. 
--- docsite/rst/playbooks_vault.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst index 241e418d311abc..745b6f21c22e68 100644 --- a/docsite/rst/playbooks_vault.rst +++ b/docsite/rst/playbooks_vault.rst @@ -14,7 +14,7 @@ What Can Be Encrypted With Vault The vault feature can encrypt any structured data file used by Ansible. This can include "group_vars/" or "host_vars/" inventory variables, variables loaded by "include_vars" or "vars_files", or variable files passed on the ansible-playbook command line with "-e @file.yml" or "-e @file.json". Role variables and defaults are also included! -Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault. If you'd not like to betray even the variables you are using, you can go as far as keeping individual task files entirely encrypted. However, that might be a little too much and could annoy your coworkers :) +Because Ansible tasks, handlers, and so on are also data, these too can be encrypted with vault. If you'd not like to betray even the variables you are using, you can go as far as keeping individual task files entirely encrypted. However, that might be a little too much and could annoy your coworkers :) .. _creating_files: From 0eb1c880ddac9547560040311739b5ca8291a642 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 27 Jun 2015 15:18:18 -0400 Subject: [PATCH 1024/3617] Use itertools instead of set for tags, as the data may not hash well The tags field may contain bad data before it is post_validated, however some methods assumed it would be a simple list or string. 
Using itertools gets us around the problem of the data potentially not being hashable Fixes #9380 --- lib/ansible/playbook/base.py | 8 +++++++- lib/ansible/playbook/taggable.py | 3 ++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index e33bedf3c86ea9..4ff7f11c097682 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import itertools import uuid from functools import partial @@ -232,6 +233,10 @@ def copy(self): new_me._loader = self._loader new_me._variable_manager = self._variable_manager + # if the ds value was set on the object, copy it to the new copy too + if hasattr(self, '_ds'): + new_me._ds = self._ds + return new_me def post_validate(self, templar): @@ -340,7 +345,8 @@ def _extend_value(self, value, new_value): if not isinstance(new_value, list): new_value = [ new_value ] - return list(set(value + new_value)) + #return list(set(value + new_value)) + return [i for i,_ in itertools.groupby(value + new_value)] def __getstate__(self): return self.serialize() diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index 6ddd4b7439af64..d140f52a12eec0 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import itertools from six import string_types from ansible.errors import AnsibleError @@ -67,7 +68,7 @@ def evaluate_tags(self, only_tags, skip_tags, all_vars): else: tags = set([tags]) else: - tags = set(tags) + tags = [i for i,_ in itertools.groupby(tags)] else: # this makes intersection work for untagged tags = self.__class__.untagged From 8ef28253e35457a254d526ef8cbc1a8387d7d9ba Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 27 Jun 2015 15:37:10 -0400 Subject: [PATCH 1025/3617] 
Properly catch and report conditional test failures --- lib/ansible/playbook/conditional.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index ff00a01de27c45..0cc07195155af6 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from jinja2.exceptions import UndefinedError + from ansible.errors import * from ansible.playbook.attribute import FieldAttribute from ansible.template import Templar @@ -53,9 +55,14 @@ def evaluate_conditional(self, templar, all_vars): False if any of them evaluate as such. ''' - for conditional in self.when: - if not self._check_conditional(conditional, templar, all_vars): - return False + try: + for conditional in self.when: + if not self._check_conditional(conditional, templar, all_vars): + return False + except UndefinedError, e: + raise AnsibleError("The conditional check '%s' failed due to an undefined variable. The error was: %s" % (conditional, e), obj=self.get_ds()) + except Exception, e: + raise AnsibleError("The conditional check '%s' failed. 
The error was: %s" % (conditional, e), obj=self.get_ds()) return True From f433e709f253ad653726dcf19cb9f864686c15b6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 27 Jun 2015 20:04:34 -0400 Subject: [PATCH 1026/3617] Fix templating of hostvars values Also adds play information into the hostvars creation, to assure the variable manager used there has access to vars and vars_files Fixes #9501 Fixes #8213 Fixes #7844 --- lib/ansible/vars/__init__.py | 2 +- lib/ansible/vars/hostvars.py | 13 +++++-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 8c098b30f10b92..4e8d6bda3c38ae 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -219,7 +219,7 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): all_vars['groups'] = [group.name for group in host.get_groups()] if self._inventory is not None: - hostvars = HostVars(vars_manager=self, inventory=self._inventory, loader=loader) + hostvars = HostVars(vars_manager=self, play=play, inventory=self._inventory, loader=loader) all_vars['hostvars'] = hostvars all_vars['groups'] = self._inventory.groups_list() diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py index 45b3340229d261..166bdbe2579660 100644 --- a/lib/ansible/vars/hostvars.py +++ b/lib/ansible/vars/hostvars.py @@ -26,22 +26,19 @@ class HostVars(dict): ''' A special view of vars_cache that adds values from the inventory when needed. 
''' - def __init__(self, vars_manager, inventory, loader): + def __init__(self, vars_manager, play, inventory, loader): self._vars_manager = vars_manager + self._play = play self._inventory = inventory self._loader = loader self._lookup = {} - #self.update(vars_cache) - def __getitem__(self, host_name): if host_name not in self._lookup: host = self._inventory.get_host(host_name) - result = self._vars_manager.get_vars(loader=self._loader, host=host) - #result.update(self._vars_cache.get(host, {})) - #templar = Templar(variables=self._vars_cache, loader=self._loader) - #self._lookup[host] = templar.template(result) - self._lookup[host_name] = result + result = self._vars_manager.get_vars(loader=self._loader, play=self._play, host=host) + templar = Templar(variables=result, loader=self._loader) + self._lookup[host_name] = templar.template(result) return self._lookup[host_name] From 9d9cd0c42ca9a401f299f8cb805aafe3c0817b9e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 28 Jun 2015 00:30:27 -0400 Subject: [PATCH 1027/3617] Handle getting the ds for Conditionals which may not be mixed in --- lib/ansible/playbook/conditional.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index 0cc07195155af6..ae7a5f0ba4c893 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -55,14 +55,21 @@ def evaluate_conditional(self, templar, all_vars): False if any of them evaluate as such. ''' + # since this is a mixin, it may not have an underlying datastructure + # associated with it, so we pull it out now in case we need it for + # error reporting below + ds = None + if hasattr(self, 'get_ds'): + ds = self.get_ds() + try: for conditional in self.when: if not self._check_conditional(conditional, templar, all_vars): return False except UndefinedError, e: - raise AnsibleError("The conditional check '%s' failed due to an undefined variable. 
The error was: %s" % (conditional, e), obj=self.get_ds()) + raise AnsibleError("The conditional check '%s' failed due to an undefined variable. The error was: %s" % (conditional, e), obj=ds) except Exception, e: - raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (conditional, e), obj=self.get_ds()) + raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (conditional, e), obj=ds) return True From 24226646fc43198d7c20f9590248b7189a4c8b96 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 28 Jun 2015 01:00:32 -0400 Subject: [PATCH 1028/3617] When loading the play hosts list, enforce some consistency Fixes #9580 --- lib/ansible/playbook/play.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 093a4e1d4722b8..c3d9aea06ba30b 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from six import string_types + from ansible.errors import AnsibleError, AnsibleParserError from ansible.playbook.attribute import Attribute, FieldAttribute @@ -57,7 +59,7 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='string', default='smart') - _hosts = FieldAttribute(isa='list', default=[], required=True) + _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types) _name = FieldAttribute(isa='string', default='') # Variable Attributes @@ -121,6 +123,28 @@ def preprocess_data(self, ds): return super(Play, self).preprocess_data(ds) + def _load_hosts(self, attr, ds): + ''' + Loads the hosts from the given datastructure, which might be a list + or a simple string. We also switch integers in this list back to strings, + as the YAML parser will turn things that look like numbers into numbers. 
+ ''' + + if isinstance(ds, (string_types, int)): + ds = [ ds ] + + if not isinstance(ds, list): + raise AnsibleParserError("'hosts' must be specified as a list or a single pattern", obj=ds) + + # YAML parsing of things that look like numbers may have + # resulted in integers showing up in the list, so convert + # them back to strings to prevent problems + for idx,item in enumerate(ds): + if isinstance(item, int): + ds[idx] = "%s" % item + + return ds + def _load_vars(self, attr, ds): ''' Vars in a play can be specified either as a dictionary directly, or From e6251542a412c7db01cf9be24d29ca31fdb3e4ac Mon Sep 17 00:00:00 2001 From: yunano Date: Sun, 28 Jun 2015 22:07:32 +0900 Subject: [PATCH 1029/3617] fix small typo for wantlist --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64faebfa60ca5a..9226e5674ac426 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -145,7 +145,7 @@ Major changes: * Added travis integration to github for basic tests, this should speed up ticket triage and merging. * environment: directive now can also be applied to play and is inhertited by tasks, which can still override it. * expanded facts and OS/distribution support for existing facts and improved performance with pypy. -* new 'wantlist' option to lookups allows for selecting a list typed variable vs a command delimited string as the return. +* new 'wantlist' option to lookups allows for selecting a list typed variable vs a comma delimited string as the return. * the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes). * allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules). * sped up YAML parsing in ansible by up to 25% by switching to CParser loader. 
From 21c14363fdab8c4d7cd5a8c900153744746c511d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 29 Jun 2015 10:55:48 -0400 Subject: [PATCH 1030/3617] Allow callback plugins to be whitelisted --- lib/ansible/constants.py | 1 + lib/ansible/executor/task_queue_manager.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 8f9c5bf5103ff6..db0cabb10fa06f 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -188,6 +188,7 @@ def shell_expand_path(path): DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) +DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', None, islist=True) RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index debcf6873d8e70..b1d905be7ad712 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -144,6 +144,8 @@ def _load_callbacks(self, stdout_callback): if callback_name != stdout_callback or stdout_callback_loaded: continue stdout_callback_loaded = True + elif C.DEFAULT_CALLBACK_WHITELIST is not None and callback_name not in C.DEFAULT_CALLBACK_WHITELIST: + continue loaded_plugins.append(callback_plugin(self._display)) else: From 881dbb6da122598029107e63dc6b1cfe51f2bc2c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 05:58:42 -0700 Subject: [PATCH 
1031/3617] Add building of docs to travis --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index e53b870597ce8c..83b0fc7fd68afc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,5 +13,6 @@ install: - pip install tox script: - tox + - make -C docsite all after_success: - coveralls From be6db1a730270a8e89636da9630dcac8e3e093fc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 08:05:58 -0700 Subject: [PATCH 1032/3617] Refactor the argspec type checking and add path as a type --- lib/ansible/module_utils/basic.py | 146 ++++++++++++++++++------------ 1 file changed, 90 insertions(+), 56 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index ffd159601d62a5..e89809ff12e68f 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -351,9 +351,9 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, self.check_mode = False self.no_log = no_log self.cleanup_files = [] - + self.aliases = {} - + if add_file_common_args: for k, v in FILE_COMMON_ARGUMENTS.iteritems(): if k not in self.argument_spec: @@ -366,7 +366,7 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, self.params = self._load_params() self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log'] - + self.aliases = self._handle_aliases() if check_invalid_arguments: @@ -380,6 +380,16 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, self._set_defaults(pre=True) + + self._CHECK_ARGUMENT_TYPES_DISPATCHER = { + 'str': self._check_type_str, + 'list': self._check_type_list, + 'dict': self._check_type_dict, + 'bool': self._check_type_bool, + 'int': self._check_type_int, + 'float': self._check_type_float, + 'path': self._check_type_path, + } if not bypass_checks: self._check_required_arguments() self._check_argument_values() @@ -1021,6 +1031,76 @@ def safe_eval(self, str, locals=None, include_exceptions=False): return 
(str, e) return str + def _check_type_str(self, value): + if isinstance(value, basestring): + return value + # Note: This could throw a unicode error if value's __str__() method + # returns non-ascii. Have to port utils.to_bytes() if that happens + return str(value) + + def _check_type_list(self, value): + if isinstance(value, list): + return value + + if isinstance(value, basestring): + return value.split(",") + elif isinstance(value, int) or isinstance(value, float): + return [ str(value) ] + + raise TypeError('%s cannot be converted to a list' % type(value)) + + def _check_type_dict(self, value): + if isinstance(value, dict): + return value + + if isinstance(value, basestring): + if value.startswith("{"): + try: + return json.loads(value) + except: + (result, exc) = self.safe_eval(value, dict(), include_exceptions=True) + if exc is not None: + raise TypeError('unable to evaluate string as dictionary') + return result + elif '=' in value: + return dict([x.strip().split("=", 1) for x in value.split(",")]) + else: + raise TypeError("dictionary requested, could not parse JSON or key=value") + + raise TypeError('%s cannot be converted to a dict' % type(value)) + + def _check_type_bool(self, value): + if isinstance(value, bool): + return value + + if isinstance(value, basestring): + return self.boolean(value) + + raise TypeError('%s cannot be converted to a bool' % type(value)) + + def _check_type_int(self, value): + if isinstance(value, int): + return value + + if isinstance(value, basestring): + return int(value) + + raise TypeError('%s cannot be converted to an int' % type(value)) + + def _check_type_float(self, value): + if isinstance(value, float): + return value + + if isinstance(value, basestring): + return float(value) + + raise TypeError('%s cannot be converted to a float' % type(value)) + + def _check_type_path(self, value): + value = self._check_type_str(value) + return os.path.expanduser(os.path.expandvars(value)) + + def _check_argument_types(self): ''' 
ensure all arguments have the requested type ''' for (k, v) in self.argument_spec.iteritems(): @@ -1034,59 +1114,13 @@ def _check_argument_types(self): is_invalid = False try: - if wanted == 'str': - if not isinstance(value, basestring): - self.params[k] = str(value) - elif wanted == 'list': - if not isinstance(value, list): - if isinstance(value, basestring): - self.params[k] = value.split(",") - elif isinstance(value, int) or isinstance(value, float): - self.params[k] = [ str(value) ] - else: - is_invalid = True - elif wanted == 'dict': - if not isinstance(value, dict): - if isinstance(value, basestring): - if value.startswith("{"): - try: - self.params[k] = json.loads(value) - except: - (result, exc) = self.safe_eval(value, dict(), include_exceptions=True) - if exc is not None: - self.fail_json(msg="unable to evaluate dictionary for %s" % k) - self.params[k] = result - elif '=' in value: - self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")]) - else: - self.fail_json(msg="dictionary requested, could not parse JSON or key=value") - else: - is_invalid = True - elif wanted == 'bool': - if not isinstance(value, bool): - if isinstance(value, basestring): - self.params[k] = self.boolean(value) - else: - is_invalid = True - elif wanted == 'int': - if not isinstance(value, int): - if isinstance(value, basestring): - self.params[k] = int(value) - else: - is_invalid = True - elif wanted == 'float': - if not isinstance(value, float): - if isinstance(value, basestring): - self.params[k] = float(value) - else: - is_invalid = True - else: - self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) - - if is_invalid: - self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted)) - except ValueError: - self.fail_json(msg="value of argument %s is not of type %s and we were unable to automatically convert" % (k, wanted)) + type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted] + 
except KeyError: + self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k)) + try: + self.params[k] = type_checker(value) + except (TypeError, ValueError): + self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s" % (k, type(value), wanted)) def _set_defaults(self, pre=True): for (k,v) in self.argument_spec.iteritems(): From d612838116314aa9652a5b9e951a524ffc0fd8e9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 08:30:00 -0700 Subject: [PATCH 1033/3617] Add packages needed to build the docs --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 83b0fc7fd68afc..4ee974e89995e4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ addons: packages: - python2.4 install: - - pip install tox + - pip install tox PyYAML Jinja2 sphinx script: - tox - make -C docsite all From c440762b61f4ab4b04eac122c793ca5f219c3b26 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 29 Jun 2015 12:09:16 -0500 Subject: [PATCH 1034/3617] Make the wait_timeout for rax tests a configurable default --- .../roles/prepare_rax_tests/defaults/main.yml | 2 + .../integration/roles/test_rax/tasks/main.yml | 30 ++++++++++++++ .../roles/test_rax_cbs/tasks/main.yml | 6 +++ .../test_rax_cbs_attachments/tasks/main.yml | 7 ++++ .../roles/test_rax_cdb/tasks/main.yml | 11 +++++ .../test_rax_cdb_database/tasks/main.yml | 2 + .../roles/test_rax_clb/tasks/main.yml | 40 +++++++++++++++++++ .../roles/test_rax_clb_nodes/tasks/main.yml | 5 +++ .../roles/test_rax_facts/tasks/main.yml | 2 + .../roles/test_rax_meta/tasks/main.yml | 2 + .../test_rax_scaling_group/tasks/main.yml | 2 + 11 files changed, 109 insertions(+) diff --git a/test/integration/roles/prepare_rax_tests/defaults/main.yml b/test/integration/roles/prepare_rax_tests/defaults/main.yml index 48eec978abb0c5..be6d700943ccc6 100644 --- a/test/integration/roles/prepare_rax_tests/defaults/main.yml +++ 
b/test/integration/roles/prepare_rax_tests/defaults/main.yml @@ -14,3 +14,5 @@ rackspace_alt_image_name: "CentOS 6 (PVHVM)" rackspace_alt_image_human_id: "centos-6-pvhvm" rackspace_alt_flavor: "general1-1" + +rackspace_wait_timeout: 600 diff --git a/test/integration/roles/test_rax/tasks/main.yml b/test/integration/roles/test_rax/tasks/main.yml index e91c0a949feefd..6f64cbc9bf3cf0 100644 --- a/test/integration/roles/test_rax/tasks/main.yml +++ b/test/integration/roles/test_rax/tasks/main.yml @@ -119,6 +119,7 @@ name: "{{ resource_prefix }}-1" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete integration 1" @@ -141,6 +142,7 @@ flavor: "{{ rackspace_flavor }}" name: "{{ resource_prefix }}-2" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax basic idepmpotency 1 @@ -163,6 +165,7 @@ flavor: "{{ rackspace_flavor }}" name: "{{ resource_prefix }}-2" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax basic idempotency 2 @@ -185,6 +188,7 @@ name: "{{ resource_prefix }}-2" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete integration 2" @@ -211,6 +215,7 @@ meta: foo: bar wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax basic idepmpotency with meta 1 @@ -236,6 +241,7 @@ meta: foo: bar wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax basic idempotency with meta 2 @@ -260,6 +266,7 @@ meta: foo: bar wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete integration 3" @@ -285,6 +292,7 @@ name: "{{ resource_prefix }}-4" count: 2 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax basic idepmpotency multi server 1 @@ -306,6 +314,7 @@ name: "{{ resource_prefix }}-4" count: 2 wait: true + wait_timeout: "{{ 
rackspace_wait_timeout }}" register: rax - name: Validate rax basic idempotency multi server 2 @@ -327,6 +336,7 @@ name: "{{ resource_prefix }}-4" count: 3 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax basic idempotency multi server 3 @@ -349,6 +359,7 @@ count: 3 state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete integration 4" @@ -375,6 +386,7 @@ count: 2 group: "{{ resource_prefix }}-5" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group without exact_count 1 @@ -398,6 +410,7 @@ count: 2 group: "{{ resource_prefix }}-5" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" state: absent register: rax @@ -425,6 +438,7 @@ count: 2 group: "{{ resource_prefix }}-6" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group without exact_count non-idempotency 1 @@ -448,6 +462,7 @@ count: 2 group: "{{ resource_prefix }}-6" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group without exact_count non-idempotency 2 @@ -470,6 +485,7 @@ count: 4 group: "{{ resource_prefix }}-6" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" state: absent register: rax @@ -498,6 +514,7 @@ exact_count: true group: "{{ resource_prefix }}-7" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group with exact_count 1 @@ -522,6 +539,7 @@ exact_count: true group: "{{ resource_prefix }}-7" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group with exact_count 2 @@ -545,6 +563,7 @@ exact_count: true group: "{{ resource_prefix }}-7" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group with exact_count 3 @@ -570,6 +589,7 @@ exact_count: true 
group: "{{ resource_prefix }}-7" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete integration 7" @@ -597,6 +617,7 @@ group: "{{ resource_prefix }}-8" auto_increment: false wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group without exact_count and disabled auto_increment 1 @@ -621,6 +642,7 @@ group: "{{ resource_prefix }}-8" auto_increment: false wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" state: absent register: rax @@ -649,6 +671,7 @@ exact_count: true group: "{{ resource_prefix }}-9" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group with exact_count and no printf 1 @@ -673,6 +696,7 @@ exact_count: true group: "{{ resource_prefix }}-9" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete integration 9" @@ -701,6 +725,7 @@ exact_count: true group: "{{ resource_prefix }}-10" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group with exact_count and offset 1 @@ -726,6 +751,7 @@ exact_count: true group: "{{ resource_prefix }}-10" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete integration 10" @@ -754,6 +780,7 @@ exact_count: true group: "{{ resource_prefix }}-11" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax multi server group with exact_count and offset 1 @@ -779,6 +806,7 @@ exact_count: true group: "{{ resource_prefix }}-11" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete integration 11" @@ -803,6 +831,7 @@ flavor: "{{ rackspace_flavor }}" name: "{{ resource_prefix }}-12" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate rax instance_ids absent 1 (create) @@ -827,6 +856,7 @@ - "{{ rax.success.0.rax_id 
}}" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax2 - name: Validate rax instance_ids absent 2 (delete) diff --git a/test/integration/roles/test_rax_cbs/tasks/main.yml b/test/integration/roles/test_rax_cbs/tasks/main.yml index de810c654052f2..ae6f5c68e35084 100644 --- a/test/integration/roles/test_rax_cbs/tasks/main.yml +++ b/test/integration/roles/test_rax_cbs/tasks/main.yml @@ -55,6 +55,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-1" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cbs - name: Validate rax_cbs creds, region and name @@ -116,6 +117,7 @@ name: "{{ resource_prefix }}-2" size: 150 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cbs - name: Validate rax_cbs creds, region and valid size @@ -177,6 +179,7 @@ name: "{{ resource_prefix }}-3" volume_type: SSD wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cbs - name: Validate rax_cbs creds, region and valid volume_size @@ -218,6 +221,7 @@ name: "{{ resource_prefix }}-4" description: "{{ resource_prefix }}-4 description" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cbs - name: Validate rax_cbs creds, region and description @@ -261,6 +265,7 @@ meta: foo: bar wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cbs - name: Validate rax_cbs creds, region and meta @@ -302,6 +307,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-6" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cbs_1 - name: Validate rax_cbs with idempotency 1 diff --git a/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml b/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml index 6750105c1e60ce..0321fe10e17652 100644 --- a/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml +++ b/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml @@ -80,6 +80,7 @@ region: "{{ rackspace_region }}" 
name: "{{ resource_prefix }}-rax_cbs_attachments" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cbs - name: Validate volume build @@ -102,6 +103,7 @@ flavor: "{{ rackspace_flavor }}" name: "{{ resource_prefix }}-rax_cbs_attachments" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate CloudServer build @@ -147,6 +149,7 @@ volume: "{{ rax_cbs.volume.id }}" device: /dev/xvde wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cbs_attachments - name: Validate rax_cbs_attachments creds, region, server, volume and device (valid) @@ -166,6 +169,7 @@ volume: "{{ rax_cbs.volume.id }}" device: /dev/xvde wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cbs_attachments - name: Validate idempotent present test @@ -183,6 +187,7 @@ volume: "{{ rax_cbs.volume.id }}" device: /dev/xvde wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" state: absent register: rax_cbs_attachments @@ -202,6 +207,7 @@ volume: "{{ rax_cbs.volume.id }}" device: /dev/xvde wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" state: absent register: rax_cbs_attachments @@ -242,6 +248,7 @@ instance_ids: "{{ rax.instances[0].id }}" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete" diff --git a/test/integration/roles/test_rax_cdb/tasks/main.yml b/test/integration/roles/test_rax_cdb/tasks/main.yml index fe4bdd9c0d99ac..f5336e54d0156b 100644 --- a/test/integration/roles/test_rax_cdb/tasks/main.yml +++ b/test/integration/roles/test_rax_cdb/tasks/main.yml @@ -73,6 +73,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-1" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: Validate rax_cdb with creds, region and name @@ -92,6 +93,7 @@ name: "{{ resource_prefix }}-1" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: "Validate delete integration 
1" @@ -113,6 +115,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-2" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: Validate rax_cdb idempotent test 1 @@ -130,6 +133,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-2" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: Validate rax_cdb idempotent test 2 @@ -148,6 +152,7 @@ name: "{{ resource_prefix }}-2" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: "Validate delete integration 2" @@ -167,6 +172,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-3" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: Validate rax_cdb resize volume 1 @@ -185,6 +191,7 @@ name: "{{ resource_prefix }}-3" volume: 3 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" wait_timeout: 600 register: rax_cdb @@ -204,6 +211,7 @@ name: "{{ resource_prefix }}-3" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: "Validate delete integration 3" @@ -223,6 +231,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-4" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: Validate rax_cdb resize flavor 1 @@ -241,6 +250,7 @@ name: "{{ resource_prefix }}-4" flavor: 2 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" wait_timeout: 600 register: rax_cdb @@ -260,6 +270,7 @@ name: "{{ resource_prefix }}-4" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: "Validate delete integration 4" diff --git a/test/integration/roles/test_rax_cdb_database/tasks/main.yml b/test/integration/roles/test_rax_cdb_database/tasks/main.yml index a8f5caa335d60b..548641b6ebf4e7 100644 --- a/test/integration/roles/test_rax_cdb_database/tasks/main.yml +++ b/test/integration/roles/test_rax_cdb_database/tasks/main.yml @@ -92,6 +92,7 
@@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-rax_cdb_database" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: Validate build @@ -204,6 +205,7 @@ name: "{{ resource_prefix }}-rax_cdb_database" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_cdb - name: Validate Delete diff --git a/test/integration/roles/test_rax_clb/tasks/main.yml b/test/integration/roles/test_rax_clb/tasks/main.yml index 2426fa3ae5944a..ae6776b56f42a5 100644 --- a/test/integration/roles/test_rax_clb/tasks/main.yml +++ b/test/integration/roles/test_rax_clb/tasks/main.yml @@ -73,6 +73,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-1" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: Validate rax_clb with creds, region and name @@ -95,6 +96,7 @@ name: "{{ resource_prefix }}-1" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 1" @@ -116,6 +118,7 @@ name: "{{ resource_prefix }}-2" protocol: TCP wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: Validate rax_clb with creds, region, name and protocol @@ -137,6 +140,7 @@ name: "{{ resource_prefix }}-2" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 2" @@ -158,6 +162,7 @@ protocol: TCP port: 8080 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: Validate rax_clb with creds, region, name, protocol and port @@ -179,6 +184,7 @@ name: "{{ resource_prefix }}-3" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 3" @@ -201,6 +207,7 @@ port: 8080 type: SERVICENET wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: Validate rax_clb with creds, region, name, protocol and type @@ -222,6 +229,7 
@@ name: "{{ resource_prefix }}-4" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 4" @@ -245,6 +253,7 @@ type: SERVICENET timeout: 1 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" ignore_errors: true register: rax_clb @@ -269,6 +278,7 @@ type: SERVICENET timeout: 60 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: Validate rax_clb with creds, region, name, protocol, type and timeout @@ -290,6 +300,7 @@ name: "{{ resource_prefix }}-5" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 5" @@ -314,6 +325,7 @@ timeout: 60 algorithm: RANDOM wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: Validate rax_clb with creds, region, name, protocol, type, timeout and algorithm @@ -336,6 +348,7 @@ name: "{{ resource_prefix }}-6" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 6" @@ -357,6 +370,7 @@ type: BAD timeout: 1 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" ignore_errors: true register: rax_clb @@ -379,6 +393,7 @@ protocol: BAD timeout: 1 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" ignore_errors: true register: rax_clb @@ -401,6 +416,7 @@ algorithm: BAD timeout: 1 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" ignore_errors: true register: rax_clb @@ -428,6 +444,7 @@ meta: foo: bar wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: Validate rax_clb with creds, region, name, protocol, type, timeout, algorithm and metadata @@ -451,6 +468,7 @@ name: "{{ resource_prefix }}-7" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 7" @@ -470,6 +488,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-8-HTTP" wait: 
true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_http - name: Validate rax_clb with shared VIP HTTP @@ -489,6 +508,7 @@ protocol: HTTPS port: 443 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" vip_id: "{{ (rax_clb_http.balancer.virtual_ips|first).id }}" register: rax_clb_https @@ -508,6 +528,7 @@ name: "{{ resource_prefix }}-8-HTTP" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_http - name: "Delete integration 8 HTTPS" @@ -518,6 +539,7 @@ name: "{{ resource_prefix }}-8-HTTPS" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_http - name: "Validate delete integration 8" @@ -537,6 +559,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-9" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_p1 - name: Validate rax_clb with updated protocol 1 @@ -555,6 +578,7 @@ name: "{{ resource_prefix }}-9" protocol: TCP wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_p2 - name: Validate rax_clb with updated protocol 2 @@ -574,6 +598,7 @@ name: "{{ resource_prefix }}-9" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 9" @@ -592,6 +617,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-10" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_a1 - name: Validate rax_clb with updated algorithm 1 @@ -609,6 +635,7 @@ name: "{{ resource_prefix }}-10" algorithm: RANDOM wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_a2 - name: Validate rax_clb with updated algorithm 2 @@ -628,6 +655,7 @@ name: "{{ resource_prefix }}-10" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 10" @@ -647,6 +675,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-11" wait: true + wait_timeout: "{{ 
rackspace_wait_timeout }}" register: rax_clb_1 - name: Validate rax_clb with updated port 1 @@ -664,6 +693,7 @@ name: "{{ resource_prefix }}-11" port: 8080 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_2 - name: Validate rax_clb with updated port 2 @@ -683,6 +713,7 @@ name: "{{ resource_prefix }}-11" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 11" @@ -702,6 +733,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-12" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_1 - name: Validate rax_clb with updated timeout 1 @@ -719,6 +751,7 @@ name: "{{ resource_prefix }}-12" timeout: 60 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_2 - name: Validate rax_clb with updated timeout 2 @@ -738,6 +771,7 @@ name: "{{ resource_prefix }}-12" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 12" @@ -757,6 +791,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-13" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_1 - name: Validate rax_clb with invalid updated type 1 @@ -773,6 +808,7 @@ name: "{{ resource_prefix }}-13" type: SERVICENET wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_2 ignore_errors: true @@ -790,6 +826,7 @@ name: "{{ resource_prefix }}-13" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 13" @@ -809,6 +846,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-14" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_1 - name: Validate rax_clb with updated meta 1 @@ -827,6 +865,7 @@ meta: foo: bar wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_2 - name: Validate rax_clb with updated meta 2 @@ -847,6 
+886,7 @@ name: "{{ resource_prefix }}-14" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 14" diff --git a/test/integration/roles/test_rax_clb_nodes/tasks/main.yml b/test/integration/roles/test_rax_clb_nodes/tasks/main.yml index 01bbf9dd9a3659..05bc269e64a7f0 100644 --- a/test/integration/roles/test_rax_clb_nodes/tasks/main.yml +++ b/test/integration/roles/test_rax_clb_nodes/tasks/main.yml @@ -74,6 +74,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-clb" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: Validate rax_clb creation @@ -158,6 +159,7 @@ address: '172.16.0.1' port: 80 wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_nodes - name: Validate rax_clb_nodes creds, region, load_balancer_id, address and port @@ -180,6 +182,7 @@ node_id: "{{ rax_clb_nodes.node.id }}" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb_nodes - name: Validate delete integration 1 @@ -201,6 +204,7 @@ port: 80 type: secondary wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" ignore_errors: true register: rax_clb_nodes @@ -222,6 +226,7 @@ name: "{{ rax_clb.balancer.name }}" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 3" diff --git a/test/integration/roles/test_rax_facts/tasks/main.yml b/test/integration/roles/test_rax_facts/tasks/main.yml index 374fd8c7c033ab..2627f83e5b06d1 100644 --- a/test/integration/roles/test_rax_facts/tasks/main.yml +++ b/test/integration/roles/test_rax_facts/tasks/main.yml @@ -122,6 +122,7 @@ flavor: "{{ rackspace_flavor }}" name: "{{ resource_prefix }}-rax_facts" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate build @@ -267,6 +268,7 @@ name: "{{ resource_prefix }}-rax_facts" state: absent wait: true + wait_timeout: "{{ 
rackspace_wait_timeout }}" register: rax - name: "Validate delete" diff --git a/test/integration/roles/test_rax_meta/tasks/main.yml b/test/integration/roles/test_rax_meta/tasks/main.yml index b31336fc54a472..fe1ae3f65b5a8a 100644 --- a/test/integration/roles/test_rax_meta/tasks/main.yml +++ b/test/integration/roles/test_rax_meta/tasks/main.yml @@ -119,6 +119,7 @@ meta: foo: bar wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: Validate build @@ -322,6 +323,7 @@ - "{{ rax.success.0.rax_id }}" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax - name: "Validate delete" diff --git a/test/integration/roles/test_rax_scaling_group/tasks/main.yml b/test/integration/roles/test_rax_scaling_group/tasks/main.yml index f9189b5ba51451..42ba1c32069bdf 100644 --- a/test/integration/roles/test_rax_scaling_group/tasks/main.yml +++ b/test/integration/roles/test_rax_scaling_group/tasks/main.yml @@ -269,6 +269,7 @@ region: "{{ rackspace_region }}" name: "{{ resource_prefix }}-clb" wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: Validate rax_clb creation @@ -867,6 +868,7 @@ name: "{{ rax_clb.balancer.name }}" state: absent wait: true + wait_timeout: "{{ rackspace_wait_timeout }}" register: rax_clb - name: "Validate delete integration 3" From d88a42570e459d962c33ceb92466f64075fdc808 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Mon, 29 Jun 2015 21:56:36 +0200 Subject: [PATCH 1035/3617] Adds a check for 'not None' values when iterating ElastiCache SecurityGroups keys --- plugins/inventory/ec2.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index e07efac4c0cf4c..081990cd8f94ca 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -791,7 +791,11 @@ def add_elasticache_cluster(self, cluster, region): # Inventory: Group by security group if 
self.group_by_security_group and not is_redis: - if 'SecurityGroups' in cluster: + + # Check for the existance of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. + if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) @@ -879,7 +883,11 @@ def add_elasticache_node(self, node, cluster, region): # Inventory: Group by security group if self.group_by_security_group: - if 'SecurityGroups' in cluster: + + # Check for the existance of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. + if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) From 4059904a18fef4a3e3b4c139f12c1367b39ed4d7 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 29 Jun 2015 15:39:57 -0500 Subject: [PATCH 1036/3617] Add splitext filter --- docsite/rst/playbooks_filters.rst | 5 +++++ lib/ansible/plugins/filter/core.py | 1 + 2 files changed, 6 insertions(+) diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst index 4e35cee522ec88..10ea62f6a251c9 100644 --- a/docsite/rst/playbooks_filters.rst +++ b/docsite/rst/playbooks_filters.rst @@ -350,6 +350,11 @@ To get the relative path of a link, from a start point (new in version 1.7):: {{ path | relpath('/etc') }} +To get the root and extension of a path or filename (new in version 2.0):: + + # with path == 'nginx.conf' the return would be ('nginx', '.conf') + {{ path | splitext }} + To work with Base64 encoded strings:: {{ encoded | b64decode }} diff --git 
a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py index a717c5bd817bce..e8e3e17f7753ab 100644 --- a/lib/ansible/plugins/filter/core.py +++ b/lib/ansible/plugins/filter/core.py @@ -316,6 +316,7 @@ def filters(self): 'expanduser': partial(unicode_wrap, os.path.expanduser), 'realpath': partial(unicode_wrap, os.path.realpath), 'relpath': partial(unicode_wrap, os.path.relpath), + 'splitext': partial(unicode_wrap, os.path.splitext), # failure testing 'failed' : failed, From df77d087a52cd7ab004ef1d1b9be6606f1962f3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Victor=20Schr=C3=B6der?= Date: Mon, 29 Jun 2015 23:28:55 +0200 Subject: [PATCH 1037/3617] Adds the check for 'not None' also when building host_info dict for ElastiCache clusters, nodes and replication groups --- plugins/inventory/ec2.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/plugins/inventory/ec2.py b/plugins/inventory/ec2.py index 081990cd8f94ca..864a64f5edcedb 100755 --- a/plugins/inventory/ec2.py +++ b/plugins/inventory/ec2.py @@ -1117,10 +1117,14 @@ def get_host_info_dict_from_describe_dict(self, describe_dict): # Target: Almost everything elif key == 'ec2_security_groups': - sg_ids = [] - for sg in value: - sg_ids.append(sg['SecurityGroupId']) - host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) + + # Skip if SecurityGroups is None + # (it is possible to have the key defined but no value in it). 
+ if value is not None: + sg_ids = [] + for sg in value: + sg_ids.append(sg['SecurityGroupId']) + host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) # Target: Everything # Preserve booleans and integers From 2d1cb7f3288a62403286e1ce410f16c11aaf1bb1 Mon Sep 17 00:00:00 2001 From: Henry Finucane Date: Mon, 29 Jun 2015 14:55:11 -0700 Subject: [PATCH 1038/3617] Treat generators like lists and tuples --- lib/ansible/plugins/filter/ipaddr.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/filter/ipaddr.py b/lib/ansible/plugins/filter/ipaddr.py index 5d9d6e3136728d..1b34f0a1c4e901 100644 --- a/lib/ansible/plugins/filter/ipaddr.py +++ b/lib/ansible/plugins/filter/ipaddr.py @@ -16,6 +16,7 @@ # along with Ansible. If not, see . from functools import partial +import types try: import netaddr @@ -319,7 +320,7 @@ def ipaddr(value, query = '', version = False, alias = 'ipaddr'): return False # Check if value is a list and parse each element - elif isinstance(value, (list, tuple)): + elif isinstance(value, (list, tuple, types.GeneratorType)): _ret = [] for element in value: @@ -457,7 +458,7 @@ def ipaddr(value, query = '', version = False, alias = 'ipaddr'): def ipwrap(value, query = ''): try: - if isinstance(value, (list, tuple)): + if isinstance(value, (list, tuple, types.GeneratorType)): _ret = [] for element in value: if ipaddr(element, query, version = False, alias = 'ipwrap'): From 2a5fbd85700b719df9c2af22f0ccc61633ee4ac6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 29 Jun 2015 15:41:51 -0400 Subject: [PATCH 1039/3617] Winrm fixes for devel * Include fixes for winrm connection plugin from v1 code * Fixing shell plugin use --- lib/ansible/plugins/action/__init__.py | 37 +++++++-------------- lib/ansible/plugins/connections/__init__.py | 13 ++++++++ lib/ansible/plugins/connections/winrm.py | 4 +-- lib/ansible/plugins/shell/powershell.py | 16 +++++++-- 4 files changed, 40 insertions(+), 30 
deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index d98c980e494beb..83f0f4765ca652 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -31,7 +31,6 @@ from ansible.errors import AnsibleError from ansible.executor.module_common import modify_module from ansible.parsing.utils.jsonify import jsonify -from ansible.plugins import shell_loader from ansible.utils.debug import debug from ansible.utils.unicode import to_bytes @@ -53,18 +52,6 @@ def __init__(self, task, connection, connection_info, loader, templar, shared_lo self._templar = templar self._shared_loader_obj = shared_loader_obj - # load the shell plugin for this action/connection - if self._connection_info.shell: - shell_type = self._connection_info.shell - elif hasattr(connection, '_shell'): - shell_type = getattr(connection, '_shell') - else: - shell_type = os.path.basename(C.DEFAULT_EXECUTABLE) - - self._shell = shell_loader.get(shell_type) - if not self._shell: - raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." 
% shell_type) - self._supports_check_mode = True def _configure_module(self, module_name, module_args, task_vars=dict()): @@ -104,7 +91,7 @@ def _compute_environment_string(self): # if type(enviro) != dict: # raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro) - return self._shell.env_prefix(**enviro) + return self._connection._shell.env_prefix(**enviro) def _early_needs_tmp_path(self): ''' @@ -151,7 +138,7 @@ def _make_tmp_path(self): if self._connection_info.remote_user != 'root' or self._connection_info.become and self._connection_info.become_user != 'root': tmp_mode = 'a+rx' - cmd = self._shell.mkdtemp(basefile, use_system_tmp, tmp_mode) + cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode) debug("executing _low_level_execute_command to create the tmp path") result = self._low_level_execute_command(cmd, None, sudoable=False) debug("done with creation of tmp path") @@ -176,8 +163,8 @@ def _make_tmp_path(self): raise AnsibleError(output) # FIXME: do we still need to do this? - #rc = self._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '') - rc = self._shell.join_path(result['stdout'].strip(), '').splitlines()[-1] + #rc = self._connection._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '') + rc = self._connection._shell.join_path(result['stdout'].strip(), '').splitlines()[-1] # Catch failure conditions, files should never be # written to locations in /. @@ -190,7 +177,7 @@ def _remove_tmp_path(self, tmp_path): '''Remove a temporary path we created. ''' if tmp_path and "-tmp-" in tmp_path: - cmd = self._shell.remove(tmp_path, recurse=True) + cmd = self._connection._shell.remove(tmp_path, recurse=True) # If we have gotten here we have a working ssh configuration. # If ssh breaks we could leave tmp directories out on the remote system. 
debug("calling _low_level_execute_command to remove the tmp path") @@ -229,7 +216,7 @@ def _remote_chmod(self, tmp, mode, path, sudoable=False): Issue a remote chmod command ''' - cmd = self._shell.chmod(mode, path) + cmd = self._connection._shell.chmod(mode, path) debug("calling _low_level_execute_command to chmod the remote path") res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable) debug("done with chmod call") @@ -244,7 +231,7 @@ def _remote_checksum(self, tmp, path): # variable manager data #python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python') python_interp = 'python' - cmd = self._shell.checksum(path, python_interp) + cmd = self._connection._shell.checksum(path, python_interp) debug("calling _low_level_execute_command to get the remote checksum") data = self._low_level_execute_command(cmd, tmp, sudoable=True) debug("done getting the remote checksum") @@ -280,7 +267,7 @@ def _remote_expand_user(self, path, tmp): if self._connection_info.become and self._connection_info.become_user: expand_path = '~%s' % self._connection_info.become_user - cmd = self._shell.expand_user(expand_path) + cmd = self._connection._shell.expand_user(expand_path) debug("calling _low_level_execute_command to expand the remote user path") data = self._low_level_execute_command(cmd, tmp, sudoable=False) debug("done expanding the remote user path") @@ -293,7 +280,7 @@ def _remote_expand_user(self, path, tmp): return path if len(split_path) > 1: - return self._shell.join_path(initial_fragment, *split_path[1:]) + return self._connection._shell.join_path(initial_fragment, *split_path[1:]) else: return initial_fragment @@ -346,7 +333,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var remote_module_path = None if not tmp and self._late_needs_tmp_path(tmp, module_style): tmp = self._make_tmp_path() - remote_module_path = self._shell.join_path(tmp, module_name) + remote_module_path = 
self._connection._shell.join_path(tmp, module_name) # FIXME: async stuff here? #if (module_style != 'new' or async_jid is not None or not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES): @@ -379,7 +366,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var # not sudoing or sudoing to root, so can cleanup files in the same step rm_tmp = tmp - cmd = self._shell.build_module_command(environment_string, shebang, cmd, rm_tmp) + cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, rm_tmp) cmd = cmd.strip() sudoable = True @@ -396,7 +383,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var if self._connection_info.become and self._connection_info.become_user != 'root': # not sudoing to root, so maybe can't delete files as that other user # have to clean up temp files as original user in a second step - cmd2 = self._shell.remove(tmp, recurse=True) + cmd2 = self._connection._shell.remove(tmp, recurse=True) self._low_level_execute_command(cmd2, tmp, sudoable=False) try: diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py index e6abc911021600..449c9b9e696612 100644 --- a/lib/ansible/plugins/connections/__init__.py +++ b/lib/ansible/plugins/connections/__init__.py @@ -31,6 +31,7 @@ from ansible import constants as C from ansible.errors import AnsibleError +from ansible.plugins import shell_loader # FIXME: this object should be created upfront and passed through # the entire chain of calls to here, as there are other things @@ -71,6 +72,18 @@ def __init__(self, connection_info, new_stdin, *args, **kwargs): self.success_key = None self.prompt = None + # load the shell plugin for this action/connection + if connection_info.shell: + shell_type = connection_info.shell + elif hasattr(self, '_shell_type'): + shell_type = getattr(self, '_shell_type') + else: + shell_type = 
os.path.basename(C.DEFAULT_EXECUTABLE) + + self._shell = shell_loader.get(shell_type) + if not self._shell: + raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type) + def _become_method_supported(self): ''' Checks if the current class supports this privilege escalation method ''' diff --git a/lib/ansible/plugins/connections/winrm.py b/lib/ansible/plugins/connections/winrm.py index 3fe769617e1c43..68103cd71d08f7 100644 --- a/lib/ansible/plugins/connections/winrm.py +++ b/lib/ansible/plugins/connections/winrm.py @@ -47,7 +47,6 @@ from ansible.utils.path import makedirs_safe from ansible.utils.unicode import to_bytes - class Connection(ConnectionBase): '''WinRM connections over HTTP/HTTPS.''' @@ -63,8 +62,7 @@ def __init__(self, *args, **kwargs): self.protocol = None self.shell_id = None self.delegate = None - - self._shell = shell_loader.get('powershell') + self._shell_type = 'powershell' # TODO: Add runas support self.become_methods_supported=[] diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py index e4331e46c6559d..3377d5786f3fa1 100644 --- a/lib/ansible/plugins/shell/powershell.py +++ b/lib/ansible/plugins/shell/powershell.py @@ -59,12 +59,24 @@ def mkdtemp(self, basefile, system=False, mode=None): # FIXME: Support system temp path! return self._encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile) - def md5(self, path): + def expand_user(self, user_home_path): + # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does + # not seem to work remotely, though by default we are always starting + # in the user's home directory. 
+ if user_home_path == '~': + script = 'Write-Host (Get-Location).Path' + elif user_home_path.startswith('~\\'): + script = 'Write-Host ((Get-Location).Path + "%s")' % _escape(user_home_path[1:]) + else: + script = 'Write-Host "%s"' % _escape(user_home_path) + return self._encode_script(script) + + def checksum(self, path, *args, **kwargs): path = self._escape(path) script = ''' If (Test-Path -PathType Leaf "%(path)s") { - $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider; $fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); $fp.Dispose(); From 927072546b4ffb12d6642643d44551de945b390f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 29 Jun 2015 22:49:02 -0400 Subject: [PATCH 1040/3617] Fixing up some issues with plugin loading --- lib/ansible/executor/task_queue_manager.py | 7 ++++--- lib/ansible/playbook/role/__init__.py | 4 +++- lib/ansible/plugins/strategies/__init__.py | 3 ++- lib/ansible/template/__init__.py | 4 +++- 4 files changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index b1d905be7ad712..169b08c3ecee2a 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -60,6 +60,7 @@ def __init__(self, inventory, variable_manager, loader, display, options, passwo self._options = options self._stats = AggregateStats() self.passwords = passwords + self._stdout_callback = stdout_callback # a special flag to help us exit cleanly self._terminated = False @@ -73,9 +74,6 @@ def __init__(self, inventory, variable_manager, loader, display, options, passwo self._final_q = multiprocessing.Queue() - # load callback plugins - self._callback_plugins = 
self._load_callbacks(stdout_callback) - # create the pool of worker threads, based on the number of forks specified try: fileno = sys.stdin.fileno() @@ -206,6 +204,9 @@ def run(self, play): are done with the current task). ''' + # load callback plugins + self._callback_plugins = self._load_callbacks(self._stdout_callback) + if play.vars_prompt: for var in play.vars_prompt: if 'name' not in var: diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index c24e6499d7ff21..c84f0f8677576c 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -37,7 +37,7 @@ from ansible.playbook.role.include import RoleInclude from ansible.playbook.role.metadata import RoleMetadata from ansible.playbook.taggable import Taggable -from ansible.plugins import get_all_plugin_loaders +from ansible.plugins import get_all_plugin_loaders, push_basedir from ansible.utils.vars import combine_vars @@ -136,6 +136,8 @@ def _load_role_data(self, role_include, parent_role=None): self._variable_manager = role_include.get_variable_manager() self._loader = role_include.get_loader() + push_basedir(self._role_path) + if parent_role: self.add_parent(parent_role) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 180cf3245d1f31..6eae821682979e 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -29,7 +29,7 @@ from ansible.playbook.handler import Handler from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.role import ROLE_CACHE, hash_params -from ansible.plugins import filter_loader, lookup_loader, module_loader +from ansible.plugins import _basedirs, filter_loader, lookup_loader, module_loader from ansible.utils.debug import debug @@ -44,6 +44,7 @@ class SharedPluginLoaderObj: the forked processes over the queue easier ''' def __init__(self): + self.basdirs = _basedirs[:] self.filter_loader = 
filter_loader self.lookup_loader = lookup_loader self.module_loader = module_loader diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 1841560abbad6f..8ce243f55f22a5 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -29,7 +29,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable -from ansible.plugins import filter_loader, lookup_loader +from ansible.plugins import _basedirs, filter_loader, lookup_loader from ansible.template.safe_eval import safe_eval from ansible.template.template import AnsibleJ2Template from ansible.template.vars import AnsibleJ2Vars @@ -60,6 +60,8 @@ def __init__(self, loader, shared_loader_obj=None, variables=dict()): self._available_variables = variables if shared_loader_obj: + global _basedirs + _basedirs = shared_loader_obj.basedirs[:] self._filter_loader = getattr(shared_loader_obj, 'filter_loader') self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader') else: From 9785e5397eb0c761bcbb5655ef3a3dffe1f301d0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 29 Jun 2015 22:51:53 -0400 Subject: [PATCH 1041/3617] Fix typo in SharedObjectLoader field basedirs --- lib/ansible/plugins/strategies/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 6eae821682979e..0b78a245dd42c3 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -44,7 +44,7 @@ class SharedPluginLoaderObj: the forked processes over the queue easier ''' def __init__(self): - self.basdirs = _basedirs[:] + self.basedirs = _basedirs[:] self.filter_loader = filter_loader self.lookup_loader = lookup_loader self.module_loader = module_loader From 7416e0054183ae6335d13087eb98015f99239a2c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 29 Jun 2015 
13:26:01 -0400 Subject: [PATCH 1042/3617] fixed condition for loading whitelisted callbacks --- lib/ansible/executor/task_queue_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 169b08c3ecee2a..c3143a3004ea53 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -142,7 +142,7 @@ def _load_callbacks(self, stdout_callback): if callback_name != stdout_callback or stdout_callback_loaded: continue stdout_callback_loaded = True - elif C.DEFAULT_CALLBACK_WHITELIST is not None and callback_name not in C.DEFAULT_CALLBACK_WHITELIST: + elif C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST: continue loaded_plugins.append(callback_plugin(self._display)) From a41caf722d7e3ac18c6f623dcc53a9aa2978d332 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 29 Jun 2015 13:26:18 -0400 Subject: [PATCH 1043/3617] added example of whitelisted callback --- examples/ansible.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 85eada17cc8545..3800a9ea464b05 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -38,6 +38,9 @@ gathering = implicit # uncomment this to disable SSH key host checking #host_key_checking = False +# enable additional callbacks +#callback_whitelist = timer + # change this for alternative sudo implementations sudo_exe = sudo From d149ea52228744f9885564da970d9f8339de36d5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 29 Jun 2015 13:26:30 -0400 Subject: [PATCH 1044/3617] ported timer.py callback to v2 --- lib/ansible/plugins/callback/timer.py | 35 +++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 lib/ansible/plugins/callback/timer.py diff --git a/lib/ansible/plugins/callback/timer.py b/lib/ansible/plugins/callback/timer.py new file mode 100644 index 
00000000000000..4b28a19af0938b --- /dev/null +++ b/lib/ansible/plugins/callback/timer.py @@ -0,0 +1,35 @@ +import os +import datetime +from datetime import datetime, timedelta + +from ansible.plugins.callback import CallbackBase + +class CallbackModule(CallbackBase): + """ + This callback module tells you how long your plays ran for. + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + + start_time = datetime.now() + + def __init__(self, display): + + super(CallbackModule, self).__init__(display) + + start_time = datetime.now() + self._display.warning("Timerv2 plugin is active from included callbacks.") + + def days_hours_minutes_seconds(self, timedelta): + minutes = (timedelta.seconds//60)%60 + r_seconds = timedelta.seconds - (minutes * 60) + return timedelta.days, timedelta.seconds//3600, minutes, r_seconds + + def playbook_on_stats(self, stats): + self.v2_playbook_on_stats(stats) + + def v2_playbook_on_stats(self, stats): + end_time = datetime.now() + timedelta = end_time - self.start_time + self._display.display("Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(timedelta))) + From 62e780c74a67cd796fca00df5d7180eefdb1bde3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 29 Jun 2015 13:27:06 -0400 Subject: [PATCH 1045/3617] moved to actual live plugin directory and ported to v2 --- plugins/callbacks/timer.py | 27 --------------------------- 1 file changed, 27 deletions(-) delete mode 100644 plugins/callbacks/timer.py diff --git a/plugins/callbacks/timer.py b/plugins/callbacks/timer.py deleted file mode 100644 index bca867c26383b6..00000000000000 --- a/plugins/callbacks/timer.py +++ /dev/null @@ -1,27 +0,0 @@ -import os -import datetime -from datetime import datetime, timedelta - - -class CallbackModule(object): - """ - This callback module tells you how long your plays ran for. - """ - - start_time = datetime.now() - - def __init__(self): - start_time = datetime.now() - print "Timer plugin is active." 
- - def days_hours_minutes_seconds(self, timedelta): - minutes = (timedelta.seconds//60)%60 - r_seconds = timedelta.seconds - (minutes * 60) - return timedelta.days, timedelta.seconds//3600, minutes, r_seconds - - def playbook_on_stats(self, stats): - end_time = datetime.now() - timedelta = end_time - self.start_time - print "Playbook run took %s days, %s hours, %s minutes, %s seconds" % (self.days_hours_minutes_seconds(timedelta)) - - From f7da725d53254d588b5a1ddf4390b2d8c4b3ef9f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 29 Jun 2015 20:46:04 -0400 Subject: [PATCH 1046/3617] added bundler to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9226e5674ac426..bc3a1a796e53d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ New Modules: * amazon: elasticache_subnet_group * amazon: iam * amazon: iam_policy + * bundler * circonus_annotation * consul * consul_acl From 0cfebb87602eea69354491ed0305e35a267d7d39 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 30 Jun 2015 07:17:50 -0400 Subject: [PATCH 1047/3617] Fixes a bug whereby tags are expected to be a set Fixes #11424 Fixes #11429 --- lib/ansible/playbook/taggable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index d140f52a12eec0..1e9c6e82bfc7e4 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -68,7 +68,7 @@ def evaluate_tags(self, only_tags, skip_tags, all_vars): else: tags = set([tags]) else: - tags = [i for i,_ in itertools.groupby(tags)] + tags = set([i for i,_ in itertools.groupby(tags)]) else: # this makes intersection work for untagged tags = self.__class__.untagged From 43f81c7c0178178564517448227742a85d819e29 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 09:38:12 -0500 Subject: [PATCH 1048/3617] Fix YAML formatting issue for rax integration tests --- 
test/integration/roles/test_rax_cbs/tasks/main.yml | 4 ++-- .../roles/test_rax_cbs_attachments/tasks/main.yml | 2 +- test/integration/roles/test_rax_cdb/tasks/main.yml | 4 ++-- .../roles/test_rax_cdb_database/tasks/main.yml | 4 ++-- test/integration/roles/test_rax_clb/tasks/main.yml | 10 +++++----- .../roles/test_rax_clb_nodes/tasks/main.yml | 2 +- test/integration/roles/test_rax_facts/tasks/main.yml | 2 +- test/integration/roles/test_rax_keypair/tasks/main.yml | 2 +- test/integration/roles/test_rax_meta/tasks/main.yml | 2 +- test/integration/roles/test_rax_network/tasks/main.yml | 4 ++-- .../roles/test_rax_scaling_group/tasks/main.yml | 4 ++-- 11 files changed, 20 insertions(+), 20 deletions(-) diff --git a/test/integration/roles/test_rax_cbs/tasks/main.yml b/test/integration/roles/test_rax_cbs/tasks/main.yml index ae6f5c68e35084..4df926c1a4a2fa 100644 --- a/test/integration/roles/test_rax_cbs/tasks/main.yml +++ b/test/integration/roles/test_rax_cbs/tasks/main.yml @@ -8,7 +8,7 @@ assert: that: - rax_cbs|failed - - rax_cbs.msg == 'missing required arguments: name' + - 'rax_cbs.msg == "missing required arguments: name"' # ============================================================ @@ -165,7 +165,7 @@ assert: that: - rax_cbs|failed - - "rax_cbs.msg == 'value of volume_type must be one of: SSD,SATA, got: fail'" + - 'rax_cbs.msg == "value of volume_type must be one of: SSD,SATA, got: fail"' # ============================================================ diff --git a/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml b/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml index 0321fe10e17652..9c8933cb6a1a9c 100644 --- a/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml +++ b/test/integration/roles/test_rax_cbs_attachments/tasks/main.yml @@ -8,7 +8,7 @@ assert: that: - rax_cbs_attachments|failed - - rax_cbs_attachments.msg == 'missing required arguments: server,volume,device' + - 'rax_cbs_attachments.msg == "missing required arguments: 
server,volume,device"' # ============================================================ diff --git a/test/integration/roles/test_rax_cdb/tasks/main.yml b/test/integration/roles/test_rax_cdb/tasks/main.yml index f5336e54d0156b..3ba86375d34acf 100644 --- a/test/integration/roles/test_rax_cdb/tasks/main.yml +++ b/test/integration/roles/test_rax_cdb/tasks/main.yml @@ -8,7 +8,7 @@ assert: that: - rax_cdb|failed - - rax_cdb.msg == 'missing required arguments: name' + - 'rax_cdb.msg == "missing required arguments: name"' # ============================================================ @@ -60,7 +60,7 @@ assert: that: - rax_cdb|failed - - rax_cdb.msg == 'missing required arguments: name' + - 'rax_cdb.msg == "missing required arguments: name"' # ============================================================ diff --git a/test/integration/roles/test_rax_cdb_database/tasks/main.yml b/test/integration/roles/test_rax_cdb_database/tasks/main.yml index 548641b6ebf4e7..cee0a4bbc3f007 100644 --- a/test/integration/roles/test_rax_cdb_database/tasks/main.yml +++ b/test/integration/roles/test_rax_cdb_database/tasks/main.yml @@ -8,7 +8,7 @@ assert: that: - rax_cdb_database|failed - - rax_cdb_database.msg == 'missing required arguments: name,cdb_id' + - 'rax_cdb_database.msg == "missing required arguments: name,cdb_id"' # ============================================================ @@ -24,7 +24,7 @@ assert: that: - rax_cdb_database|failed - - rax_cdb_database.msg == 'missing required arguments: cdb_id' + - 'rax_cdb_database.msg == "missing required arguments: cdb_id"' # ============================================================ diff --git a/test/integration/roles/test_rax_clb/tasks/main.yml b/test/integration/roles/test_rax_clb/tasks/main.yml index ae6776b56f42a5..25472b20cf8084 100644 --- a/test/integration/roles/test_rax_clb/tasks/main.yml +++ b/test/integration/roles/test_rax_clb/tasks/main.yml @@ -8,7 +8,7 @@ assert: that: - rax_clb|failed - - rax_clb.msg == 'missing required arguments: 
name' + - 'rax_clb.msg == "missing required arguments: name"' # ============================================================ @@ -60,7 +60,7 @@ assert: that: - rax_clb|failed - - rax_clb.msg == 'missing required arguments: name' + - 'rax_clb.msg == "missing required arguments: name"' # ============================================================ @@ -378,7 +378,7 @@ assert: that: - rax_clb|failed - - "rax_clb.msg == 'value of type must be one of: PUBLIC,SERVICENET, got: BAD'" + - 'rax_clb.msg == "value of type must be one of: PUBLIC,SERVICENET, got: BAD"' # ============================================================ @@ -401,7 +401,7 @@ assert: that: - rax_clb|failed - - "rax_clb.msg == 'value of protocol must be one of: DNS_TCP,DNS_UDP,FTP,HTTP,HTTPS,IMAPS,IMAPv4,LDAP,LDAPS,MYSQL,POP3,POP3S,SMTP,TCP,TCP_CLIENT_FIRST,UDP,UDP_STREAM,SFTP, got: BAD'" + - 'rax_clb.msg == "value of protocol must be one of: DNS_TCP,DNS_UDP,FTP,HTTP,HTTPS,IMAPS,IMAPv4,LDAP,LDAPS,MYSQL,POP3,POP3S,SMTP,TCP,TCP_CLIENT_FIRST,UDP,UDP_STREAM,SFTP, got: BAD"' # ============================================================ @@ -424,7 +424,7 @@ assert: that: - rax_clb|failed - - "rax_clb.msg == 'value of algorithm must be one of: RANDOM,LEAST_CONNECTIONS,ROUND_ROBIN,WEIGHTED_LEAST_CONNECTIONS,WEIGHTED_ROUND_ROBIN, got: BAD'" + - 'rax_clb.msg == "value of algorithm must be one of: RANDOM,LEAST_CONNECTIONS,ROUND_ROBIN,WEIGHTED_LEAST_CONNECTIONS,WEIGHTED_ROUND_ROBIN, got: BAD"' # ============================================================ diff --git a/test/integration/roles/test_rax_clb_nodes/tasks/main.yml b/test/integration/roles/test_rax_clb_nodes/tasks/main.yml index 05bc269e64a7f0..9364dc05a05e39 100644 --- a/test/integration/roles/test_rax_clb_nodes/tasks/main.yml +++ b/test/integration/roles/test_rax_clb_nodes/tasks/main.yml @@ -8,7 +8,7 @@ assert: that: - rax_clb_nodes|failed - - rax_clb_nodes.msg == 'missing required arguments: load_balancer_id' + - 'rax_clb_nodes.msg == "missing required 
arguments: load_balancer_id"' # ============================================================ diff --git a/test/integration/roles/test_rax_facts/tasks/main.yml b/test/integration/roles/test_rax_facts/tasks/main.yml index 2627f83e5b06d1..07969d597688ff 100644 --- a/test/integration/roles/test_rax_facts/tasks/main.yml +++ b/test/integration/roles/test_rax_facts/tasks/main.yml @@ -8,7 +8,7 @@ assert: that: - rax_facts|failed - - rax_facts.msg == 'one of the following is required: address,id,name' + - 'rax_facts.msg == "one of the following is required: address,id,name"' # ============================================================ diff --git a/test/integration/roles/test_rax_keypair/tasks/main.yml b/test/integration/roles/test_rax_keypair/tasks/main.yml index f7f10a467838b6..84ba5b5a58440a 100644 --- a/test/integration/roles/test_rax_keypair/tasks/main.yml +++ b/test/integration/roles/test_rax_keypair/tasks/main.yml @@ -8,7 +8,7 @@ assert: that: - rax_keypair|failed - - rax_keypair.msg == 'missing required arguments: name' + - 'rax_keypair.msg == "missing required arguments: name"' # ============================================================ diff --git a/test/integration/roles/test_rax_meta/tasks/main.yml b/test/integration/roles/test_rax_meta/tasks/main.yml index fe1ae3f65b5a8a..92d38cf126e10d 100644 --- a/test/integration/roles/test_rax_meta/tasks/main.yml +++ b/test/integration/roles/test_rax_meta/tasks/main.yml @@ -8,7 +8,7 @@ assert: that: - rax_meta|failed - - rax_meta.msg == 'one of the following is required: address,id,name' + - 'rax_meta.msg == "one of the following is required: address,id,name"' # ============================================================ diff --git a/test/integration/roles/test_rax_network/tasks/main.yml b/test/integration/roles/test_rax_network/tasks/main.yml index 27eda8b273e771..47da22a92d351f 100644 --- a/test/integration/roles/test_rax_network/tasks/main.yml +++ b/test/integration/roles/test_rax_network/tasks/main.yml @@ -8,7 +8,7 
@@ assert: that: - rax_network|failed - - rax_network.msg == 'missing required arguments: label' + - 'rax_network.msg == "missing required arguments: label"' # ============================================================ @@ -61,7 +61,7 @@ assert: that: - rax_network|failed - - rax_network.msg == 'missing required arguments: cidr' + - 'rax_network.msg == "missing required arguments: cidr"' # ============================================================ diff --git a/test/integration/roles/test_rax_scaling_group/tasks/main.yml b/test/integration/roles/test_rax_scaling_group/tasks/main.yml index 42ba1c32069bdf..efe3f86ee77d01 100644 --- a/test/integration/roles/test_rax_scaling_group/tasks/main.yml +++ b/test/integration/roles/test_rax_scaling_group/tasks/main.yml @@ -622,7 +622,7 @@ that: - rax_scaling_group|success - not rax_scaling_group|changed - - rax_scaling_group.autoscale_group.launchConfiguration.args.server['OS-DCF:diskConfig'] == 'AUTO' + - "rax_scaling_group.autoscale_group.launchConfiguration.args.server['OS-DCF:diskConfig'] == 'AUTO'" - name: Change disk_config 2 rax_scaling_group: @@ -644,7 +644,7 @@ that: - rax_scaling_group|success - rax_scaling_group|changed - - rax_scaling_group.autoscale_group.launchConfiguration.args.server['OS-DCF:diskConfig'] == 'MANUAL' + - "rax_scaling_group.autoscale_group.launchConfiguration.args.server['OS-DCF:diskConfig'] == 'MANUAL'" # ============================================================ From 65fdcf8b9df93a7804e35203c119c593f919f7e7 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 10:13:40 -0500 Subject: [PATCH 1049/3617] Check for name or pkg when templating squashed items. 
Fixes #11430 --- lib/ansible/executor/task_executor.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 8405389593b01b..1f46b0c705a376 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -179,15 +179,15 @@ def _squash_items(self, items, variables): Squash items down to a comma-separated list for certain modules which support it (typically package management modules). ''' - if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS: final_items = [] for item in items: variables['item'] = item templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) if self._task.evaluate_conditional(templar, variables): - if templar._contains_vars(self._task.args['name']): - new_item = templar.template(self._task.args['name']) + name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None) + if templar._contains_vars(name): + new_item = templar.template(name) final_items.append(new_item) else: final_items.append(item) From 2cd3a1be00e595ab2d26d196e7d18859aff6f02f Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 11:02:33 -0500 Subject: [PATCH 1050/3617] assertRaises should be given an exception type. 
Fixes 11441 --- test/units/parsing/yaml/test_loader.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/units/parsing/yaml/test_loader.py b/test/units/parsing/yaml/test_loader.py index 37eeabff83b52a..8fd617eea1954a 100644 --- a/test/units/parsing/yaml/test_loader.py +++ b/test/units/parsing/yaml/test_loader.py @@ -29,6 +29,11 @@ from ansible.parsing.yaml.loader import AnsibleLoader +try: + from _yaml import ParserError +except ImportError: + from yaml.parser import ParserError + class TestAnsibleLoaderBasic(unittest.TestCase): @@ -123,7 +128,7 @@ def test_parse_short_dict(self): def test_error_conditions(self): stream = StringIO("""{""") loader = AnsibleLoader(stream, 'myfile.yml') - self.assertRaises(loader.get_single_data) + self.assertRaises(ParserError, loader.get_single_data) def test_front_matter(self): stream = StringIO("""---\nfoo: bar""") From 2576f480fd02ab9cdec33bb879b6b8477ffb706a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 30 Jun 2015 13:57:47 -0400 Subject: [PATCH 1051/3617] Restoring a state check to play_iterator, which otherwise broke block functionality --- lib/ansible/executor/play_iterator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 585c6556eb390f..8794e7e403418c 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -242,7 +242,7 @@ def mark_host_failed(self, host): self._host_states[host.name] = s def get_failed_hosts(self): - return dict((host, True) for (host, state) in self._host_states.iteritems() if state.fail_state != self.FAILED_NONE) + return dict((host, True) for (host, state) in self._host_states.iteritems() if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE) def get_original_task(self, host, task): ''' From ec4d1b11df5d2dc4f9bf13171eb83ec1c966b3e5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 
30 Jun 2015 14:44:41 -0400 Subject: [PATCH 1052/3617] Fix some more handler issues * Only notify handlers when the task is changed * Don't run handlers on hosts which have failed --- lib/ansible/executor/process/result.py | 2 +- lib/ansible/plugins/strategies/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 1b8f4f5d31d660..7fbee9a1b658a0 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -123,7 +123,7 @@ def run(self): self._send_result(('host_task_skipped', result)) else: # if this task is notifying a handler, do it now - if result._task.notify: + if result._task.notify and result._result.get('changed', False): # The shared dictionary for notified handlers is a proxy, which # does not detect when sub-objects within the proxy are modified. # So, per the docs, we reassign the list so the proxy picks up and diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 0b78a245dd42c3..a298b199889a2b 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -380,7 +380,7 @@ def run_handlers(self, iterator, connection_info): break self._tqm.send_callback('v2_playbook_on_handler_task_start', handler) for host in self._notified_handlers[handler_name]: - if not handler.has_triggered(host): + if not handler.has_triggered(host) and host.name not in self._tqm._failed_hosts: task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler) task_vars = self.add_tqm_variables(task_vars, play=iterator._play) self._queue_task(host, handler, task_vars, connection_info) From e89f1186e7e383eeda221af973605341202a63e8 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 30 Jun 2015 14:46:43 -0400 Subject: [PATCH 1053/3617] Fix a tiny typo --- 
lib/ansible/utils/module_docs_fragments/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index 99897eee6d86bd..753d34d37713b8 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -30,7 +30,7 @@ class ModuleDocFragment(object): auth: description: - Dictionary containing auth information as needed by the cloud's auth - plugin strategy. For the default I{password) plugin, this would contain + plugin strategy. For the default I(password) plugin, this would contain I(auth_url), I(username), I(password), I(project_name) and any information about domains if the cloud supports them. For other plugins, this param will need to contain whatever parameters that auth plugin From 4b1a14eb164e0e916fe3897397c61c9492a80cd1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 11:13:17 -0700 Subject: [PATCH 1054/3617] Fix title length (for docs formatting) --- docsite/rst/playbooks_best_practices.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index adb8d5ca7c2d9a..4347c4841f6ede 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -80,8 +80,8 @@ in your infrastructure, usage of dynamic inventory is a great idea in general. .. _staging_vs_prod: -How to Differentiate Staging vs Production -````````````````````````````````````````` +How to Differentiate Staging vs Production +`````````````````````````````````````````` If managing static inventory, it is frequently asked how to differentiate different types of environments. The following example shows a good way to do this. 
Similar methods of grouping could be adapted to dynamic inventory (for instance, consider applying the AWS From 54e7c8a3f735f929d06d07a0844a85fd082d6e08 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Jun 2015 12:50:42 -0700 Subject: [PATCH 1055/3617] Add python requirement to the documentation for openstack modules requiring shade --- lib/ansible/utils/module_docs_fragments/openstack.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py index 99897eee6d86bd..4dd89139e4b2ee 100644 --- a/lib/ansible/utils/module_docs_fragments/openstack.py +++ b/lib/ansible/utils/module_docs_fragments/openstack.py @@ -98,6 +98,7 @@ class ModuleDocFragment(object): required: false default: public requirements: + - python >= 2.7 - shade notes: - The standard OpenStack environment variables, such as C(OS_USERNAME) From 7c1d569a26b2b7a41d6b4bc9f442fbd7f8b8a188 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 30 Jun 2015 16:08:46 -0400 Subject: [PATCH 1056/3617] Make sure tags are pulled out of playbook includes properly Fixes #9862 --- lib/ansible/playbook/playbook_include.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index 1f4bddd4a32b49..f1629b4f1504a5 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -118,6 +118,8 @@ def _preprocess_include(self, ds, new_ds, k, v): # rejoin the parameter portion of the arguments and # then use parse_kv() to get a dict of params back params = parse_kv(" ".join(items[1:])) + if 'tags' in params: + new_ds['tags'] = params.pop('tags') if 'vars' in new_ds: # FIXME: see fixme above regarding merging vars raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds) From 0070e17750fa97bf69970c7be60658c698cc29d3 Mon Sep 17 00:00:00 2001 
From: Anuvrat Parashar Date: Wed, 1 Jul 2015 09:29:44 +0530 Subject: [PATCH 1057/3617] full rewrite of the paragraph. following @abadger's suggestion[1] in the comments [1] https://github.com/ansible/ansible/pull/11410#issuecomment-116049590 --- docsite/rst/playbooks_vault.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst index 745b6f21c22e68..3fbcd87369db2e 100644 --- a/docsite/rst/playbooks_vault.rst +++ b/docsite/rst/playbooks_vault.rst @@ -14,7 +14,7 @@ What Can Be Encrypted With Vault The vault feature can encrypt any structured data file used by Ansible. This can include "group_vars/" or "host_vars/" inventory variables, variables loaded by "include_vars" or "vars_files", or variable files passed on the ansible-playbook command line with "-e @file.yml" or "-e @file.json". Role variables and defaults are also included! -Because Ansible tasks, handlers, and so on are also data, these too can be encrypted with vault. If you'd not like to betray even the variables you are using, you can go as far as keeping individual task files entirely encrypted. However, that might be a little too much and could annoy your coworkers :) +Ansible tasks, handlers, and so on are also data so these can be encrypted with vault as well. If you don't want to even reveal the variables you are using you can go as far as keeping individual task files entirely encrypted. However, that might be a little too much and could annoy your coworkers :) .. _creating_files: From c6ed1ff4adccf1363e9988774f84f208eb522e9c Mon Sep 17 00:00:00 2001 From: soarpenguin Date: Wed, 1 Jul 2015 12:16:01 +0800 Subject: [PATCH 1058/3617] fix type error. 
--- lib/ansible/cli/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index c6a4e75c47d7f7..a46a40933e3f69 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -171,7 +171,7 @@ def normalize_become_options(self): self.options.become_method = 'sudo' elif self.options.su: self.options.become = True - options.become_method = 'su' + self.options.become_method = 'su' def validate_conflicts(self, vault_opts=False, runas_opts=False): From f9bf6ce4d0bd90cc08eb296aa04c1474b1870a41 Mon Sep 17 00:00:00 2001 From: Anuvrat Parashar Date: Wed, 1 Jul 2015 09:54:02 +0530 Subject: [PATCH 1059/3617] makes it more concise. @msabramos's suggestions[1] incorporated. [1] https://github.com/ansible/ansible/pull/11410#issuecomment-116319780 --- docsite/rst/playbooks_vault.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_vault.rst b/docsite/rst/playbooks_vault.rst index 3fbcd87369db2e..5cb1eb90c9c59f 100644 --- a/docsite/rst/playbooks_vault.rst +++ b/docsite/rst/playbooks_vault.rst @@ -14,7 +14,7 @@ What Can Be Encrypted With Vault The vault feature can encrypt any structured data file used by Ansible. This can include "group_vars/" or "host_vars/" inventory variables, variables loaded by "include_vars" or "vars_files", or variable files passed on the ansible-playbook command line with "-e @file.yml" or "-e @file.json". Role variables and defaults are also included! -Ansible tasks, handlers, and so on are also data so these can be encrypted with vault as well. If you don't want to even reveal the variables you are using you can go as far as keeping individual task files entirely encrypted. However, that might be a little too much and could annoy your coworkers :) +Ansible tasks, handlers, and so on are also data so these can be encrypted with vault as well. 
To hide the names of variables that you're using, you can encrypt the task files in their entirety. However, that might be a little too much and could annoy your coworkers :) .. _creating_files: From 4889d04fc623ac0a5081d1ff4d99fd236440804f Mon Sep 17 00:00:00 2001 From: Benno Joy Date: Wed, 1 Jul 2015 10:02:54 +0530 Subject: [PATCH 1060/3617] fixes 11448 , yum with with_items --- lib/ansible/executor/task_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 1f46b0c705a376..1bfc88d8f2e145 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -181,11 +181,11 @@ def _squash_items(self, items, variables): ''' if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS: final_items = [] + name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None) for item in items: variables['item'] = item templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables) if self._task.evaluate_conditional(templar, variables): - name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None) if templar._contains_vars(name): new_item = templar.template(name) final_items.append(new_item) From 0a2a9557b82bbc65813211194faeb00f43c43b40 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 1 Jul 2015 05:21:46 -0400 Subject: [PATCH 1061/3617] now allows for users to use ^D to not input a password fixes #11413 --- lib/ansible/cli/__init__.py | 64 ++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index c6a4e75c47d7f7..77d8543b38098f 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -108,21 +108,24 @@ def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_ vault_pass = None new_vault_pass = None - if ask_vault_pass: - 
vault_pass = getpass.getpass(prompt="Vault password: ") - - if ask_vault_pass and confirm_vault: - vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ") - if vault_pass != vault_pass2: - raise errors.AnsibleError("Passwords do not match") - - if ask_new_vault_pass: - new_vault_pass = getpass.getpass(prompt="New Vault password: ") - - if ask_new_vault_pass and confirm_new: - new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ") - if new_vault_pass != new_vault_pass2: - raise errors.AnsibleError("Passwords do not match") + try: + if ask_vault_pass: + vault_pass = getpass.getpass(prompt="Vault password: ") + + if ask_vault_pass and confirm_vault: + vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ") + if vault_pass != vault_pass2: + raise errors.AnsibleError("Passwords do not match") + + if ask_new_vault_pass: + new_vault_pass = getpass.getpass(prompt="New Vault password: ") + + if ask_new_vault_pass and confirm_new: + new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ") + if new_vault_pass != new_vault_pass2: + raise errors.AnsibleError("Passwords do not match") + except EOFError: + pass # enforce no newline chars at the end of passwords if vault_pass: @@ -141,20 +144,23 @@ def ask_passwords(self): becomepass = None become_prompt = '' - if op.ask_pass: - sshpass = getpass.getpass(prompt="SSH password: ") - become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper() - if sshpass: - sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') - else: - become_prompt = "%s password: " % op.become_method.upper() - - if op.become_ask_pass: - becomepass = getpass.getpass(prompt=become_prompt) - if op.ask_pass and becomepass == '': - becomepass = sshpass - if becomepass: - becomepass = to_bytes(becomepass) + try: + if op.ask_pass: + sshpass = getpass.getpass(prompt="SSH password: ") + become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper() + if 
sshpass: + sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr') + else: + become_prompt = "%s password: " % op.become_method.upper() + + if op.become_ask_pass: + becomepass = getpass.getpass(prompt=become_prompt) + if op.ask_pass and becomepass == '': + becomepass = sshpass + if becomepass: + becomepass = to_bytes(becomepass) + except EOFError: + pass return (sshpass, becomepass) From a155f65a89419f17d71b178cc1d5e0471e4ffab3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Jul 2015 07:23:26 -0700 Subject: [PATCH 1062/3617] Disable docs checks --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 4ee974e89995e4..975bc3e35d2d9d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,6 @@ install: - pip install tox PyYAML Jinja2 sphinx script: - tox - - make -C docsite all + #- make -C docsite all after_success: - coveralls From 4d4512940ded2688d9be29b415aa2785112e49bd Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Wed, 1 Jul 2015 17:15:40 +0200 Subject: [PATCH 1063/3617] Fix "AttributeError: 'ActionModule' object has no attribute '_shell'" '_shell' was removed with commit 2a5fbd85700b719df9c2af22f0ccc61633ee4ac6 --- lib/ansible/plugins/action/async.py | 6 +++--- lib/ansible/plugins/action/copy.py | 12 ++++++------ lib/ansible/plugins/action/fetch.py | 4 ++-- lib/ansible/plugins/action/patch.py | 2 +- lib/ansible/plugins/action/script.py | 2 +- lib/ansible/plugins/action/template.py | 4 ++-- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 336457b0e5fd6d..0c73cd9d5c9836 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -36,8 +36,8 @@ def run(self, tmp=None, task_vars=dict()): tmp = self._make_tmp_path() module_name = self._task.action - async_module_path = self._shell.join_path(tmp, 'async_wrapper') - remote_module_path = 
self._shell.join_path(tmp, module_name) + async_module_path = self._connection._shell.join_path(tmp, 'async_wrapper') + remote_module_path = self._connection._shell.join_path(tmp, module_name) env_string = self._compute_environment_string() @@ -51,7 +51,7 @@ def run(self, tmp=None, task_vars=dict()): self._transfer_data(async_module_path, async_module_data) self._remote_chmod(tmp, 'a+rx', async_module_path) - argsfile = self._transfer_data(self._shell.join_path(tmp, 'arguments'), json.dumps(self._task.args)) + argsfile = self._transfer_data(self._connection._shell.join_path(tmp, 'arguments'), json.dumps(self._task.args)) async_limit = self._task.async async_jid = str(random.randint(0, 999999999999)) diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index ef80275ec0c30e..e556c803156f43 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -115,8 +115,8 @@ def run(self, tmp=None, task_vars=dict()): # If it's recursive copy, destination is always a dir, # explicitly mark it so (note - copy module relies on this). - if not self._shell.path_has_trailing_slash(dest): - dest = self._shell.join_path(dest, '') + if not self._connection._shell.path_has_trailing_slash(dest): + dest = self._connection._shell.join_path(dest, '') else: source_files.append((source, os.path.basename(source))) @@ -151,10 +151,10 @@ def run(self, tmp=None, task_vars=dict()): # This is kind of optimization - if user told us destination is # dir, do path manipulation right away, otherwise we still check # for dest being a dir via remote call below. 
- if self._shell.path_has_trailing_slash(dest): - dest_file = self._shell.join_path(dest, source_rel) + if self._connection._shell.path_has_trailing_slash(dest): + dest_file = self._connection._shell.join_path(dest, source_rel) else: - dest_file = self._shell.join_path(dest) + dest_file = self._connection._shell.join_path(dest) # Attempt to get the remote checksum remote_checksum = self._remote_checksum(tmp, dest_file) @@ -167,7 +167,7 @@ def run(self, tmp=None, task_vars=dict()): return dict(failed=True, msg="can not use content with a dir as dest") else: # Append the relative source location to the destination and retry remote_checksum - dest_file = self._shell.join_path(dest, source_rel) + dest_file = self._connection._shell.join_path(dest, source_rel) remote_checksum = self._remote_checksum(tmp, dest_file) if remote_checksum != '1' and not force: diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py index 2123c5b162bd6c..bc652265ba7832 100644 --- a/lib/ansible/plugins/action/fetch.py +++ b/lib/ansible/plugins/action/fetch.py @@ -52,7 +52,7 @@ def run(self, tmp=None, task_vars=dict()): if source is None or dest is None: return dict(failed=True, msg="src and dest are required") - source = self._shell.join_path(source) + source = self._connection._shell.join_path(source) source = self._remote_expand_user(source, tmp) # calculate checksum for the remote file @@ -78,7 +78,7 @@ def run(self, tmp=None, task_vars=dict()): pass # calculate the destination name - if os.path.sep not in self._shell.join_path('a', ''): + if os.path.sep not in self._connection._shell.join_path('a', ''): source_local = source.replace('\\', '/') else: source_local = source diff --git a/lib/ansible/plugins/action/patch.py b/lib/ansible/plugins/action/patch.py index 31dbd31fa4df31..f0dbdedf05c693 100644 --- a/lib/ansible/plugins/action/patch.py +++ b/lib/ansible/plugins/action/patch.py @@ -47,7 +47,7 @@ def run(self, tmp=None, task_vars=dict()): if tmp is None or 
"-tmp-" not in tmp: tmp = self._make_tmp_path() - tmp_src = self._shell.join_path(tmp, os.path.basename(src)) + tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src)) self._connection.put_file(src, tmp_src) if self._connection_info.become and self._connection_info.become_user != 'root': diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py index 7c248455150883..b3b95db9f8e581 100644 --- a/lib/ansible/plugins/action/script.py +++ b/lib/ansible/plugins/action/script.py @@ -71,7 +71,7 @@ def run(self, tmp=None, task_vars=None): source = self._loader.path_dwim(source) # transfer the file to a remote tmp location - tmp_src = self._shell.join_path(tmp, os.path.basename(source)) + tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source)) self._connection.put_file(source, tmp_src) sudoable = True diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index e841ab939c01f2..0b93f559c352cc 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -121,8 +121,8 @@ def run(self, tmp=None, task_vars=dict()): # dest_contents = base64.b64decode(dest_contents) # else: # raise Exception("unknown encoding, failed: %s" % dest_result.result) - - xfered = self._transfer_data(self._shell.join_path(tmp, 'source'), resultant) + + xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user if self._connection_info.become and self._connection_info.become_user != 'root': From b6c52ce1158223c14a70882ed7ccf96b10bd01c4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 1 Jul 2015 11:32:44 -0400 Subject: [PATCH 1064/3617] Allow role variables to be optionally kept in a private scope --- examples/ansible.cfg | 5 +++++ lib/ansible/constants.py | 1 + lib/ansible/vars/__init__.py | 6 ++++-- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git 
a/examples/ansible.cfg b/examples/ansible.cfg index 3800a9ea464b05..ac10f62d9e9f8c 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -71,6 +71,11 @@ timeout = 10 # this can also be set to 'merge'. #hash_behaviour = replace +# by default, variables from roles will be visible in the global variable +# scope. To prevent this, the following option can be enabled, and only +# tasks and handlers within the role will see the variables there +#private_role_vars = yes + # list any Jinja2 extensions to enable here: #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index db0cabb10fa06f..b291c371b8943e 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -129,6 +129,7 @@ def shell_expand_path(path): DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER') DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True) DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace') +DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True) DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None) DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 4e8d6bda3c38ae..6531b6a3209a0e 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -197,8 +197,10 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): # whether or not vars files errors should be fatal at this # stage, or just base it on whether a host was specified? 
pass - for role in play.get_roles(): - all_vars = self._combine_vars(all_vars, role.get_vars()) + + if not C.DEFAULT_PRIVATE_ROLE_VARS: + for role in play.get_roles(): + all_vars = self._combine_vars(all_vars, role.get_vars()) if host: all_vars = self._combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict())) From 2e386deeae8cad0ab70f144b4f5aee73f814571d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 1 Jul 2015 11:55:00 -0400 Subject: [PATCH 1065/3617] Make undefined variables in debug var=foo more obvious Fixes #9935 --- lib/ansible/plugins/action/debug.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index 94056e496ce977..957e56e499d0f3 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -35,6 +35,8 @@ def run(self, tmp=None, task_vars=dict()): # FIXME: move the LOOKUP_REGEX somewhere else elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): results = self._templar.template(self._task.args['var'], convert_bare=True) + if results == self._task.args['var']: + results = "VARIABLE IS NOT DEFINED!" 
result = dict() result[self._task.args['var']] = results else: From fffb65d45fa55cc032e102bed0e7b94870d73408 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Jul 2015 09:34:17 -0700 Subject: [PATCH 1066/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 50912c9092eb56..ff69ce7912e2ce 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 50912c9092eb567c5dc61c47eecd2ccc585ae364 +Subproject commit ff69ce7912e2cee53e6737e377853a49c0482b1c diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index dec7d95d514ca8..4e48ef9ecace3a 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit dec7d95d514ca89c2784b63d836dd6fb872bdd9c +Subproject commit 4e48ef9ecace3a6eb92e3e4d2ef1a3ea2b7e33ab From dcb9b5a69fb0f8ed2a68798527bd98f467c441e3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 1 Jul 2015 12:38:56 -0400 Subject: [PATCH 1067/3617] Make --module-path work and expand tilde's in paths Fixes #9937 Fixes #9949 --- lib/ansible/cli/__init__.py | 13 +++++++++---- lib/ansible/executor/playbook_executor.py | 7 +++++++ 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 77d8543b38098f..4dc565461f270a 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -205,6 +205,10 @@ def validate_conflicts(self, vault_opts=False, runas_opts=False): "and become arguments ('--become', '--become-user', and '--ask-become-pass')" " are exclusive of each other") + @staticmethod + def expand_tilde(option, opt, value, parser): + setattr(parser.values, option.dest, os.path.expanduser(value)) + @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, 
vault_opts=False, async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None, fork_opts=False): @@ -221,11 +225,12 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, if runtask_opts: parser.add_option('-i', '--inventory-file', dest='inventory', help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST, - default=C.DEFAULT_HOST_LIST) + default=C.DEFAULT_HOST_LIST, action="callback", callback=CLI.expand_tilde, type=str) parser.add_option('--list-hosts', dest='listhosts', action='store_true', help='outputs a list of matching hosts; does not execute anything else') parser.add_option('-M', '--module-path', dest='module_path', - help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None) + help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None, + action="callback", callback=CLI.expand_tilde, type=str) parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", help="set additional variables as key=value or YAML/JSON", default=[]) @@ -239,8 +244,8 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, - dest='vault_password_file', help="vault password file") - + dest='vault_password_file', help="vault password file", action="callback", + callback=CLI.expand_tilde, type=str) if subset_opts: parser.add_option('-t', '--tags', dest='tags', default='all', diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 4e77838559c8d2..cf9b6a0290227d 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -25,6 +25,7 @@ from ansible.errors import * from 
ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook +from ansible.plugins import module_loader from ansible.template import Templar from ansible.utils.color import colorize, hostcolor @@ -46,6 +47,12 @@ def __init__(self, playbooks, inventory, variable_manager, loader, display, opti self._options = options self.passwords = passwords + # make sure the module path (if specified) is parsed and + # added to the module_loader object + if options.module_path is not None: + for path in options.module_path.split(os.pathsep): + module_loader.add_directory(path) + if options.listhosts or options.listtasks or options.listtags or options.syntax: self._tqm = None else: From cf51d0a790c50cc9429d0e00b25f4a846b67dc5d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 1 Jul 2015 15:10:25 -0400 Subject: [PATCH 1068/3617] Fixing up some check-mode stuff --- lib/ansible/module_utils/basic.py | 6 +++--- lib/ansible/plugins/action/add_host.py | 5 ++--- lib/ansible/plugins/action/assemble.py | 8 +------- lib/ansible/plugins/action/async.py | 5 ++--- lib/ansible/plugins/action/copy.py | 14 +++++++------- lib/ansible/plugins/action/fetch.py | 5 ++--- lib/ansible/plugins/action/patch.py | 6 ++---- lib/ansible/plugins/action/raw.py | 7 +++---- lib/ansible/plugins/action/script.py | 7 ++----- lib/ansible/plugins/action/template.py | 16 ---------------- lib/ansible/plugins/action/unarchive.py | 15 ++------------- 11 files changed, 26 insertions(+), 68 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index e89809ff12e68f..62caf384ff5aa6 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -908,11 +908,11 @@ def _handle_aliases(self): def _check_for_check_mode(self): for (k,v) in self.params.iteritems(): - if k == '_ansible_check_mode': + if k == '_ansible_check_mode' and v: if not self.supports_check_mode: self.exit_json(skipped=True, msg="remote module does 
not support check mode") - if self.supports_check_mode: - self.check_mode = True + self.check_mode = True + break def _check_for_no_log(self): for (k,v) in self.params.iteritems(): diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py index e28361b7145f73..d7019d0f001dc5 100644 --- a/lib/ansible/plugins/action/add_host.py +++ b/lib/ansible/plugins/action/add_host.py @@ -31,9 +31,8 @@ class ActionModule(ActionBase): def run(self, tmp=None, task_vars=dict()): - # FIXME: is this necessary in v2? - #if self.runner.noop_on_check(inject): - # return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module')) + if self._connection_info.check_mode: + return dict(skipped=True, msg='check mode not supported for this module') # Parse out any hostname:port patterns new_name = self._task.args.get('name', self._task.args.get('hostname', None)) diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py index 49f861f08e9574..82a77519d695c7 100644 --- a/lib/ansible/plugins/action/assemble.py +++ b/lib/ansible/plugins/action/assemble.py @@ -133,14 +133,8 @@ def run(self, tmp=None, task_vars=dict()): ) ) - # FIXME: checkmode stuff - #if self.runner.noop_on_check(inject): - # return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant)) - #else: - # res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject) - # res.diff = dict(after=resultant) - # return res res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp) + # FIXME: diff stuff #res.diff = dict(after=resultant) return res else: diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 0c73cd9d5c9836..d7b164935a130e 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -28,9 +28,8 @@ 
class ActionModule(ActionBase): def run(self, tmp=None, task_vars=dict()): ''' transfer the given module name, plus the async module, then run it ''' - # FIXME: noop stuff needs to be sorted ut - #if self.runner.noop_on_check(inject): - # return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module')) + if self._connection_info.check_mode: + return dict(skipped=True, msg='check mode not supported for this module') if not tmp: tmp = self._make_tmp_path() diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index e556c803156f43..9a984f03a5e311 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -191,13 +191,13 @@ def run(self, tmp=None, task_vars=dict()): # diff = {} diff = {} - # FIXME: noop stuff - #if self.runner.noop_on_check(inject): - # self._remove_tempfile_if_content_defined(content, content_tempfile) - # diffs.append(diff) - # changed = True - # module_result = dict(changed=True) - # continue + if self._connection_info.check_mode: + self._remove_tempfile_if_content_defined(content, content_tempfile) + # FIXME: diff stuff + #diffs.append(diff) + changed = True + module_return = dict(changed=True) + continue # Define a remote directory that we will copy the file to. tmp_src = tmp + 'source' diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py index bc652265ba7832..a00ad154cc1fc5 100644 --- a/lib/ansible/plugins/action/fetch.py +++ b/lib/ansible/plugins/action/fetch.py @@ -36,9 +36,8 @@ class ActionModule(ActionBase): def run(self, tmp=None, task_vars=dict()): ''' handler for fetch operations ''' - # FIXME: is this even required anymore? 
- #if self.runner.noop_on_check(inject): - # return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module')) + if self._connection_info.check_mode: + return dict(skipped=True, msg='check mode not (yet) supported for this module') source = self._task.args.get('src', None) dest = self._task.args.get('dest', None) diff --git a/lib/ansible/plugins/action/patch.py b/lib/ansible/plugins/action/patch.py index f0dbdedf05c693..e50b647bcb634f 100644 --- a/lib/ansible/plugins/action/patch.py +++ b/lib/ansible/plugins/action/patch.py @@ -51,10 +51,8 @@ def run(self, tmp=None, task_vars=dict()): self._connection.put_file(src, tmp_src) if self._connection_info.become and self._connection_info.become_user != 'root': - # FIXME: noop stuff here - #if not self.runner.noop_on_check(inject): - # self._remote_chmod('a+r', tmp_src, tmp) - self._remote_chmod('a+r', tmp_src, tmp) + if not self._connection_info.check_mode: + self._remote_chmod('a+r', tmp_src, tmp) new_module_args = self._task.args.copy() new_module_args.update( diff --git a/lib/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py index f9cd56572b1ba5..a0da97798acf60 100644 --- a/lib/ansible/plugins/action/raw.py +++ b/lib/ansible/plugins/action/raw.py @@ -24,10 +24,9 @@ class ActionModule(ActionBase): def run(self, tmp=None, task_vars=dict()): - # FIXME: need to rework the noop stuff still - #if self.runner.noop_on_check(inject): - # # in --check mode, always skip this module execution - # return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True)) + if self._connection_info.check_mode: + # in --check mode, always skip this module execution + return dict(skipped=True) executable = self._task.args.get('executable') result = self._low_level_execute_command(self._task.args.get('_raw_params'), tmp=tmp, executable=executable) diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py index 
b3b95db9f8e581..c377aa62fe6bf5 100644 --- a/lib/ansible/plugins/action/script.py +++ b/lib/ansible/plugins/action/script.py @@ -28,11 +28,8 @@ class ActionModule(ActionBase): def run(self, tmp=None, task_vars=None): ''' handler for file transfer operations ''' - # FIXME: noop stuff still needs to be sorted out - #if self.runner.noop_on_check(inject): - # # in check mode, always skip this module - # return ReturnData(conn=conn, comm_ok=True, - # result=dict(skipped=True, msg='check mode not supported for this module')) + if self._connection_info.check_mode: + return dict(skipped=True, msg='check mode not supported for this module') if not tmp: tmp = self._make_tmp_path() diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 0b93f559c352cc..54520b2f7e6cbb 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -139,15 +139,6 @@ def run(self, tmp=None, task_vars=dict()): ), ) - # FIXME: noop stuff needs to be sorted out - #if self.runner.noop_on_check(task_vars): - # return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant)) - #else: - # res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, task_vars=task_vars, complex_args=complex_args) - # if res.result.get('changed', False): - # res.diff = dict(before=dest_contents, after=resultant) - # return res - result = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars) if result.get('changed', False): result['diff'] = dict(before=dest_contents, after=resultant) @@ -169,12 +160,5 @@ def run(self, tmp=None, task_vars=dict()): ), ) - # FIXME: this may not be required anymore, as the checkmod params - # should be in the regular module args? 
- # be sure to task_vars the check mode param into the module args and - # rely on the file module to report its changed status - #if self.runner.noop_on_check(task_vars): - # new_module_args['CHECKMODE'] = True - return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars) diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py index ef5320b71941ea..e5b143e5976fe0 100644 --- a/lib/ansible/plugins/action/unarchive.py +++ b/lib/ansible/plugins/action/unarchive.py @@ -78,10 +78,8 @@ def run(self, tmp=None, task_vars=dict()): # fix file permissions when the copy is done as a different user if copy: if self._connection_info.become and self._connection_info.become_user != 'root': - # FIXME: noop stuff needs to be reworked - #if not self.runner.noop_on_check(task_vars): - # self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp) - self._remote_chmod(tmp, 'a+r', tmp_src) + if not self._connection_info.check_mode: + self._remote_chmod(tmp, 'a+r', tmp_src) # Build temporary module_args. new_module_args = self._task.args.copy() @@ -92,11 +90,6 @@ def run(self, tmp=None, task_vars=dict()): ), ) - # make sure checkmod is passed on correctly - # FIXME: noop again, probably doesn't need to be done here anymore? - #if self.runner.noop_on_check(task_vars): - # new_module_args['CHECKMODE'] = True - else: new_module_args = self._task.args.copy() new_module_args.update( @@ -104,10 +97,6 @@ def run(self, tmp=None, task_vars=dict()): original_basename=os.path.basename(source), ), ) - # make sure checkmod is passed on correctly - # FIXME: noop again, probably doesn't need to be done here anymore? 
- #if self.runner.noop_on_check(task_vars): - # module_args += " CHECKMODE=True" # execute the unarchive module now, with the updated args return self._execute_module(module_args=new_module_args, task_vars=task_vars) From 08e981b9f46e1b812a8d54d5cfb3856c42fde312 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 1 Jul 2015 11:07:37 -0400 Subject: [PATCH 1069/3617] corrected api permissions --- lib/ansible/galaxy/api.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 lib/ansible/galaxy/api.py diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py old mode 100755 new mode 100644 From 13ac0ba1fee948627c9e487e9fe1ff110f074c03 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 1 Jul 2015 11:11:20 -0400 Subject: [PATCH 1070/3617] now setuptools will pull the data dir with templates that are used by galaxy init --- lib/ansible/galaxy/data/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 lib/ansible/galaxy/data/__init__.py diff --git a/lib/ansible/galaxy/data/__init__.py b/lib/ansible/galaxy/data/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 From 9341148f04744b2b1c7f3fc69a66425cc343926e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 1 Jul 2015 16:09:05 -0400 Subject: [PATCH 1071/3617] Throw an error if with_first_found finds no files by default Fixes #9976 --- lib/ansible/plugins/lookup/first_found.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py index 091f104c62863f..e9fe9a676a579c 100644 --- a/lib/ansible/plugins/lookup/first_found.py +++ b/lib/ansible/plugins/lookup/first_found.py @@ -123,7 +123,7 @@ from jinja2.exceptions import UndefinedError -from ansible.errors import AnsibleUndefinedVariable +from ansible.errors import AnsibleLookupError, AnsibleUndefinedVariable from ansible.plugins.lookup import LookupBase from ansible.template import 
Templar from ansible.utils.boolean import boolean @@ -202,5 +202,5 @@ def run(self, terms, variables, **kwargs): if skip: return [] else: - return [None] + raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no files are found") From 08ad05c83bcd7b3dfc63a732f24e87bc41fb2f7d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 2 Jul 2015 02:50:57 -0400 Subject: [PATCH 1072/3617] Make sure callbacks are loaded in the tqm a bit earlier Fixes #11463 --- lib/ansible/executor/playbook_executor.py | 3 +++ lib/ansible/executor/task_queue_manager.py | 24 ++++++++-------------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index cf9b6a0290227d..91d5a69fc1fde6 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -122,6 +122,9 @@ def run(self): entry['plays'].append(p) else: + # make sure the tqm has callbacks loaded + self._tqm.load_callbacks() + # we are actually running plays for batch in self._get_serialized_batches(new_play): if len(batch) == 0: diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index c3143a3004ea53..cdee3f045ea21a 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -61,6 +61,7 @@ def __init__(self, inventory, variable_manager, loader, display, options, passwo self._stats = AggregateStats() self.passwords = passwords self._stdout_callback = stdout_callback + self._callback_plugins = [] # a special flag to help us exit cleanly self._terminated = False @@ -115,21 +116,19 @@ def _initialize_notified_handlers(self, handlers): for handler in handler_list: self._notified_handlers[handler.get_name()] = [] - def _load_callbacks(self, stdout_callback): + def load_callbacks(self): ''' Loads all available callbacks, 
with the exception of those which utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout', only one such callback plugin will be loaded. ''' - loaded_plugins = [] - stdout_callback_loaded = False - if stdout_callback is None: - stdout_callback = C.DEFAULT_STDOUT_CALLBACK + if self._stdout_callback is None: + self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK - if stdout_callback not in callback_loader: - raise AnsibleError("Invalid callback for stdout specified: %s" % stdout_callback) + if self._stdout_callback not in callback_loader: + raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback) for callback_plugin in callback_loader.all(class_only=True): if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0: @@ -139,17 +138,15 @@ def _load_callbacks(self, stdout_callback): callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None) (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path)) if callback_type == 'stdout': - if callback_name != stdout_callback or stdout_callback_loaded: + if callback_name != self._stdout_callback or stdout_callback_loaded: continue stdout_callback_loaded = True elif C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST: continue - loaded_plugins.append(callback_plugin(self._display)) + self._callback_plugins.append(callback_plugin(self._display)) else: - loaded_plugins.append(callback_plugin()) - - return loaded_plugins + self._callback_plugins.append(callback_plugin()) def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): @@ -204,9 +201,6 @@ def run(self, play): are done with the current task). 
''' - # load callback plugins - self._callback_plugins = self._load_callbacks(self._stdout_callback) - if play.vars_prompt: for var in play.vars_prompt: if 'name' not in var: From d91947ee960dce6fe8c5883b0c57e23b164d1e95 Mon Sep 17 00:00:00 2001 From: verm666 Date: Thu, 2 Jul 2015 15:36:56 +0300 Subject: [PATCH 1073/3617] facts: add aliases to ansible_all_ipv4_addresses on OpenBSD --- lib/ansible/module_utils/facts.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index aedd028b2428f7..cf75114c64e482 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -1997,7 +1997,7 @@ def get_default_interfaces(self, route_path): return interface['v4'], interface['v6'] - def get_interfaces_info(self, ifconfig_path): + def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'): interfaces = {} current_if = {} ips = dict( @@ -2007,7 +2007,7 @@ def get_interfaces_info(self, ifconfig_path): # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a' # when running the command 'ifconfig'. # Solaris must explicitly run the command 'ifconfig -a'. 
- rc, out, err = module.run_command([ifconfig_path, '-a']) + rc, out, err = module.run_command([ifconfig_path, ifconfig_options]) for line in out.split('\n'): @@ -2177,14 +2177,14 @@ class AIXNetwork(GenericBsdIfconfigNetwork, Network): platform = 'AIX' # AIX 'ifconfig -a' does not have three words in the interface line - def get_interfaces_info(self, ifconfig_path): + def get_interfaces_info(self, ifconfig_path, ifconfig_options): interfaces = {} current_if = {} ips = dict( all_ipv4_addresses = [], all_ipv6_addresses = [], ) - rc, out, err = module.run_command([ifconfig_path, '-a']) + rc, out, err = module.run_command([ifconfig_path, ifconfig_options]) for line in out.split('\n'): @@ -2264,6 +2264,10 @@ class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network): """ platform = 'OpenBSD' + # OpenBSD 'ifconfig -a' does not have information about aliases + def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'): + return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options) + # Return macaddress instead of lladdr def parse_lladdr_line(self, words, current_if, ips): current_if['macaddress'] = words[1] From f8593cc76b007872d5d590062e26a8c2d1a264c2 Mon Sep 17 00:00:00 2001 From: Jiri Tyr Date: Thu, 2 Jul 2015 14:37:51 +0100 Subject: [PATCH 1074/3617] Adding comment filter --- v1/ansible/runner/filter_plugins/core.py | 80 ++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/v1/ansible/runner/filter_plugins/core.py b/v1/ansible/runner/filter_plugins/core.py index bdf45509c3a610..f81da6f89426c4 100644 --- a/v1/ansible/runner/filter_plugins/core.py +++ b/v1/ansible/runner/filter_plugins/core.py @@ -270,6 +270,83 @@ def get_encrypted_password(password, hashtype='sha512', salt=None): def to_uuid(string): return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string))) +def comment(text, style='plain', **kw): + # Predefined comment types + comment_styles = { + 'plain': { + 'decoration': '# ' + }, + 'erlang': { + 'decoration': 
'% ' + }, + 'c': { + 'decoration': '// ' + }, + 'cblock': { + 'beginning': '/*', + 'decoration': ' * ', + 'end': ' */' + }, + 'xml': { + 'beginning': '' + } + } + + # Pointer to the right comment type + style_params = comment_styles[style] + + if 'decoration' in kw: + prepostfix = kw['decoration'] + else: + prepostfix = style_params['decoration'] + + # Default params + p = { + 'newline': '\n', + 'beginning': '', + 'prefix': (prepostfix).rstrip(), + 'prefix_count': 1, + 'decoration': '', + 'postfix': (prepostfix).rstrip(), + 'postfix_count': 1, + 'end': '' + } + + # Update default params + p.update(style_params) + p.update(kw) + + # Compose substrings for the final string + str_beginning = '' + if p['beginning']: + str_beginning = "%s%s" % (p['beginning'], p['newline']) + str_prefix = str( + "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count']) + str_text = ("%s%s" % ( + p['decoration'], + # Prepend each line of the text with the decorator + text.replace( + p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace( + # Remove trailing spaces when only decorator is on the line + "%s%s" % (p['decoration'], p['newline']), + "%s%s" % (p['decoration'].rstrip(), p['newline'])) + str_postfix = p['newline'].join( + [''] + [p['postfix'] for x in range(p['postfix_count'])]) + str_end = '' + if p['end']: + str_end = "%s%s" % (p['newline'], p['end']) + + # Return the final string + return "%s%s%s%s%s" % ( + str_beginning, + str_prefix, + str_text, + str_postfix, + str_end) + + class FilterModule(object): ''' Ansible core jinja2 filters ''' @@ -348,4 +425,7 @@ def filters(self): # random stuff 'random': rand, 'shuffle': randomize_list, + + # comment-style decoration of string + 'comment': comment, } From 31239f44cdfb0497621aa2456a7617d29d7e9091 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 2 Jul 2015 10:33:22 -0400 Subject: [PATCH 1075/3617] Show failed result on a retry message Fixes #10099 --- lib/ansible/executor/task_executor.py | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 1bfc88d8f2e145..6d23548de3936d 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -265,7 +265,7 @@ def _execute(self, variables=None): for attempt in range(retries): if attempt > 0: # FIXME: this should use the callback/message passing mechanism - print("FAILED - RETRYING: %s (%d retries left)" % (self._task, retries-attempt)) + print("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result)) result['attempts'] = attempt + 1 debug("running the handler") From ea6ec3bf2c9734a8f6d7dab06f9f5771273f69c1 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 Jul 2015 18:16:33 +0000 Subject: [PATCH 1076/3617] Make test-module work in v2 - `jsonify` moved from `ansible.utils` to `ansible.parsing.utils.jsonify` - I don't see `ansible.utils.parse_json` anymore so I used `json.loads`. 
--- hacking/test-module | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hacking/test-module b/hacking/test-module index c226f32e889906..03930c6b74bfff 100755 --- a/hacking/test-module +++ b/hacking/test-module @@ -35,6 +35,7 @@ import subprocess import traceback import optparse import ansible.utils as utils +from ansible.parsing.utils.jsonify import jsonify import ansible.module_common as module_common import ansible.constants as C @@ -75,7 +76,7 @@ def write_argsfile(argstring, json=False): argsfile = open(argspath, 'w') if json: args = utils.parse_kv(argstring) - argstring = utils.jsonify(args) + argstring = jsonify(args) argsfile.write(argstring) argsfile.close() return argspath @@ -150,7 +151,7 @@ def runtest( modfile, argspath): print "RAW OUTPUT" print out print err - results = utils.parse_json(out) + results = json.loads(out) except: print "***********************************" print "INVALID OUTPUT FORMAT" @@ -160,7 +161,7 @@ def runtest( modfile, argspath): print "***********************************" print "PARSED OUTPUT" - print utils.jsonify(results,format=True) + print jsonify(results,format=True) def rundebug(debugger, modfile, argspath): """Run interactively with console debugger.""" From 5466ff89077a53b594bbc185a65a11b13755f44a Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 Jul 2015 18:57:57 +0000 Subject: [PATCH 1077/3617] hacking/test-module: Deal with move of parse_kv --- hacking/test-module | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hacking/test-module b/hacking/test-module index 03930c6b74bfff..3f9c84a52941bc 100755 --- a/hacking/test-module +++ b/hacking/test-module @@ -36,6 +36,7 @@ import traceback import optparse import ansible.utils as utils from ansible.parsing.utils.jsonify import jsonify +from ansible.parsing.splitter import parse_kv import ansible.module_common as module_common import ansible.constants as C @@ -75,7 +76,7 @@ def write_argsfile(argstring, json=False): argspath 
= os.path.expanduser("~/.ansible_test_module_arguments") argsfile = open(argspath, 'w') if json: - args = utils.parse_kv(argstring) + args = parse_kv(argstring) argstring = jsonify(args) argsfile.write(argstring) argsfile.close() From 3b0524e67d95ea856ade830a189ac8aadc1db1e4 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 Jul 2015 18:59:58 +0000 Subject: [PATCH 1078/3617] hacking/test-module: Style nit --- hacking/test-module | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/test-module b/hacking/test-module index 3f9c84a52941bc..953f834aad0653 100755 --- a/hacking/test-module +++ b/hacking/test-module @@ -177,7 +177,7 @@ def main(): options, args = parse() (modfile, module_style) = boilerplate_module(options.module_path, options.module_args, options.interpreter, options.check) - argspath=None + argspath = None if module_style != 'new': if module_style == 'non_native_want_json': argspath = write_argsfile(options.module_args, json=True) From 9e37402cb79a1c824d6d0a6953d0be69296bc3f9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 2 Jul 2015 17:24:13 -0400 Subject: [PATCH 1079/3617] added ramfs to selinux ignored filesystems as reported in #11442 --- examples/ansible.cfg | 2 +- lib/ansible/constants.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index ac10f62d9e9f8c..f8cdd16fb231a1 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -235,4 +235,4 @@ accelerate_daemon_timeout = 30 # file systems that require special treatment when dealing with security context # the default behaviour that copies the existing context or uses the user default # needs to be changed to use the file system dependant context. 
-#special_context_filesystems=nfs,vboxsf,fuse +#special_context_filesystems=nfs,vboxsf,fuse,ramfs diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index b291c371b8943e..a0ea2657cec67a 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -136,7 +136,7 @@ def shell_expand_path(path): DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) # selinux -DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf', islist=True) +DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) ### PRIVILEGE ESCALATION ### # Backwards Compat From 48e15ea8494d72ee2a4cb7d05b5ee5d626d581c5 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Fri, 3 Jul 2015 00:51:36 -0700 Subject: [PATCH 1080/3617] Add groups to serf inventory plugin --- plugins/inventory/serf.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/plugins/inventory/serf.py b/plugins/inventory/serf.py index dfda4dd855db91..e1340da92df596 100755 --- a/plugins/inventory/serf.py +++ b/plugins/inventory/serf.py @@ -31,6 +31,7 @@ # These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr import argparse +import collections import os import sys @@ -58,6 +59,16 @@ def get_nodes(data): return [node['Name'] for node in data] +def get_groups(data): + groups = collections.defaultdict(list) + + for node in data: + for key, value in node['Tags'].items(): + groups[value].append(node['Name']) + + return groups + + def get_meta(data): meta = {'hostvars': {}} for node in data: @@ -68,8 +79,11 @@ def get_meta(data): def print_list(): data = get_serf_members_data() nodes = get_nodes(data) + groups = get_groups(data) meta = get_meta(data) - print(json.dumps({_key: nodes, '_meta': meta})) + inventory_data = {_key: nodes, '_meta': meta} + inventory_data.update(groups) + 
print(json.dumps(inventory_data)) def print_host(host): From 63b6dca1f3c72e81468a79afde19bb6a84d14791 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Fri, 3 Jul 2015 00:02:17 -0700 Subject: [PATCH 1081/3617] Add Landscape inventory plugin --- plugins/inventory/landscape.py | 128 +++++++++++++++++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100755 plugins/inventory/landscape.py diff --git a/plugins/inventory/landscape.py b/plugins/inventory/landscape.py new file mode 100755 index 00000000000000..4b53171c34eb0f --- /dev/null +++ b/plugins/inventory/landscape.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python + +# (c) 2015, Marc Abramowitz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use nodes discovered by Canonical's +# Landscape (http://www.ubuntu.com/management/landscape-features). 
+# +# Requires the `landscape_api` Python module +# See: +# - https://landscape.canonical.com/static/doc/api/api-client-package.html +# - https://landscape.canonical.com/static/doc/api/python-api.html +# +# Environment variables +# --------------------- +# - `LANDSCAPE_API_URI` +# - `LANDSCAPE_API_KEY` +# - `LANDSCAPE_API_SECRET` +# - `LANDSCAPE_API_SSL_CA_FILE` (optional) + + +import argparse +import collections +import os +import sys + +from landscape_api.base import API, HTTPError + +try: + import json +except ImportError: + import simplejson as json + +_key = 'landscape' + + +class EnvironmentConfig(object): + uri = os.getenv('LANDSCAPE_API_URI') + access_key = os.getenv('LANDSCAPE_API_KEY') + secret_key = os.getenv('LANDSCAPE_API_SECRET') + ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE') + + +def _landscape_client(): + env = EnvironmentConfig() + return API( + uri=env.uri, + access_key=env.access_key, + secret_key=env.secret_key, + ssl_ca_file=env.ssl_ca_file) + + +def get_landscape_members_data(): + return _landscape_client().get_computers() + + +def get_nodes(data): + return [node['hostname'] for node in data] + + +def get_groups(data): + groups = collections.defaultdict(list) + + for node in data: + for value in node['tags']: + groups[value].append(node['hostname']) + + return groups + + +def get_meta(data): + meta = {'hostvars': {}} + for node in data: + meta['hostvars'][node['hostname']] = {'tags': node['tags']} + return meta + + +def print_list(): + data = get_landscape_members_data() + nodes = get_nodes(data) + groups = get_groups(data) + meta = get_meta(data) + inventory_data = {_key: nodes, '_meta': meta} + inventory_data.update(groups) + print(json.dumps(inventory_data)) + + +def print_host(host): + data = get_landscape_members_data() + meta = get_meta(data) + print(json.dumps(meta['hostvars'][host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script reading from landscape cluster') + 
mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from landscape cluster' + mutex_group.add_argument('--list', action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) From cf4ed9a556f06b671d19d85c8a0300c07890bf7d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Jul 2015 13:01:21 -0400 Subject: [PATCH 1082/3617] load callbacks on init again as they did not seem to load with new call from executor --- lib/ansible/executor/task_queue_manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index cdee3f045ea21a..c672f9c2a10813 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -63,6 +63,8 @@ def __init__(self, inventory, variable_manager, loader, display, options, passwo self._stdout_callback = stdout_callback self._callback_plugins = [] + self.load_callbacks() + # a special flag to help us exit cleanly self._terminated = False From 5122455db833eeddc92b74c44d112c125878502b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Jul 2015 13:54:31 -0400 Subject: [PATCH 1083/3617] ported missing sequence updates from 1.9 --- lib/ansible/plugins/lookup/sequence.py | 27 +++++++++++++------ .../roles/test_iterators/tasks/main.yml | 7 ++++- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/lib/ansible/plugins/lookup/sequence.py b/lib/ansible/plugins/lookup/sequence.py index 1ddeba932f8523..1e66626b68ec60 100644 --- a/lib/ansible/plugins/lookup/sequence.py +++ b/lib/ansible/plugins/lookup/sequence.py @@ -152,15 +152,26 @@ def sanity_check(self): ) elif self.count is not None: 
# convert count to end - self.end = self.start + self.count * self.stride - 1 + if self.count != 0: + self.end = self.start + self.count * self.stride - 1 + else: + self.start = 0 + self.end = 0 + self.stride = 0 del self.count - if self.end < self.start: - raise AnsibleError("can't count backwards") + if self.stride > 0 and self.end < self.start: + raise AnsibleError("to count backwards make stride negative") + if self.stride < 0 and self.end > self.start: + raise AnsibleError("to count forward don't make stride negative") if self.format.count('%') != 1: raise AnsibleError("bad formatting string: %s" % self.format) def generate_sequence(self): - numbers = xrange(self.start, self.end + 1, self.stride) + if self.stride > 0: + adjust = 1 + else: + adjust = -1 + numbers = xrange(self.start, self.end + adjust, self.stride) for i in numbers: try: @@ -191,13 +202,13 @@ def run(self, terms, variables, **kwargs): raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e)) self.sanity_check() - - results.extend(self.generate_sequence()) + if self.stride != 0: + results.extend(self.generate_sequence()) except AnsibleError: raise - except Exception: + except Exception as e: raise AnsibleError( - "unknown error generating sequence" + "unknown error generating sequence: %s" % e ) return results diff --git a/test/integration/roles/test_iterators/tasks/main.yml b/test/integration/roles/test_iterators/tasks/main.yml index ad55d6d6105582..b324da7932f842 100644 --- a/test/integration/roles/test_iterators/tasks/main.yml +++ b/test/integration/roles/test_iterators/tasks/main.yml @@ -81,10 +81,15 @@ with_sequence: count=0 register: count_of_zero +- name: test with_sequence count 1 + set_fact: "{{ 'x' + item }}={{ item }}" + with_sequence: count=1 + register: count_of_one + - assert: that: - count_of_zero | skipped - - not count_of_zero | failed + - not count_of_one | skipped # WITH_RANDOM_CHOICE From de98dc2968f312b5c565631a56f4bf153ccd9bec Mon 
Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Jul 2015 14:27:52 -0400 Subject: [PATCH 1084/3617] removed 2nd load_callbacks that was causeing dupe output --- lib/ansible/executor/playbook_executor.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 91d5a69fc1fde6..cf9b6a0290227d 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -122,9 +122,6 @@ def run(self): entry['plays'].append(p) else: - # make sure the tqm has callbacks loaded - self._tqm.load_callbacks() - # we are actually running plays for batch in self._get_serialized_batches(new_play): if len(batch) == 0: From 720e184f88aaa82a9ffaa9aeecda8da515060dba Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Fri, 3 Jul 2015 22:27:49 +0200 Subject: [PATCH 1085/3617] implement jinja2 header overrides --- lib/ansible/template/__init__.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index 8ce243f55f22a5..f10ea22fb5bb0b 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import ast import re from jinja2 import Environment @@ -256,6 +257,17 @@ def _do_template(self, data, preserve_trailing_newlines=False, fail_on_undefined overrides = JINJA2_ALLOWED_OVERRIDES.intersection(set(overrides)) myenv = self.environment.overlay(overrides) + # Get jinja env overrides from template + if data.startswith(JINJA2_OVERRIDE): + eol = data.find('\n') + line = data[len(JINJA2_OVERRIDE):eol] + data = data[eol+1:] + for pair in line.split(','): + (key,val) = pair.split(':') + key = key.strip() + if key in JINJA2_ALLOWED_OVERRIDES: + setattr(myenv, key, ast.literal_eval(val.strip())) + #FIXME: add tests myenv.filters.update(self._get_filters()) From 
4d35d8bd31ffcba41e41351065233cdfd83d0599 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Jul 2015 18:59:49 -0400 Subject: [PATCH 1086/3617] properly booleanify copy field --- lib/ansible/plugins/action/unarchive.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py index e5b143e5976fe0..fca31e6b93de7b 100644 --- a/lib/ansible/plugins/action/unarchive.py +++ b/lib/ansible/plugins/action/unarchive.py @@ -22,6 +22,7 @@ import pipes from ansible.plugins.action import ActionBase +from ansible.utils.boolean import boolean class ActionModule(ActionBase): @@ -33,7 +34,7 @@ def run(self, tmp=None, task_vars=dict()): source = self._task.args.get('src', None) dest = self._task.args.get('dest', None) - copy = self._task.args.get('copy', True) + copy = boolean(self._task.args.get('copy', True)) creates = self._task.args.get('creates', None) if source is None or dest is None: From 3831f59094871670284f206e751d4bd7f0df6624 Mon Sep 17 00:00:00 2001 From: Jens Carl Date: Fri, 3 Jul 2015 17:10:00 -0700 Subject: [PATCH 1087/3617] Update developing_modules.rst Fix typo. --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 74daba60d44aa9..affd7f067e8dae 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -484,7 +484,7 @@ Module checklist * The return structure should be consistent, even if NA/None are used for keys normally returned under other options. * Are module actions idempotent? If not document in the descriptions or the notes * Import module snippets `from ansible.module_utils.basic import *` at the bottom, conserves line numbers for debugging. 
-* Call your :func:`main` from a condtional so that it would be possible to +* Call your :func:`main` from a conditional so that it would be possible to test them in the future example:: if __name__ == '__main__': From 2ddd83360a8f895e12c1bc3ddea8d7dd165fba3b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 3 Jul 2015 23:52:49 -0400 Subject: [PATCH 1088/3617] Revert "removed 2nd load_callbacks that was causeing dupe output" This reverts commit de98dc2968f312b5c565631a56f4bf153ccd9bec. --- lib/ansible/executor/playbook_executor.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index cf9b6a0290227d..91d5a69fc1fde6 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -122,6 +122,9 @@ def run(self): entry['plays'].append(p) else: + # make sure the tqm has callbacks loaded + self._tqm.load_callbacks() + # we are actually running plays for batch in self._get_serialized_batches(new_play): if len(batch) == 0: From a51c16515736371d8db5bdeaefe2328ddaea938b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 3 Jul 2015 23:52:59 -0400 Subject: [PATCH 1089/3617] Revert "load callbacks on init again as they did not seem to load with new call from executor" This reverts commit cf4ed9a556f06b671d19d85c8a0300c07890bf7d. 
--- lib/ansible/executor/task_queue_manager.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index c672f9c2a10813..cdee3f045ea21a 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -63,8 +63,6 @@ def __init__(self, inventory, variable_manager, loader, display, options, passwo self._stdout_callback = stdout_callback self._callback_plugins = [] - self.load_callbacks() - # a special flag to help us exit cleanly self._terminated = False From 67671e328aeef7c0d88ee481852b9e5ad79c3699 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 4 Jul 2015 00:07:17 -0400 Subject: [PATCH 1090/3617] Fix callback loading issue a slightly different way --- lib/ansible/executor/task_queue_manager.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index cdee3f045ea21a..2504a179fc0c27 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -61,6 +61,8 @@ def __init__(self, inventory, variable_manager, loader, display, options, passwo self._stats = AggregateStats() self.passwords = passwords self._stdout_callback = stdout_callback + + self._callbacks_loaded = False self._callback_plugins = [] # a special flag to help us exit cleanly @@ -123,6 +125,9 @@ def load_callbacks(self): only one such callback plugin will be loaded. 
''' + if self._callbacks_loaded: + return + stdout_callback_loaded = False if self._stdout_callback is None: self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK @@ -148,6 +153,8 @@ def load_callbacks(self): else: self._callback_plugins.append(callback_plugin()) + self._callbacks_loaded = True + def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): if prompt and default is not None: @@ -201,6 +208,9 @@ def run(self, play): are done with the current task). ''' + if not self._callbacks_loaded: + self.load_callbacks() + if play.vars_prompt: for var in play.vars_prompt: if 'name' not in var: From 5f791329ce2f452b99ee74b9cfca4de83ac37e0e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 4 Jul 2015 10:23:30 -0400 Subject: [PATCH 1091/3617] now verbose mode shows config file used --- lib/ansible/cli/__init__.py | 4 +++- lib/ansible/cli/adhoc.py | 3 +++ lib/ansible/cli/doc.py | 2 ++ lib/ansible/cli/galaxy.py | 2 ++ lib/ansible/cli/playbook.py | 2 ++ lib/ansible/cli/pull.py | 2 ++ lib/ansible/cli/vault.py | 2 ++ lib/ansible/constants.py | 11 ++++++----- 8 files changed, 22 insertions(+), 6 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 6d219e54f8a8dd..534ebabd0f79a7 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -99,7 +99,9 @@ def parse(self): raise Exception("Need to implement!") def run(self): - raise Exception("Need to implement!") + + if self.options.verbosity > 0: + self.display.display("Using %s as config file" % C.CONFIG_FILE) @staticmethod def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False): diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index e940a0224f6302..cc80f38427ba2e 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -76,6 +76,9 @@ def _play_ds(self, pattern): def run(self): ''' use Runner lib to do SSH things 
''' + super(AdHocCLI, self).run() + + # only thing left should be host pattern pattern = self.args[0] diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index 09020b41ffeebe..72ce3c1a5e5315 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -61,6 +61,8 @@ def parse(self): def run(self): + super(DocCLI, self).run() + if self.options.module_path is not None: for i in self.options.module_path.split(os.pathsep): module_loader.add_directory(i) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index abe85e0af8e97a..2df7075918fb54 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -124,6 +124,8 @@ def parse(self): def run(self): + super(GalaxyCLI, self).run() + # if not offline, get connect to galaxy api if self.action in ("info","install") or (self.action == 'init' and not self.options.offline): api_server = self.options.api_server diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index e10ffb71d0b72e..630ba391ffff74 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -81,6 +81,8 @@ def parse(self): def run(self): + super(PlaybookCLI, self).run() + # Note: slightly wrong, this is written so that implicit localhost # Manage passwords sshpass = None diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index ff8103a1df631b..d66ceddc06e1d3 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -98,6 +98,8 @@ def parse(self): def run(self): ''' use Runner lib to do SSH things ''' + super(PullCLI, self).run() + # log command line now = datetime.datetime.now() self.display.display(now.strftime("Starting Ansible Pull at %F %T")) diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index edd054f434d3bc..cac9dc7177e314 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -70,6 +70,8 @@ def parse(self): def run(self): + super(VaultCLI, self).run() + if self.options.vault_password_file: # read vault_pass from a file 
self.vault_pass = read_vault_file(self.options.vault_password_file) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index a0ea2657cec67a..e001ce76ca6d9f 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -16,7 +16,7 @@ # along with Ansible. If not, see . # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import (absolute_import, division) __metaclass__ = type import os @@ -26,6 +26,8 @@ from six.moves import configparser from string import ascii_letters, digits +from ansible.errors import AnsibleOptionsError + # copied from utils, avoid circular reference fun :) def mk_boolean(value): if value is None: @@ -81,9 +83,8 @@ def load_config_file(): try: p.read(path) except configparser.Error as e: - print("Error reading config file: \n{0}".format(e)) - sys.exit(1) - return p + raise AnsibleOptionsError("Error reading config file: \n{0}".format(e)) + return p, path return None def shell_expand_path(path): @@ -93,7 +94,7 @@ def shell_expand_path(path): path = os.path.expanduser(os.path.expandvars(path)) return path -p = load_config_file() +p, CONFIG_FILE = load_config_file() active_user = pwd.getpwuid(os.geteuid())[0] From 3887173c2c3a9feb3ed4a67fccc330d5ebe3ff8f Mon Sep 17 00:00:00 2001 From: Spencer Krum Date: Thu, 2 Jul 2015 15:41:12 -0700 Subject: [PATCH 1092/3617] Use cfacter instead of facter if possible CFacter is the facter replacement written in C++. It is available from the puppetlabs repo. 
--- lib/ansible/module_utils/facts.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index aedd028b2428f7..7b95d2e65dd65c 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -2734,12 +2734,16 @@ def get_all_facts(module): for (k, v) in facts.items(): setup_options["ansible_%s" % k.replace('-', '_')] = v - # Look for the path to the facter and ohai binary and set + # Look for the path to the facter, cfacter, and ohai binaries and set # the variable to that path. facter_path = module.get_bin_path('facter') + cfacter_path = module.get_bin_path('cfacter') ohai_path = module.get_bin_path('ohai') + # Prefer to use cfacter if available + if cfacter_path is not None: + facter_path = cfacter_path # if facter is installed, and we can use --json because # ruby-json is ALSO installed, include facter data in the JSON From 515de1e6eb55a51de957d790cf565c54ed3bcdf0 Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Sat, 4 Jul 2015 12:30:04 -0500 Subject: [PATCH 1093/3617] Be more specific describing groups of groups, Fixes #11397 --- docsite/rst/intro_inventory.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index d97032e0635dc6..3ec80c094222bd 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -106,9 +106,8 @@ Variables can also be applied to an entire group at once:: Groups of Groups, and Group Variables +++++++++++++++++++++++++++++++++++++ -It is also possible to make groups of groups and assign -variables to groups. These variables can be used by /usr/bin/ansible-playbook, but not -/usr/bin/ansible:: +It is also possible to make groups of groups using the ``:children`` suffix. Just like above, you can apply variables using ``:vars``. 
+These variables can be used by /usr/bin/ansible-playbook, but not /usr/bin/ansible:: [atlanta] host1 From 02aa76d5184e310702f74514988af6f00c9ee959 Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Sat, 4 Jul 2015 13:48:34 -0500 Subject: [PATCH 1094/3617] Remove docs remnant re: var use. --- docsite/rst/intro_inventory.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index 3ec80c094222bd..70709890cd08f8 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -107,7 +107,6 @@ Groups of Groups, and Group Variables +++++++++++++++++++++++++++++++++++++ It is also possible to make groups of groups using the ``:children`` suffix. Just like above, you can apply variables using ``:vars``. -These variables can be used by /usr/bin/ansible-playbook, but not /usr/bin/ansible:: [atlanta] host1 From 552715f0723dcdce97d5a0f527ea51d533438b77 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 4 Jul 2015 17:58:23 -0400 Subject: [PATCH 1095/3617] added validate and backup doc fragments --- .../utils/module_docs_fragments/backup.py | 30 +++++++++++++++++++ .../utils/module_docs_fragments/validate.py | 30 +++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 lib/ansible/utils/module_docs_fragments/backup.py create mode 100644 lib/ansible/utils/module_docs_fragments/validate.py diff --git a/lib/ansible/utils/module_docs_fragments/backup.py b/lib/ansible/utils/module_docs_fragments/backup.py new file mode 100644 index 00000000000000..bee7182a91f195 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/backup.py @@ -0,0 +1,30 @@ +# Copyright (c) 2015 Ansible, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +class ModuleDocFragment(object): + + # Standard documentation fragment + DOCUMENTATION = ''' + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + required: false + choices: [ "yes", "no" ] + default: "no" +''' diff --git a/lib/ansible/utils/module_docs_fragments/validate.py b/lib/ansible/utils/module_docs_fragments/validate.py new file mode 100644 index 00000000000000..6b4a14b7fa2fc6 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/validate.py @@ -0,0 +1,30 @@ +# Copyright (c) 2015 Ansible, Inc +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +class ModuleDocFragment(object): + + # Standard documentation fragment + DOCUMENTATION = ''' + validate: + required: false + description: + - The validation command to run before copying into place. The path to the file to + validate is passed in via '%s' which must be present as in the apache example below. 
+ The command is passed securely so shell features like expansion and pipes won't work. + default: None +''' From 0676157897c009676862c8de35eedd30ef133c69 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Sun, 28 Jun 2015 10:34:29 -0700 Subject: [PATCH 1096/3617] Remove unnecessary imports --- lib/ansible/cli/adhoc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index e940a0224f6302..30256d57e7d334 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -17,14 +17,13 @@ ######################################################## from ansible import constants as C -from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.errors import AnsibleOptionsError from ansible.executor.task_queue_manager import TaskQueueManager from ansible.inventory import Inventory from ansible.parsing import DataLoader from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play from ansible.cli import CLI -from ansible.utils.display import Display from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager From 76c5be3a31eb215903fb06011a5e157520abc0fa Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 4 Jul 2015 21:28:11 -0400 Subject: [PATCH 1097/3617] Add 'vars' to magic variables --- lib/ansible/vars/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 6531b6a3209a0e..7b0b51b35dd831 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -243,6 +243,7 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): # the 'omit' value alows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token + all_vars['vars'] = all_vars #CACHED_VARS[cache_entry] = all_vars From 53cd96befea33a73498b932904f99c9612ef2db8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: 
Sat, 4 Jul 2015 21:48:54 -0400 Subject: [PATCH 1098/3617] Updating unit tests to account for new magic variable 'vars' --- test/units/vars/test_variable_manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py index 4371008bb9bf05..e2db28e40e5062 100644 --- a/test/units/vars/test_variable_manager.py +++ b/test/units/vars/test_variable_manager.py @@ -41,6 +41,8 @@ def test_basic_manager(self): vars = v.get_vars(loader=fake_loader, use_cache=False) if 'omit' in vars: del vars['omit'] + if 'vars' in vars: + del vars['vars'] self.assertEqual(vars, dict(playbook_dir='.')) From 388e46a485afc22b67049b92ea00bd77ff04c776 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 4 Jul 2015 22:44:45 -0400 Subject: [PATCH 1099/3617] Backing out vars magic variable due to failed tests --- lib/ansible/vars/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 7b0b51b35dd831..47f419e73a0d69 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -243,7 +243,8 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): # the 'omit' value alows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token - all_vars['vars'] = all_vars + + #all_vars['vars'] = all_vars.copy() #CACHED_VARS[cache_entry] = all_vars From 38c5da9d2a9222aa692c32b63781916ee984a0ab Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 4 Jul 2015 22:48:20 -0400 Subject: [PATCH 1100/3617] Revert "Backing out vars magic variable due to failed tests" This reverts commit 388e46a485afc22b67049b92ea00bd77ff04c776. 
--- lib/ansible/vars/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 47f419e73a0d69..7b0b51b35dd831 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -243,8 +243,7 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): # the 'omit' value alows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token - - #all_vars['vars'] = all_vars.copy() + all_vars['vars'] = all_vars #CACHED_VARS[cache_entry] = all_vars From bddadc9565e3dd3e0f98a1bb986c0ad96f743d84 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 4 Jul 2015 23:18:54 -0400 Subject: [PATCH 1101/3617] Fix bug in relative path determination --- lib/ansible/parsing/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/parsing/__init__.py b/lib/ansible/parsing/__init__.py index 9551343fbf4399..027691d18ea2dc 100644 --- a/lib/ansible/parsing/__init__.py +++ b/lib/ansible/parsing/__init__.py @@ -211,12 +211,12 @@ def path_dwim_relative(self, role_path, dirname, source): if os.path.exists(source2): self.set_basedir(cur_basedir) return source2 + self.set_basedir(cur_basedir) obvious_local_path = self.path_dwim(source) if os.path.exists(obvious_local_path): - self.set_basedir(cur_basedir) + #self.set_basedir(cur_basedir) return obvious_local_path - self.set_basedir(cur_basedir) - return source2 # which does not exist + return source2 From 38cc54b7177b892a8a546044b4da3c5ea4d4312f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 4 Jul 2015 23:34:07 -0400 Subject: [PATCH 1102/3617] Make 'vars' a copy to prevent recursion issues --- lib/ansible/vars/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 7b0b51b35dd831..990f3660eec334 100644 --- a/lib/ansible/vars/__init__.py +++ 
b/lib/ansible/vars/__init__.py @@ -243,7 +243,7 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): # the 'omit' value alows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token - all_vars['vars'] = all_vars + all_vars['vars'] = all_vars.copy() #CACHED_VARS[cache_entry] = all_vars From 9155af20e31ff0f440084255957b728c876da359 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 5 Jul 2015 01:06:54 -0400 Subject: [PATCH 1103/3617] Make sure vars in debug tasks aren't templated too early If the syntax var={{something}} is used, that can be templated too early in the post_validation, leading the debug module to fail when it tries to template the same value in turn. --- lib/ansible/executor/task_executor.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 6d23548de3936d..ae840a4de6932f 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -231,9 +231,18 @@ def _execute(self, variables=None): debug("when evaulation failed, skipping this task") return dict(changed=False, skipped=True, skip_reason='Conditional check failed') - # Now we do final validation on the task, which sets all fields to their final values + # Now we do final validation on the task, which sets all fields to their final values. + # In the case of debug tasks, we save any 'var' params and restore them after validating + # so that variables are not replaced too early. 
+ prev_var = None + if self._task.action == 'debug' and 'var' in self._task.args: + prev_var = self._task.args.pop('var') + self._task.post_validate(templar=templar) + if prev_var is not None: + self._task.args['var'] = prev_var + # if this task is a TaskInclude, we just return now with a success code so the # main thread can expand the task list for the given host if self._task.action == 'include': From 82e00b1022c1547510b25514eb87540b93e165af Mon Sep 17 00:00:00 2001 From: Jon Hadfield Date: Sun, 5 Jul 2015 17:23:22 +0100 Subject: [PATCH 1104/3617] add facts for datetime 8601 basic and basic short. --- lib/ansible/module_utils/facts.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index cf75114c64e482..cc90c070afec08 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -601,6 +601,8 @@ def get_date_time_facts(self): self.facts['date_time']['time'] = now.strftime('%H:%M:%S') self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ") self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ") + self.facts['date_time']['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f") + self.facts['date_time']['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S") self.facts['date_time']['tz'] = time.strftime("%Z") self.facts['date_time']['tz_offset'] = time.strftime("%z") From 05be30168d123c3ffdb4f783cd24fee9c90e2d7a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 5 Jul 2015 12:50:36 -0400 Subject: [PATCH 1105/3617] return empty string when config file is not used --- lib/ansible/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index e001ce76ca6d9f..a771fe42c24e79 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -85,7 +85,7 @@ def load_config_file(): except configparser.Error as e: raise 
AnsibleOptionsError("Error reading config file: \n{0}".format(e)) return p, path - return None + return None, '' def shell_expand_path(path): ''' shell_expand_path is needed as os.path.expanduser does not work From 90a810e2a818be4984b35e4b0e4f04e73711c1ee Mon Sep 17 00:00:00 2001 From: Johannes Meixner Date: Sun, 5 Jul 2015 19:57:41 +0300 Subject: [PATCH 1106/3617] docsite/rst/intro_configuration.rst: reword Title. Make Configuration the first word, so that it is in line with other documents and that system administrators/devops people don't lose the tab when having many browser tabs open. --- docsite/rst/intro_configuration.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index f8671fb5f1f253..a35ab2c8941a86 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -1,5 +1,5 @@ -The Ansible Configuration File -++++++++++++++++++++++++++++++ +Configuration file +++++++++++++++++++ .. 
contents:: Topics From 22a0aa016f00f38afe926f31d863aed9055e9322 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 5 Jul 2015 15:51:12 -0400 Subject: [PATCH 1107/3617] pbrun not forced to use local daemon anymore --- lib/ansible/executor/connection_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 2800e233535c20..76a4bb733a7923 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -339,7 +339,7 @@ def detect_su_prompt(data): prompt='assword:' exe = self.become_exe or 'pbrun' flags = self.become_flags or '' - becomecmd = '%s -b -l %s -u %s %s' % (exe, flags, self.become_user, success_cmd) + becomecmd = '%s -b %s -u %s %s' % (exe, flags, self.become_user, success_cmd) elif self.become_method == 'pfexec': From 6a75125f32472187c6231e84ccc9e33e6d60bb2c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 5 Jul 2015 17:24:15 -0400 Subject: [PATCH 1108/3617] now traps exceptions on display instantiation --- bin/ansible | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/ansible b/bin/ansible index 8fbc509047120b..2c8c6f3d22b1c2 100755 --- a/bin/ansible +++ b/bin/ansible @@ -43,10 +43,11 @@ from ansible.utils.display import Display if __name__ == '__main__': cli = None - display = Display() me = os.path.basename(sys.argv[0]) try: + display = Display() + if me == 'ansible-playbook': from ansible.cli.playbook import PlaybookCLI as mycli elif me == 'ansible': From f42b6237d99a9dc7398143219f9d928943fce4c8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 5 Jul 2015 17:46:51 -0400 Subject: [PATCH 1109/3617] now has display of last resort moved all display/color/err to use display.error now also capture generic exceptions if they happen (never should!) 
--- bin/ansible | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/bin/ansible b/bin/ansible index 2c8c6f3d22b1c2..03a50fd9438b43 100755 --- a/bin/ansible +++ b/bin/ansible @@ -18,7 +18,7 @@ # along with Ansible. If not, see . ######################################################## -from __future__ import (absolute_import) +from __future__ import (absolute_import, print_function) __metaclass__ = type __requires__ = ['ansible'] @@ -38,10 +38,17 @@ import sys from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError from ansible.utils.display import Display -######################################################## +######################################## +### OUTPUT OF LAST RESORT ### +class LastResort(object): + def error(self, msg): + print(msg, file=sys.stderr) + +######################################## if __name__ == '__main__': + display = LastResort() cli = None me = os.path.basename(sys.argv[0]) @@ -70,21 +77,24 @@ if __name__ == '__main__': except AnsibleOptionsError as e: cli.parser.print_help() - display.display(str(e), stderr=True, color='red') + display.error(str(e)) sys.exit(5) except AnsibleParserError as e: - display.display(str(e), stderr=True, color='red') + display.error(str(e)) sys.exit(4) # TQM takes care of these, but leaving comment to reserve the exit codes # except AnsibleHostUnreachable as e: -# display.display(str(e), stderr=True, color='red') +# display.error(str(e)) # sys.exit(3) # except AnsibleHostFailed as e: -# display.display(str(e), stderr=True, color='red') +# display.error(str(e)) # sys.exit(2) except AnsibleError as e: - display.display(str(e), stderr=True, color='red') + display.error(str(e)) sys.exit(1) except KeyboardInterrupt: - display.error("interrupted") + display.error("User interrupted execution") sys.exit(99) + except Exception as e: + display.error("Unexpected Exception: %s" % str(e)) + sys.exit(250) From 2c9d1257ba59e01c093a901cf53a7323c56f4f85 Mon 
Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 5 Jul 2015 19:55:11 -0400 Subject: [PATCH 1110/3617] put type checking before looking against choices array to always get type comparrison correctly --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 62caf384ff5aa6..be9e86ce70a204 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -392,8 +392,8 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, } if not bypass_checks: self._check_required_arguments() - self._check_argument_values() self._check_argument_types() + self._check_argument_values() self._check_required_together(required_together) self._check_required_one_of(required_one_of) self._check_required_if(required_if) From 60ec726b37f5a7132b23d3cc8f52e6371fb1bae1 Mon Sep 17 00:00:00 2001 From: Hugo van Kemenade Date: Mon, 6 Jul 2015 10:21:40 +0300 Subject: [PATCH 1111/3617] Typos --- docsite/rst/intro_installation.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index 53abad4fc1e385..1bb0f49a08e5bc 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -8,8 +8,8 @@ Installation Getting Ansible ``````````````` -You may also wish to follow the `Github project `_ if -you have a github account. This is also where we keep the issue tracker for sharing +You may also wish to follow the `GitHub project `_ if +you have a GitHub account. This is also where we keep the issue tracker for sharing bugs and feature ideas. .. 
_what_will_be_installed: From 378c8fd5495736baf32259cb82b34de5dab29e6a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 6 Jul 2015 10:44:27 -0700 Subject: [PATCH 1112/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index ff69ce7912e2ce..abdd96ed1e966a 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit ff69ce7912e2cee53e6737e377853a49c0482b1c +Subproject commit abdd96ed1e966a290cdcdb4cb9f8d2a7c03ae59e diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 4e48ef9ecace3a..195ef57bfb254e 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 4e48ef9ecace3a6eb92e3e4d2ef1a3ea2b7e33ab +Subproject commit 195ef57bfb254e719aa7ea3a6ad30729e3036b87 From 46b33152c8748787ed2e9d0ef049a80b562d12ef Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 6 Jul 2015 13:48:52 -0400 Subject: [PATCH 1113/3617] Check for ansible_su*_pass as well as _password Fixes #11500 --- lib/ansible/executor/connection_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 76a4bb733a7923..162cb6004d8f19 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -87,12 +87,12 @@ become_flags = ('ansible_become_flags',), sudo = ('ansible_sudo',), sudo_user = ('ansible_sudo_user',), - sudo_pass = ('ansible_sudo_password',), + sudo_pass = ('ansible_sudo_password', 'ansible_sudo_pass'), sudo_exe = ('ansible_sudo_exe',), sudo_flags = ('ansible_sudo_flags',), su = ('ansible_su',), su_user = ('ansible_su_user',), - su_pass = ('ansible_su_password',), + su_pass = ('ansible_su_password', 'ansible_su_pass'), su_exe = ('ansible_su_exe',), su_flags = ('ansible_su_flags',), 
) From 1d8ccfb99f0bb3cde570cc51161ba5779fc80eb6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 6 Jul 2015 14:30:56 -0400 Subject: [PATCH 1114/3617] Fixing includes where the included file is "{{item}}" --- lib/ansible/executor/process/result.py | 6 ------ lib/ansible/playbook/included_file.py | 10 +++++++++- lib/ansible/plugins/strategies/linear.py | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 7fbee9a1b658a0..8810001702c8ff 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -142,12 +142,6 @@ def run(self): result_items = [ result._result ] for result_item in result_items: - #if 'include' in result_item: - # include_variables = result_item.get('include_variables', dict()) - # if 'item' in result_item: - # include_variables['item'] = result_item['item'] - # self._send_result(('include', result._host, result._task, result_item['include'], include_variables)) - #elif 'add_host' in result_item: if 'add_host' in result_item: # this task added a new host (add_host module) self._send_result(('add_host', result_item)) diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py index 74fdfbc9034382..92bf325f5b46e5 100644 --- a/lib/ansible/playbook/included_file.py +++ b/lib/ansible/playbook/included_file.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible.template import Templar + class IncludedFile: def __init__(self, filename, args, task): @@ -38,7 +40,7 @@ def __repr__(self): return "%s (%s): %s" % (self._filename, self._args, self._hosts) @staticmethod - def process_include_results(results, tqm, iterator, loader): + def process_include_results(results, tqm, iterator, loader, variable_manager): included_files = [] for res in results: @@ -62,10 +64,16 @@ def process_include_results(results, tqm, iterator, 
loader): else: include_file = loader.path_dwim(res._task.args.get('_raw_params')) + task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=res._host, task=original_task) + #task_vars = tqm.add_tqm_variables(task_vars, play=iterator._play) + templar = Templar(loader=loader, variables=task_vars) + include_variables = include_result.get('include_variables', dict()) if 'item' in include_result: include_variables['item'] = include_result['item'] + task_vars['item'] = include_result['item'] + include_file = templar.template(include_file) inc_file = IncludedFile(include_file, include_variables, original_task) try: diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index 1ce9677f8f9b10..70ab50d8eacbe4 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -213,7 +213,7 @@ def run(self, iterator, connection_info): host_results.extend(results) try: - included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader) + included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager) except AnsibleError, e: return False From aa6486778f6b4fb3ed4380d80d2d6a3a884bdcc7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 6 Jul 2015 15:33:48 -0400 Subject: [PATCH 1115/3617] fixed become test to match new expected output --- test/units/executor/test_connection_information.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/executor/test_connection_information.py b/test/units/executor/test_connection_information.py index 9d702b77abc220..9258173f09605b 100644 --- a/test/units/executor/test_connection_information.py +++ b/test/units/executor/test_connection_information.py @@ -145,7 +145,7 @@ def test_connection_info_make_become_cmd(self): conn_info.become_method = 'pbrun' (cmd, prompt, key) = 
conn_info.make_become_cmd(cmd=default_cmd, executable="/bin/bash") - self.assertEqual(cmd, """%s -c '%s -b -l %s -u %s '"'"'echo %s; %s'"'"''""" % (default_exe, pbrun_exe, pbrun_flags, conn_info.become_user, key, default_cmd)) + self.assertEqual(cmd, """%s -c '%s -b %s -u %s '"'"'echo %s; %s'"'"''""" % (default_exe, pbrun_exe, pbrun_flags, conn_info.become_user, key, default_cmd)) conn_info.become_method = 'pfexec' (cmd, prompt, key) = conn_info.make_become_cmd(cmd=default_cmd, executable="/bin/bash") From 0cd79421557056f45995e973c6d112153dfc9e06 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 6 Jul 2015 15:42:23 -0400 Subject: [PATCH 1116/3617] removed uneeded quotes --- examples/ansible.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index f8cdd16fb231a1..4f5a35bf142624 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -169,8 +169,8 @@ fact_caching = memory [privilege_escalation] #become=True -#become_method='sudo' -#become_user='root' +#become_method=sudo +#become_user=root #become_ask_pass=False [paramiko_connection] From f44f9569e1e795fe88c8c9c5fe1000fbeeb5895a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 6 Jul 2015 13:15:11 -0700 Subject: [PATCH 1117/3617] Test unquote works as expected and fix two bugs: * escaped end quote * a single quote character --- lib/ansible/parsing/splitter.py | 2 +- test/units/parsing/test_unquote.py | 58 ++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 test/units/parsing/test_unquote.py diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py index a1dc051d24c993..f2162814da83f0 100644 --- a/lib/ansible/parsing/splitter.py +++ b/lib/ansible/parsing/splitter.py @@ -264,7 +264,7 @@ def split_args(args): return params def is_quoted(data): - return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'") + return len(data) > 1 
and data[0] == data[-1] and data[0] in ('"', "'") and data[-2] != '\\' def unquote(data): ''' removes first and last quotes from a string, if the string starts and ends with the same quotes ''' diff --git a/test/units/parsing/test_unquote.py b/test/units/parsing/test_unquote.py new file mode 100644 index 00000000000000..afb11d4e2383ea --- /dev/null +++ b/test/units/parsing/test_unquote.py @@ -0,0 +1,58 @@ +# coding: utf-8 +# (c) 2015, Toshio Kuratomi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from nose import tools +from ansible.compat.tests import unittest + +from ansible.parsing.splitter import unquote + + +# Tests using nose's test generators cannot use unittest base class. 
+# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators +class TestUnquote: + UNQUOTE_DATA = ( + (u'1', u'1'), + (u'\'1\'', u'1'), + (u'"1"', u'1'), + (u'"1 \'2\'"', u'1 \'2\''), + (u'\'1 "2"\'', u'1 "2"'), + (u'\'1 \'2\'\'', u'1 \'2\''), + (u'"1\\"', u'"1\\"'), + (u'\'1\\\'', u'\'1\\\''), + (u'"1 \\"2\\" 3"', u'1 \\"2\\" 3'), + (u'\'1 \\\'2\\\' 3\'', u'1 \\\'2\\\' 3'), + (u'"', u'"'), + (u'\'', u'\''), + # Not entirely sure these are good but they match the current + # behaviour + (u'"1""2"', u'1""2'), + (u'\'1\'\'2\'', u'1\'\'2'), + (u'"1" 2 "3"', u'1" 2 "3'), + (u'"1"\'2\'"3"', u'1"\'2\'"3'), + ) + + def check_unquote(self, quoted, expected): + tools.eq_(unquote(quoted), expected) + + def test_unquote(self): + for datapoint in self.UNQUOTE_DATA: + yield self.check_unquote, datapoint[0], datapoint[1] From 5b0b1f8da6d713410037584679ebe99a0ce099f7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 6 Jul 2015 14:12:10 -0700 Subject: [PATCH 1118/3617] unquote strings in the ansible config file --- lib/ansible/constants.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index a771fe42c24e79..55bfd43f133b1f 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -22,10 +22,12 @@ import os import pwd import sys +from string import ascii_letters, digits +from six import string_types from six.moves import configparser -from string import ascii_letters, digits +from ansible.parsing.splitter import unquote from ansible.errors import AnsibleOptionsError # copied from utils, avoid circular reference fun :) @@ -49,8 +51,10 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False, elif floating: value = float(value) elif islist: - if isinstance(value, basestring): + if isinstance(value, string_types): value = [x.strip() for x in value.split(',')] + elif isinstance(value, string_types): + value = unquote(value) return value def 
_get_config(p, section, key, env_var, default): From 49e17b8ff67ff4d645c4ad2d0e80500d20579f8c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 6 Jul 2015 14:19:13 -0700 Subject: [PATCH 1119/3617] Get rid of an unused import so that we don't have circular imports --- lib/ansible/parsing/vault/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 27780551f44441..4892f2f0dbb8e0 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -49,7 +49,6 @@ def byte2int(bs): return ord(bs[0]) -from ansible import constants as C from ansible.utils.unicode import to_unicode, to_bytes From 8bfbe44e5b8f54596f8e556a85a1953f258a5523 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 6 Jul 2015 16:48:39 -0400 Subject: [PATCH 1120/3617] introduced non changing ansible_managed --- examples/ansible.cfg | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 4f5a35bf142624..f6b7208b2bcd20 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -83,10 +83,12 @@ timeout = 10 # if passing --private-key to ansible or ansible-playbook #private_key_file = /path/to/file -# format of string {{ ansible_managed }} available within Jinja2 +# format of string {{ ansible_managed }} available within Jinja2 # templates indicates to users editing templates files will be replaced. # replacing {file}, {host} and {uid} and strftime codes with proper values. -ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} +#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} +# This short version is better used in tempaltes as it won't flag the file as changed every run. 
+ansible_managed = Ansible managed: {file} on {host} # by default, ansible-playbook will display "Skipping [host]" if it determines a task # should not be run on a host. Set this to "False" if you don't want to see these "Skipping" From d74cf4677841552b804cd83ca2dd914c2b142384 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 6 Jul 2015 19:53:42 -0400 Subject: [PATCH 1121/3617] added route53_zone and some v2 features to changelog --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bc3a1a796e53d4..172f8ccbe7ff06 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,17 @@ Ansible Changes By Release ## 2.0 "TBD" - ACTIVE DEVELOPMENT Major Changes: + * Introducing the new block/rescue/always directives, allow for making task blocks and introducing exception like semantics + * New stratergy plugins, allow to control the flow of execution of tasks per play, the default will be the same as before + * Improved error handling, now you get much more detailed parser messages. General exception handling and display has been revamped. + * Task includes now get evaluated during execution, end behaviour will be the same but it now allows for more dynamic includes and options. + * First feature of the more dynamic includes is that with_ loops are now usable with them. 
+ * callback, connection and lookup plugin APIs have changed, some will require modification to work with new version + * callbacks are now shipped in the active directory and don't need to be copied, just whitelisted in ansible.cfg + * Many API changes, this will break those currently using it directly, but the new API is much easier to use and test + * Settings are now more inheritable, what you set at play, block or role will be automatically inhertited by the contained, + this allows for new feautures to automatically be settable at all levels, previouslly we had to manually code this + * Many more tests, new API makes things more testable and we took advantage of it * big_ip modules now support turning off ssl certificate validation (use only for self signed) * template code now retains types for bools and Numbers instead of turning them into strings If you need the old behaviour, quote the value and it will get passed around as a string @@ -24,6 +35,7 @@ New Modules: * amazon: elasticache_subnet_group * amazon: iam * amazon: iam_policy + * amazon: route53_zone * bundler * circonus_annotation * consul From a6aedbcc51e870cb662b5ee3f9615daa4316149e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 6 Jul 2015 23:24:00 -0400 Subject: [PATCH 1122/3617] now correctly picks up old become password host vars --- lib/ansible/executor/connection_info.py | 9 +++++++++ lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 162cb6004d8f19..fc554f577c08c8 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -165,8 +165,10 @@ def __init__(self, play=None, options=None, passwords=None): # backwards compat self.sudo_exe = None self.sudo_flags = None + self.sudo_pass = None self.su_exe = None self.su_flags = None + self.su_pass = None # general flags (should we move 
out?) self.verbosity = 0 @@ -295,6 +297,13 @@ def set_task_and_host_override(self, task, host): if variable_name in variables: setattr(new_info, attr, variables[variable_name]) + # become legacy updates + if not new_info.become_pass: + if new_info.become_method == 'sudo' and new_info.sudo_pass: + setattr(new_info, 'become_pass', new_info.sudo_pass) + elif new_info.become_method == 'su' and new_info.su_pass: + setattr(new_info, 'become_pass', new_info.su_pass) + return new_info def make_become_cmd(self, cmd, executable=C.DEFAULT_EXECUTABLE): diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index abdd96ed1e966a..ff69ce7912e2ce 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit abdd96ed1e966a290cdcdb4cb9f8d2a7c03ae59e +Subproject commit ff69ce7912e2cee53e6737e377853a49c0482b1c diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 195ef57bfb254e..4e48ef9ecace3a 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 195ef57bfb254e719aa7ea3a6ad30729e3036b87 +Subproject commit 4e48ef9ecace3a6eb92e3e4d2ef1a3ea2b7e33ab From 2a328ab61d25725c9a171cf21781c1712310d877 Mon Sep 17 00:00:00 2001 From: Jacek Laskowski Date: Tue, 7 Jul 2015 11:28:20 +0200 Subject: [PATCH 1123/3617] Update index.rst --- docsite/rst/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst index 26db29ab82f43c..936a485c9e482a 100644 --- a/docsite/rst/index.rst +++ b/docsite/rst/index.rst @@ -11,9 +11,9 @@ such as continuous deployments or zero downtime rolling updates. Ansible's main goals are simplicity and ease-of-use. 
It also has a strong focus on security and reliability, featuring a minimum of moving parts, usage of OpenSSH for transport (with an accelerated socket mode and pull modes as alternatives), and a language that is designed around auditability by humans--even those not familiar with the program. -We believe simplicity is relevant to all sizes of environments, so we design for busy users of all types: developers, sysadmins, release engineers, IT managers, and everyone in between. Ansible is appropriate for managing all ennvironements, from small setups with a handful of instances to enterprise environments with many thousands of instances. +We believe simplicity is relevant to all sizes of environments, so we design for busy users of all types: developers, sysadmins, release engineers, IT managers, and everyone in between. Ansible is appropriate for managing all environments, from small setups with a handful of instances to enterprise environments with many thousands of instances. -Ansible manages machines in an agentless manner. There is never a question of how to +Ansible manages machines in an agent-less manner. There is never a question of how to upgrade remote daemons or the problem of not being able to manage systems because daemons are uninstalled. Because OpenSSH is one of the most peer-reviewed open source components, security exposure is greatly reduced. Ansible is decentralized--it relies on your existing OS credentials to control access to remote machines. If needed, Ansible can easily connect with Kerberos, LDAP, and other centralized authentication management systems. This documentation covers the current released version of Ansible (1.9.1) and also some development version features (2.0). For recent features, we note in each section the version of Ansible where the feature was added. 
From 156dab31e24ef588292b454d4ef5b4fd1f9e1257 Mon Sep 17 00:00:00 2001 From: Bruno Galindro da Costa Date: Tue, 7 Jul 2015 07:49:06 -0300 Subject: [PATCH 1124/3617] * Fix NameError: global name 'handler' is not defined * Update log message format --- plugins/callbacks/syslog_json.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/plugins/callbacks/syslog_json.py b/plugins/callbacks/syslog_json.py index 8e0b3e40916364..2e339e96aeb1c3 100644 --- a/plugins/callbacks/syslog_json.py +++ b/plugins/callbacks/syslog_json.py @@ -4,6 +4,7 @@ import logging import logging.handlers +import socket class CallbackModule(object): """ @@ -26,22 +27,23 @@ def __init__(self): os.getenv('SYSLOG_PORT',514)), facility=logging.handlers.SysLogHandler.LOG_USER ) - self.logger.addHandler(handler) + self.logger.addHandler(self.handler) + self.hostname = socket.gethostname() def on_any(self, *args, **kwargs): pass def runner_on_failed(self, host, res, ignore_errors=False): - self.logger.info('RUNNER_ON_FAILED ' + host + ' ' + json.dumps(res, sort_keys=True)) + self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) def runner_on_ok(self, host, res): - self.logger.info('RUNNER_ON_OK ' + host + ' ' + json.dumps(res, sort_keys=True)) + self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) def runner_on_skipped(self, host, item=None): - self.logger.info('RUNNER_ON_SKIPPED ' + host + ' ...') + self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) def runner_on_unreachable(self, host, res): - self.logger.info('RUNNER_UNREACHABLE ' + host + ' ' + json.dumps(res, sort_keys=True)) + self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,json.dumps(res, 
sort_keys=True))) def runner_on_no_hosts(self): pass @@ -53,7 +55,7 @@ def runner_on_async_ok(self, host, res): pass def runner_on_async_failed(self, host, res): - self.logger.info('RUNNER_SYNC_FAILED ' + host + ' ' + json.dumps(res, sort_keys=True)) + self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) def playbook_on_start(self): pass @@ -77,10 +79,10 @@ def playbook_on_setup(self): pass def playbook_on_import_for_host(self, host, imported_file): - self.logger.info('PLAYBOOK_ON_IMPORTED ' + host + ' ' + json.dumps(res, sort_keys=True)) + self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) def playbook_on_not_import_for_host(self, host, missing_file): - self.logger.info('PLAYBOOK_ON_NOTIMPORTED ' + host + ' ' + json.dumps(res, sort_keys=True)) + self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) def playbook_on_play_start(self, name): pass From 796c7accd191999ecd6ada326d9f1f693ec12895 Mon Sep 17 00:00:00 2001 From: Jacek Laskowski Date: Tue, 7 Jul 2015 14:03:46 +0200 Subject: [PATCH 1125/3617] Update intro_inventory.rst Minor fix for consistency (and more engaging language :)) --- docsite/rst/intro_inventory.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_inventory.rst b/docsite/rst/intro_inventory.rst index 70709890cd08f8..f3d8b0cdc510e2 100644 --- a/docsite/rst/intro_inventory.rst +++ b/docsite/rst/intro_inventory.rst @@ -240,7 +240,7 @@ Examples from a host file:: :doc:`intro_adhoc` Examples of basic commands :doc:`playbooks` - Learning ansible's configuration management language + Learning Ansible’s configuration, deployment, and orchestration language. `Mailing List `_ Questions? Help? Ideas? 
Stop by the list on Google Groups `irc.freenode.net `_ From 9bf39e78756f5c34e3d6064afb0dd2d84574e373 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 7 Jul 2015 08:51:39 -0400 Subject: [PATCH 1126/3617] reversed cache check condition to actually work fixes #11505 --- plugins/inventory/vmware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inventory/vmware.py b/plugins/inventory/vmware.py index 27330b8bcdef8f..1d533a5e157645 100755 --- a/plugins/inventory/vmware.py +++ b/plugins/inventory/vmware.py @@ -115,7 +115,7 @@ def _get_cache(self, name, default=None): else: cache_max_age = 0 cache_stat = os.stat(cache_file) - if (cache_stat.st_mtime + cache_max_age) < time.time(): + if (cache_stat.st_mtime + cache_max_age) >= time.time(): with open(cache_file) as cache: return json.load(cache) return default From b7f7760f3906b2ae1625f3ffc505a5ef2d3d5626 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 7 Jul 2015 08:52:46 -0400 Subject: [PATCH 1127/3617] removed unused file --- plugins/connections/README.md | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 plugins/connections/README.md diff --git a/plugins/connections/README.md b/plugins/connections/README.md deleted file mode 100644 index ec857be9e247cd..00000000000000 --- a/plugins/connections/README.md +++ /dev/null @@ -1,4 +0,0 @@ -Connections are also pluggable, see lib/ansible/runner/connection_plugins/ for the ones that ship with ansible. - -When non-core alternatives are available, they can be shared here. 
- From d198b18c1438cb2b92a749b00890edbffaf4d90d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 7 Jul 2015 10:41:42 -0400 Subject: [PATCH 1128/3617] added win_regedit module to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 172f8ccbe7ff06..60a53b88a5c1bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -113,6 +113,7 @@ New Modules: * win_iis_webapppool * win_iis_webbinding * win_iis_website + * win_regedit * zabbix_host * zabbix_hostmacro * zabbix_screen From 50efeb13bab572bcb5a2163fd2dea2d1785d4967 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 7 Jul 2015 11:59:20 -0400 Subject: [PATCH 1129/3617] made squashable with_ plugin list configurable partially deals with #11383 --- lib/ansible/constants.py | 7 +++++-- lib/ansible/executor/task_executor.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 55bfd43f133b1f..72623bd5832eba 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -167,7 +167,9 @@ def shell_expand_path(path): DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True) -# Plugin paths +# PLUGINS +DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, yum, pkgng, zypper, dnf", islist=True) +# paths DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins') DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins') DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins') @@ -176,7 +178,7 @@ def shell_expand_path(path): 
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins') DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins') DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default') - +# cache CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory') CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None) CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts') @@ -205,6 +207,7 @@ def shell_expand_path(path): ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) + # obsolete -- will be formally removed ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True) ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index ae840a4de6932f..26d3f8cb69806d 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -50,7 +50,7 @@ class TaskExecutor: # Modules that we optimize by squashing loop items into a single call to # the module - SQUASH_ACTIONS = frozenset(('apt', 'yum', 'pkgng', 'zypper', 'dnf')) + SQUASH_ACTIONS = frozenset(C.DEFAULT_SQUASH_ACTIONS) def __init__(self, host, task, job_vars, connection_info, new_stdin, loader, shared_loader_obj): self._host = host From 314bae2a9e26edb42e57aca6ffb4e9e6e1641351 Mon Sep 17 00:00:00 2001 From: 
Marc Abramowitz Date: Tue, 7 Jul 2015 09:31:00 -0700 Subject: [PATCH 1130/3617] Don't wrap text for AnsibleParserError This allows not messing up the wonderful error reporting that is carefully created. Instead of: $ ansible-playbook foo.yml [ERROR]: ERROR! 'foo' is not a valid attribute for a Task The error appears to have been in '/Users/marca/dev/git-repos/ansible/foo.yml': line 4, column 7, but may be elsewhere in the file depending on the exact syntax problem. The offending line appears to be: tasks: - name: do something ^ here we get: $ ansible-playbook foo.yml ERROR! 'foo' is not a valid attribute for a Task The error appears to have been in '/Users/marca/dev/git-repos/ansible/foo.yml': line 4, column 7, but may be elsewhere in the file depending on the exact syntax problem. The offending line appears to be: tasks: - name: do something ^ here which is much nicer. --- bin/ansible | 2 +- lib/ansible/utils/display.py | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/bin/ansible b/bin/ansible index 03a50fd9438b43..d64c069251205d 100755 --- a/bin/ansible +++ b/bin/ansible @@ -80,7 +80,7 @@ if __name__ == '__main__': display.error(str(e)) sys.exit(5) except AnsibleParserError as e: - display.error(str(e)) + display.error(str(e), wrap_text=False) sys.exit(4) # TQM takes care of these, but leaving comment to reserve the exit codes # except AnsibleHostUnreachable as e: diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 6c5e850a700cba..ab3a06a5ed3c38 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -182,10 +182,13 @@ def banner_cowsay(self, msg, color=None): (out, err) = cmd.communicate() self.display("%s\n" % out, color=color) - def error(self, msg): - new_msg = "\n[ERROR]: %s" % msg - wrapped = textwrap.wrap(new_msg, 79) - new_msg = "\n".join(wrapped) + "\n" + def error(self, msg, wrap_text=True): + if wrap_text: + new_msg = "\n[ERROR]: %s" % msg + wrapped = 
textwrap.wrap(new_msg, 79) + new_msg = "\n".join(wrapped) + "\n" + else: + new_msg = msg if new_msg not in self._errors: self.display(new_msg, color='red', stderr=True) self._errors[new_msg] = 1 From 08fcd8233178c896b3516f9354f637da6f2d6191 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 7 Jul 2015 12:39:11 -0400 Subject: [PATCH 1131/3617] added os_security_group_rule to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 60a53b88a5c1bb..31ae1f80ef14af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ New Modules: * openstack: os_network * openstack: os_object * openstack: os_security_group + * openstack: os_security_group_rule * openstack: os_server * openstack: os_server_actions * openstack: os_server_facts From 135fa41e3a50066720ecfbfaf1e648072b0171f2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 7 Jul 2015 10:54:36 -0700 Subject: [PATCH 1132/3617] Update submodules refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index ff69ce7912e2ce..8257053756766a 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit ff69ce7912e2cee53e6737e377853a49c0482b1c +Subproject commit 8257053756766ad52b43e22e413343b0fedf7e69 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 4e48ef9ecace3a..639902ff2081aa 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 4e48ef9ecace3a6eb92e3e4d2ef1a3ea2b7e33ab +Subproject commit 639902ff2081aa7f90e051878a3abf3f1a67eac4 From 614c626ed0b7fb7913904cfe26dc001022a35d38 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Jul 2015 14:19:49 -0400 Subject: [PATCH 1133/3617] Fix no hosts remaining logic in linear strategy --- lib/ansible/plugins/strategies/linear.py | 15 ++++++++------- 1 file changed, 8 
insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index 70ab50d8eacbe4..3d14f2d49b4b63 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -130,14 +130,8 @@ def run(self, iterator, connection_info): try: debug("getting the remaining hosts for this loop") - self._tqm._failed_hosts = iterator.get_failed_hosts() - hosts_left = self.get_hosts_remaining(iterator._play) + hosts_left = self._inventory.get_hosts(iterator._play.hosts) debug("done getting the remaining hosts for this loop") - if len(hosts_left) == 0: - debug("out of hosts to run on") - self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') - result = False - break # queue up this task for each host in the inventory callback_sent = False @@ -145,6 +139,7 @@ def run(self, iterator, connection_info): host_results = [] host_tasks = self._get_next_task_lockstep(hosts_left, iterator) + for (host, task) in host_tasks: if not task: continue @@ -208,6 +203,12 @@ def run(self, iterator, connection_info): if run_once: break + if not work_to_do: + debug("out of hosts to run on") + self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') + result = False + break + debug("done queuing things up, now waiting for results queue to drain") results = self._wait_on_pending_results(iterator) host_results.extend(results) From 6d50a261c590c61320c4762b5a5f706cb9620ee5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Jul 2015 14:31:15 -0400 Subject: [PATCH 1134/3617] Allow full exception tracebacks to be displayed with increased verbosity --- bin/ansible | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/bin/ansible b/bin/ansible index d64c069251205d..3a17861cebec4a 100755 --- a/bin/ansible +++ b/bin/ansible @@ -34,6 +34,7 @@ except Exception: import os import sys +import traceback from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError 
from ansible.utils.display import Display @@ -41,9 +42,11 @@ from ansible.utils.display import Display ######################################## ### OUTPUT OF LAST RESORT ### class LastResort(object): - def error(self, msg): + def display(self, msg): print(msg, file=sys.stderr) + error = display + ######################################## if __name__ == '__main__': @@ -96,5 +99,10 @@ if __name__ == '__main__': display.error("User interrupted execution") sys.exit(99) except Exception as e: + have_cli_options = cli is not None and cli.options is not None display.error("Unexpected Exception: %s" % str(e)) + if not have_cli_options or have_cli_options and cli.options.verbosity > 2: + display.display("the full traceback was:\n\n%s" % traceback.format_exc()) + else: + display.display("to see the full traceback, use -vvv") sys.exit(250) From 49a148056c86a5ef047a3004a7a0190349adef2b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 7 Jul 2015 12:05:07 -0700 Subject: [PATCH 1135/3617] Ensure that we're dealing with byte str when we print or log messages --- lib/ansible/utils/display.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index ab3a06a5ed3c38..a9a4f8bb50a520 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -28,6 +28,7 @@ from ansible import constants as C from ansible.errors import AnsibleError from ansible.utils.color import stringc +from ansible.utils.unicode import to_bytes class Display: @@ -70,25 +71,21 @@ def display(self, msg, color=None, stderr=False, screen_only=False, log_only=Fal if color: msg2 = stringc(msg, color) if not log_only: + b_msg2 = to_bytes(msg2) if not stderr: - try: - print(msg2) - except UnicodeEncodeError: - print(msg2.encode('utf-8')) + print(b_msg2) else: - try: - print(msg2, file=sys.stderr) - except UnicodeEncodeError: - print(msg2.encode('utf-8'), file=sys.stderr) + print(b_msg2, file=sys.stderr) if 
C.DEFAULT_LOG_PATH != '': while msg.startswith("\n"): msg = msg.replace("\n","") + b_msg = to_bytes(msg) # FIXME: logger stuff needs to be implemented #if not screen_only: # if color == 'red': - # logger.error(msg) + # logger.error(b_msg) # else: - # logger.info(msg) + # logger.info(b_msg) def vv(self, msg, host=None): return self.verbose(msg, host=host, caplevel=1) From 688088547b80f74708afbcb5066be75fe3f3ab2a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 7 Jul 2015 15:58:18 -0400 Subject: [PATCH 1136/3617] new human_readable filter to transform bits and bytes into cake (not really) --- lib/ansible/plugins/filter/mathstuff.py | 29 +++++++++++++++++++ .../roles/test_filters/tasks/main.yml | 8 +++++ 2 files changed, 37 insertions(+) diff --git a/lib/ansible/plugins/filter/mathstuff.py b/lib/ansible/plugins/filter/mathstuff.py index c6a49485a40bfd..516ef1c67748aa 100644 --- a/lib/ansible/plugins/filter/mathstuff.py +++ b/lib/ansible/plugins/filter/mathstuff.py @@ -101,6 +101,32 @@ def inversepower(x, base=2): raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e)) +def human_readable(size, isbits=False, unit=None): + + base = 'bits' if isbits else 'Bytes' + suffix = '' + + ranges = ( + (1<<70L, 'Z'), + (1<<60L, 'E'), + (1<<50L, 'P'), + (1<<40L, 'T'), + (1<<30L, 'G'), + (1<<20L, 'M'), + (1<<10L, 'K'), + (1, base) + ) + + for limit, suffix in ranges: + if (unit is None and size >= limit) or \ + unit is not None and unit.upper() == suffix: + break + + if limit != 1: + suffix += base[0] + + return '%.2f %s' % (float(size)/ limit, suffix) + class FilterModule(object): ''' Ansible math jinja2 filters ''' @@ -123,4 +149,7 @@ def filters(self): 'symmetric_difference': symmetric_difference, 'union': union, + # computer theory + 'human_readable' : human_readable, + } diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml index 3d1ee322e30e96..e0a2281501782d 100644 --- 
a/test/integration/roles/test_filters/tasks/main.yml +++ b/test/integration/roles/test_filters/tasks/main.yml @@ -41,3 +41,11 @@ that: - 'diff_result.stdout == ""' +- name: Verify human_readable + assert: + that: + - '"10.00 KB" == 10240|human_readable' + - '"97.66 MB" == 102400000|human_readable' + - '"0.10 GB" == 102400000|human_readable(unit="G")' + - '"0.10 Gb" == 102400000|human_readable(isbits=True, unit="G")' + From 293dd38d05e53570fe394e646167ae4449c5aa94 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Jul 2015 15:47:51 -0400 Subject: [PATCH 1137/3617] Correctly handle assigning results to the delegated to host --- lib/ansible/executor/process/result.py | 5 +-- lib/ansible/plugins/strategies/__init__.py | 38 +++++++++++++++------- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 8810001702c8ff..8bf0fa34acee27 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -150,11 +150,12 @@ def run(self): self._send_result(('add_group', result._host, result_item)) elif 'ansible_facts' in result_item: # if this task is registering facts, do that now + item = result_item.get('item', None) if result._task.action in ('set_fact', 'include_vars'): for (key, value) in result_item['ansible_facts'].iteritems(): - self._send_result(('set_host_var', result._host, key, value)) + self._send_result(('set_host_var', result._host, result._task, item, key, value)) else: - self._send_result(('set_host_facts', result._host, result_item['ansible_facts'])) + self._send_result(('set_host_facts', result._host, result._task, item, result_item['ansible_facts'])) # finally, send the ok for this task self._send_result(('host_task_ok', result)) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index a298b199889a2b..9173a2f3784bb0 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ 
b/lib/ansible/plugins/strategies/__init__.py @@ -30,6 +30,7 @@ from ansible.playbook.helpers import load_list_of_blocks from ansible.playbook.role import ROLE_CACHE, hash_params from ansible.plugins import _basedirs, filter_loader, lookup_loader, module_loader +from ansible.template import Templar from ansible.utils.debug import debug @@ -222,16 +223,31 @@ def _process_pending_results(self, iterator): if host not in self._notified_handlers[handler_name]: self._notified_handlers[handler_name].append(host) - elif result[0] == 'set_host_var': - host = result[1] - var_name = result[2] - var_value = result[3] - self._variable_manager.set_host_variable(host, var_name, var_value) - - elif result[0] == 'set_host_facts': - host = result[1] - facts = result[2] - self._variable_manager.set_host_facts(host, facts) + elif result[0] in ('set_host_var', 'set_host_facts'): + host = result[1] + task = result[2] + item = result[3] + + if task.delegate_to is not None: + task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) + task_vars = self.add_tqm_variables(task_vars, play=iterator._play) + if item is not None: + task_vars['item'] = item + templar = Templar(loader=self._loader, variables=task_vars) + host_name = templar.template(task.delegate_to) + target_host = self._inventory.get_host(host_name) + if target_host is None: + target_host = Host(name=host_name) + else: + target_host = host + + if result[0] == 'set_host_var': + var_name = result[4] + var_value = result[5] + self._variable_manager.set_host_variable(target_host, var_name, var_value) + elif result[0] == 'set_host_facts': + facts = result[4] + self._variable_manager.set_host_facts(target_host, facts) else: raise AnsibleError("unknown result message received: %s" % result[0]) @@ -267,7 +283,7 @@ def _add_host(self, host_info): if host_name in self._inventory._hosts_cache: new_host = self._inventory._hosts_cache[host_name] else: - new_host = Host(host_name) + new_host = 
Host(name=host_name) self._inventory._hosts_cache[host_name] = new_host allgroup = self._inventory.get_group('all') From da307c8bfdfdb4dbd073bef97a72cb78c23ff879 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Jul 2015 16:09:11 -0400 Subject: [PATCH 1138/3617] Fix bug in logic introduced in 614c626 --- lib/ansible/plugins/strategies/linear.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py index 3d14f2d49b4b63..23c1eec0494838 100644 --- a/lib/ansible/plugins/strategies/linear.py +++ b/lib/ansible/plugins/strategies/linear.py @@ -203,16 +203,16 @@ def run(self, iterator, connection_info): if run_once: break - if not work_to_do: + debug("done queuing things up, now waiting for results queue to drain") + results = self._wait_on_pending_results(iterator) + host_results.extend(results) + + if not work_to_do and len(iterator.get_failed_hosts()) > 0: debug("out of hosts to run on") self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') result = False break - debug("done queuing things up, now waiting for results queue to drain") - results = self._wait_on_pending_results(iterator) - host_results.extend(results) - try: included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager) except AnsibleError, e: From bfbb88b4a96ba66eb39cb4aeac5053c0c195f7c6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Jul 2015 16:26:24 -0400 Subject: [PATCH 1139/3617] Fix strategy plugin unit tests related to earlier changes --- test/units/plugins/strategies/test_strategy_base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 5298b1e42bff79..28f1d254391cb6 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ 
b/test/units/plugins/strategies/test_strategy_base.py @@ -261,12 +261,12 @@ def _get_group(group_name): self.assertIn('test handler', strategy_base._notified_handlers) self.assertIn(mock_host, strategy_base._notified_handlers['test handler']) - queue_items.append(('set_host_var', mock_host, 'foo', 'bar')) + queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar')) results = strategy_base._process_pending_results(iterator=mock_iterator) self.assertEqual(len(results), 0) self.assertEqual(strategy_base._pending_results, 1) - queue_items.append(('set_host_facts', mock_host, 'foo', dict())) + queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict())) results = strategy_base._process_pending_results(iterator=mock_iterator) self.assertEqual(len(results), 0) self.assertEqual(strategy_base._pending_results, 1) From f67949e42c3db5a0c6c242eecdd963f78cbfeb4d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Jul 2015 16:48:19 -0400 Subject: [PATCH 1140/3617] Readd logic for ansible_managed to template action plugin Fixes #11317 --- lib/ansible/plugins/action/template.py | 32 ++++++++++++++++++++++++++ lib/ansible/vars/__init__.py | 2 ++ 2 files changed, 34 insertions(+) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 54520b2f7e6cbb..b8346cb6f9eb9e 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -18,10 +18,14 @@ __metaclass__ = type import base64 +import datetime import os +import time +from ansible import constants as C from ansible.plugins.action import ActionBase from ansible.utils.hashing import checksum_s +from ansible.utils.unicode import to_bytes class ActionModule(ActionBase): @@ -97,7 +101,35 @@ def run(self, tmp=None, task_vars=dict()): try: with open(source, 'r') as f: template_data = f.read() + + try: + template_uid = pwd.getpwuid(os.stat(source).st_uid).pw_name + except: + template_uid = os.stat(source).st_uid + 
+ vars = task_vars.copy() + vars['template_host'] = os.uname()[1] + vars['template_path'] = source + vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(source)) + vars['template_uid'] = template_uid + vars['template_fullpath'] = os.path.abspath(source) + vars['template_run_date'] = datetime.datetime.now() + + managed_default = C.DEFAULT_MANAGED_STR + managed_str = managed_default.format( + host = vars['template_host'], + uid = vars['template_uid'], + file = to_bytes(vars['template_path']) + ) + vars['ansible_managed'] = time.strftime( + managed_str, + time.localtime(os.path.getmtime(source)) + ) + + old_vars = self._templar._available_variables + self._templar.set_available_variables(vars) resultant = self._templar.template(template_data, preserve_trailing_newlines=True) + self._templar.set_available_variables(old_vars) except Exception as e: return dict(failed=True, msg=type(e).__name__ + ": " + str(e)) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 990f3660eec334..740f8912fbe53a 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -243,6 +243,8 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): # the 'omit' value alows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token + + # make vars self referential, so people can do things like 'vars[var_name]' all_vars['vars'] = all_vars.copy() #CACHED_VARS[cache_entry] = all_vars From 2962047b438e46e874efa3bec846eeb60e0b89e8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 7 Jul 2015 17:55:17 -0400 Subject: [PATCH 1141/3617] ported 1.9.2 changelog into devel --- CHANGELOG.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31ae1f80ef14af..bb0d59fdd9ab8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -126,6 +126,34 @@ New Inventory scripts: Other Notable Changes: +## 1.9.2 "Dancing In the 
Street" - Jun 26, 2015 + +* Security fixes to check that hostnames match certificates with https urls (CVE-2015-3908) + - get_url and uri modules + - url and etcd lookup plugins +* Security fixes to the zone (Solaris containers), jail (bsd containers), + and chroot connection plugins. These plugins can be used to connect to + their respective container types in lieu of the standard ssh connection. + Prior to this fix being applied these connection plugins didn't properly + handle symlinks within the containers which could lead to files intended to + be written to or read from the container being written to or read from the + host system instead. (CVE pending) +* Fixed a bug in the service module where init scripts were being incorrectly used instead of upstart/systemd. +* Fixed a bug where sudo/su settings were not inherited from ansible.cfg correctly. +* Fixed a bug in the rds module where a traceback may occur due to an unbound variable. +* Fixed a bug where on certain remote file systems the SELinux context was not being properly set. +* Re-enabled several windows modules which had been partially merged (via action plugins): + - win_copy.ps1 + - win_copy.py + - win_file.ps1 + - win_file.py + - win_template.py +* Fix bug using with_sequence and a count that is zero. Also allows counting backwards instead of forwards +* Fix get_url module bug preventing use of custom ports with https urls +* Fix bug disabling repositories in the yum module. +* Fix giving yum module a url to install a package from on RHEL/CENTOS5 +* Fix bug in dnf module preventing it from working when yum-utils was not already installed + ## 1.9.1 "Dancing In the Street" - Apr 27, 2015 * Fixed a bug related to Kerberos auth when using winrm with a domain account. 
From ec145a61afa749315684c81d3ebdea95c748182b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 7 Jul 2015 19:44:35 -0400 Subject: [PATCH 1142/3617] added os_floating_ip module and deprecated quantum_open_ip in changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb0d59fdd9ab8b..f4f3fdaa0f054e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ Deprecated Modules (new ones in parens): * quantum_network (os_network) * glance_image * nova_compute (os_server) + * quantum_floating_ip (os_floating_ip) New Modules: * amazon: ec2_ami_copy @@ -67,6 +68,7 @@ New Modules: * openstack: os_ironic * openstack: os_ironic_node * openstack: os_client_config + * openstack: os_floating_ip * openstack: os_image * openstack: os_network * openstack: os_object From 48827a31bc7694a3f9bef2c20547034ba85ed696 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 7 Jul 2015 20:11:42 -0400 Subject: [PATCH 1143/3617] added minimal testing for special template vars --- test/integration/non_destructive.yml | 1 + .../roles/test_special_vars/meta/main.yml | 3 ++ .../roles/test_special_vars/tasks/main.yml | 37 +++++++++++++++++++ .../roles/test_special_vars/templates/foo.j2 | 7 ++++ .../roles/test_special_vars/vars/main.yml | 0 5 files changed, 48 insertions(+) create mode 100644 test/integration/roles/test_special_vars/meta/main.yml create mode 100644 test/integration/roles/test_special_vars/tasks/main.yml create mode 100644 test/integration/roles/test_special_vars/templates/foo.j2 create mode 100644 test/integration/roles/test_special_vars/vars/main.yml diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml index 0c4c5be49651a4..1ce0724d7df2d2 100644 --- a/test/integration/non_destructive.yml +++ b/test/integration/non_destructive.yml @@ -14,6 +14,7 @@ - { role: test_copy, tags: test_copy } - { role: test_stat, tags: test_stat } - { role: test_template, tags: test_template } + - { role: 
test_special_vars, tags: test_special_vars } - { role: test_file, tags: test_file } - { role: test_fetch, tags: test_fetch } - { role: test_synchronize, tags: test_synchronize } diff --git a/test/integration/roles/test_special_vars/meta/main.yml b/test/integration/roles/test_special_vars/meta/main.yml new file mode 100644 index 00000000000000..a8b63dfdf266ab --- /dev/null +++ b/test/integration/roles/test_special_vars/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + diff --git a/test/integration/roles/test_special_vars/tasks/main.yml b/test/integration/roles/test_special_vars/tasks/main.yml new file mode 100644 index 00000000000000..653bf7b905517c --- /dev/null +++ b/test/integration/roles/test_special_vars/tasks/main.yml @@ -0,0 +1,37 @@ +# test code for the template module +# (c) 2015, Brian Coca + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +- name: verify ansible_managed + template: src=foo.j2 dest={{output_dir}}/special_vars.yaml + +- name: read the file into facts + include_vars: "{{output_dir}}/special_vars.yaml" + + +- name: verify all test vars are defined + assert: + that: + - 'item in hostvars[inventory_hostname].keys()' + with_items: + - test_template_host + - test_template_path + - test_template_mtime + - test_template_uid + - test_template_fullpath + - test_template_run_date + - test_ansible_managed diff --git a/test/integration/roles/test_special_vars/templates/foo.j2 b/test/integration/roles/test_special_vars/templates/foo.j2 new file mode 100644 index 00000000000000..0f6db2a16629b6 --- /dev/null +++ b/test/integration/roles/test_special_vars/templates/foo.j2 @@ -0,0 +1,7 @@ +test_template_host: "{{template_host}}" +test_template_path: "{{template_path}}" +test_template_mtime: "{{template_mtime}}" +test_template_uid: "{{template_uid}}" +test_template_fullpath: "{{template_fullpath}}" +test_template_run_date: "{{template_run_date}}" +test_ansible_managed: "{{ansible_managed}}" diff --git a/test/integration/roles/test_special_vars/vars/main.yml b/test/integration/roles/test_special_vars/vars/main.yml new file mode 100644 index 00000000000000..e69de29bb2d1d6 From 2e5dfd57cc9c1a806a0ac3a23f8036f6f32127af Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 7 Jul 2015 21:46:44 -0400 Subject: [PATCH 1144/3617] Clear flag indicating role had run before each play is run Fixes #11514 --- lib/ansible/executor/playbook_executor.py | 5 +++++ lib/ansible/playbook/role/__init__.py | 6 +++++- lib/ansible/plugins/strategies/__init__.py | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 91d5a69fc1fde6..1a7301992b1251 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -25,6 +25,7 @@ from ansible.errors import * from 
ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook +from ansible.playbook.role import role_reset_has_run from ansible.plugins import module_loader from ansible.template import Templar @@ -83,6 +84,10 @@ def run(self): self._display.vv('%d plays in %s' % (len(plays), playbook_path)) for play in plays: + # clear out the flag on all roles indicating they had any tasks run + role_reset_has_run() + + # clear any filters which may have been applied to the inventory self._inventory.remove_restriction() # Create a temporary copy of the play here, so we can run post_validate diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index c84f0f8677576c..120b851ccf38c6 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -41,7 +41,7 @@ from ansible.utils.vars import combine_vars -__all__ = ['Role', 'ROLE_CACHE', 'hash_params'] +__all__ = ['Role', 'ROLE_CACHE', 'hash_params', 'role_reset_has_run'] # FIXME: this should be a utility function, but can't be a member of # the role due to the fact that it would require the use of self @@ -70,6 +70,10 @@ def hash_params(params): # will be based on the repr() of the dictionary object) ROLE_CACHE = dict() +def role_reset_has_run(): + for (role_name, cached_roles) in ROLE_CACHE.iteritems(): + for (hashed_params, role) in cached_roles.iteritems(): + role._had_task_run = False class Role(Base, Become, Conditional, Taggable): diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 9173a2f3784bb0..0452a7616dd432 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -195,7 +195,7 @@ def _process_pending_results(self, iterator): # with the correct object and mark it as executed for (entry, role_obj) in ROLE_CACHE[task_result._task._role._role_name].iteritems(): hashed_entry = 
hash_params(task_result._task._role._role_params) - if entry == hashed_entry : + if entry == hashed_entry: role_obj._had_task_run = True ret_results.append(task_result) From 8f0496d7ceb3b19f5948ee28f091e768cafdaeee Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 8 Jul 2015 09:15:55 -0400 Subject: [PATCH 1145/3617] Fix usage of set_host_var when registering a result var Fixes #11521 --- lib/ansible/executor/process/result.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 8bf0fa34acee27..4041021b164b27 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -107,7 +107,7 @@ def run(self): # if this task is registering a result, do it now if result._task.register: - self._send_result(('set_host_var', result._host, result._task.register, result._result)) + self._send_result(('set_host_var', result._host, result._task, None, result._task.register, result._result)) # send callbacks, execute other options based on the result status # FIXME: this should all be cleaned up and probably moved to a sub-function. From 44d302ee662594a9da0c43d3edcfbee0ab612abe Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 10:11:43 -0400 Subject: [PATCH 1146/3617] for ansibot compensation --- ticket_stubs/needs_template.md | 36 ++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 ticket_stubs/needs_template.md diff --git a/ticket_stubs/needs_template.md b/ticket_stubs/needs_template.md new file mode 100644 index 00000000000000..894532b5e77b25 --- /dev/null +++ b/ticket_stubs/needs_template.md @@ -0,0 +1,36 @@ +Can You Help Us Out? +==================== + +Thanks for filing a ticket! I am the friendly GitHub Ansibot. + +It looks like you might not have filled out the issue description based on our standard issue template. 
You might not have known about that, and that's ok too, we'll tell you how to do it. + +We have a standard template because Ansible is a really busy project and it helps to have some standard information in each ticket, and GitHub doesn't yet provide a standard facility to do this like some other bug trackers. We hope you understand as this is really valuable to us! + +Solving this is simple: please copy the contents of this [template](https://raw.githubusercontent.com/ansible/ansible/devel/ISSUE_TEMPLATE.md) and **paste it into the description** of your ticket. That's it! + +If You Had A Question To Ask Instead +==================================== + +If you happened to have a "how do I do this in Ansible" type of question, that's probably more of a user-list question than a bug report, and you should probably ask this question on the project mailing list instead. + +However, if you think you have a bug, the report is the way to go! We definitely want all the bugs filed :) Just trying to help! + +About Priority Tags +=================== + +Since you're here, we'll also share some useful information at this time. + +In general tickets will be assigned a priority between P1 (highest) and P5, and then worked in priority order. We may also have some follow up questions along the way, so keeping up with follow up comments via GitHub notifications is a good idea. + +Due to large interest in Ansible, humans may not comment on your ticket immediately. + +Mailing Lists +============= + +If you have concerns or questions, you're welcome to stop by the ansible-project or ansible-development mailing lists, as appropriate. Here are the links: + + * https://groups.google.com/forum/#!forum/ansible-project - for discussion of bugs and how-to type questions + * https://groups.google.com/forum/#!forum/ansible-devel - for discussion on how to implement a code change, or feature brainstorming among developers + +Thanks again for the interest in Ansible! 
From 79394f5c8fa293bb326853f00075b94ec8af8e5f Mon Sep 17 00:00:00 2001 From: marconius Date: Wed, 1 Jul 2015 01:48:19 -0400 Subject: [PATCH 1147/3617] Added tests for `taggable` module --- test/units/playbook/test_playbook.py | 1 - test/units/playbook/test_taggable.py | 104 +++++++++++++++++++++++++++ 2 files changed, 104 insertions(+), 1 deletion(-) create mode 100644 test/units/playbook/test_taggable.py diff --git a/test/units/playbook/test_playbook.py b/test/units/playbook/test_playbook.py index 97307c4b272319..454aa9a540be60 100644 --- a/test/units/playbook/test_playbook.py +++ b/test/units/playbook/test_playbook.py @@ -66,4 +66,3 @@ def test_bad_playbook_files(self): vm = VariableManager() self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader) self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader) - diff --git a/test/units/playbook/test_taggable.py b/test/units/playbook/test_taggable.py new file mode 100644 index 00000000000000..501136741a6416 --- /dev/null +++ b/test/units/playbook/test_taggable.py @@ -0,0 +1,104 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.compat.tests import unittest +from ansible.playbook.taggable import Taggable +from units.mock.loader import DictDataLoader + +class TaggableTestObj(Taggable): + + def __init__(self): + self._loader = DictDataLoader({}) + self.tags = [] + + +class TestTaggable(unittest.TestCase): + + def assert_evaluate_equal(self, test_value, tags, only_tags, skip_tags): + taggable_obj = TaggableTestObj() + taggable_obj.tags = tags + + evaluate = taggable_obj.evaluate_tags(only_tags, skip_tags, {}) + + self.assertEqual(test_value, evaluate) + + def test_evaluate_tags_tag_in_only_tags(self): + self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag1'], []) + + def test_evaluate_tags_tag_in_skip_tags(self): + self.assert_evaluate_equal(False, ['tag1', 'tag2'], [], ['tag1']) + + def test_evaluate_tags_special_always_in_object_tags(self): + self.assert_evaluate_equal(True, ['tag', 'always'], ['random'], []) + + def test_evaluate_tags_tag_in_skip_tags_special_always_in_object_tags(self): + self.assert_evaluate_equal(False, ['tag', 'always'], ['random'], ['tag']) + + def test_evaluate_tags_special_always_in_skip_tags_and_always_in_tags(self): + self.assert_evaluate_equal(False, ['tag', 'always'], [], ['always']) + + def test_evaluate_tags_special_tagged_in_only_tags_and_object_tagged(self): + self.assert_evaluate_equal(True, ['tag'], ['tagged'], []) + + def test_evaluate_tags_special_tagged_in_only_tags_and_object_untagged(self): + self.assert_evaluate_equal(False, [], ['tagged'], []) + + def test_evaluate_tags_special_tagged_in_skip_tags_and_object_tagged(self): + self.assert_evaluate_equal(False, ['tag'], [], ['tagged']) + + def test_evaluate_tags_special_tagged_in_skip_tags_and_object_untagged(self): + self.assert_evaluate_equal(True, [], [], ['tagged']) + + def test_evaluate_tags_special_untagged_in_only_tags_and_object_tagged(self): + 
self.assert_evaluate_equal(False, ['tag'], ['untagged'], []) + + def test_evaluate_tags_special_untagged_in_only_tags_and_object_untagged(self): + self.assert_evaluate_equal(True, [], ['untagged'], []) + + def test_evaluate_tags_special_untagged_in_skip_tags_and_object_tagged(self): + self.assert_evaluate_equal(True, ['tag'], [], ['untagged']) + + def test_evaluate_tags_special_untagged_in_skip_tags_and_object_untagged(self): + self.assert_evaluate_equal(False, [], [], ['untagged']) + + def test_evaluate_tags_special_all_in_only_tags(self): + self.assert_evaluate_equal(True, ['tag'], ['all'], ['untagged']) + + def test_evaluate_tags_special_all_in_skip_tags(self): + self.assert_evaluate_equal(False, ['tag'], ['tag'], ['all']) + + def test_evaluate_tags_special_all_in_only_tags_and_special_all_in_skip_tags(self): + self.assert_evaluate_equal(False, ['tag'], ['all'], ['all']) + + def test_evaluate_tags_special_all_in_skip_tags_and_always_in_object_tags(self): + self.assert_evaluate_equal(True, ['tag', 'always'], [], ['all']) + + def test_evaluate_tags_special_all_in_skip_tags_and_special_always_in_skip_tags_and_always_in_object_tags(self): + self.assert_evaluate_equal(False, ['tag', 'always'], [], ['all', 'always']) + + def test_evaluate_tags_accepts_lists(self): + self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag2'], []) + + def test_evaluate_tags_accepts_strings(self): + self.assert_evaluate_equal(True, 'tag1,tag2', ['tag2'], []) + + def test_evaluate_tags_with_repeated_tags(self): + self.assert_evaluate_equal(False, ['tag', 'tag'], [], ['tag']) From ddac6fa9f30eeb2a2280c9f49f33410253d1c48c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 8 Jul 2015 08:58:07 -0700 Subject: [PATCH 1148/3617] Update exception handling to be python3 compat --- lib/ansible/parsing/vault/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 
4892f2f0dbb8e0..7a2bd378c11400 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -378,7 +378,7 @@ def __init__(self, password, filename): raise errors.AnsibleError("%s does not exist" % self.filename) try: self.filehandle = open(filename, "rb") - except Exception, e: + except Exception as e: raise errors.AnsibleError("Could not open %s: %s" % (self.filename, str(e))) _, self.tmpfile = tempfile.mkstemp() From 64a1b1e043d2388f756cb5ee9fe77819057b1931 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 8 Jul 2015 12:18:59 -0400 Subject: [PATCH 1149/3617] Fix first_available_file: support for copy and template actions --- lib/ansible/plugins/action/copy.py | 2 +- lib/ansible/plugins/action/template.py | 2 +- test/integration/roles/test_template/tasks/main.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index 9a984f03a5e311..7f11dfda2f303e 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -43,7 +43,7 @@ def run(self, tmp=None, task_vars=dict()): dest = self._task.args.get('dest', None) raw = boolean(self._task.args.get('raw', 'no')) force = boolean(self._task.args.get('force', 'yes')) - faf = task_vars.get('first_available_file', None) + faf = self._task.first_available_file if (source is None and content is None and faf is None) or dest is None: return dict(failed=True, msg="src (or content) and dest are required") diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index b8346cb6f9eb9e..c13dc32b8a7613 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -55,7 +55,7 @@ def run(self, tmp=None, task_vars=dict()): source = self._task.args.get('src', None) dest = self._task.args.get('dest', None) - faf = task_vars.get('first_available_file', None) + faf = self._task.first_available_file if (source 
is None and faf is not None) or dest is None: return dict(failed=True, msg="src and dest are required") diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index a35b93d9d924a8..acb6ae91340771 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -44,7 +44,7 @@ - name: check what python version ansible is running on command: python -c 'import distutils.sysconfig ; print(distutils.sysconfig.get_python_version())' register: pyver - delegate_to: localhost + #delegate_to: localhost - name: copy known good into place copy: src=foo.txt dest={{output_dir}}/foo.txt From f5baad4fb2e737cde02f2a89f0c9e12e5cca1b0b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 8 Jul 2015 12:23:19 -0400 Subject: [PATCH 1150/3617] Removing unicode --start-at-task test for now as we haven't added that back into devel --- test/integration/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index 561751456f7082..69416b1658c078 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -38,7 +38,7 @@ includes: unicode: ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v $(TEST_FLAGS) -e 'extra_var=café' # Test the start-at-task flag #9571 - ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v --start-at-task '*¶' -e 'start_at_task=True' $(TEST_FLAGS) + #ansible-playbook unicode.yml -i $(INVENTORY) -e @$(VARS_FILE) -v --start-at-task '*¶' -e 'start_at_task=True' $(TEST_FLAGS) test_templating_settings: ansible-playbook test_templating_settings.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) From 27fcf1a4b53631daf12c8cea1c5c9d99487c2a21 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 8 Jul 2015 12:38:24 -0400 Subject: [PATCH 1151/3617] Fix bug in registered variables related to delegate_to changes --- 
lib/ansible/executor/process/result.py | 2 +- lib/ansible/plugins/strategies/__init__.py | 8 ++++++++ test/integration/roles/test_template/tasks/main.yml | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 4041021b164b27..0fb06c9b3a2f8a 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -107,7 +107,7 @@ def run(self): # if this task is registering a result, do it now if result._task.register: - self._send_result(('set_host_var', result._host, result._task, None, result._task.register, result._result)) + self._send_result(('register_host_var', result._host, result._task.register, result._result)) # send callbacks, execute other options based on the result status # FIXME: this should all be cleaned up and probably moved to a sub-function. diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 0452a7616dd432..aff1eadd3b6a1b 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -223,6 +223,14 @@ def _process_pending_results(self, iterator): if host not in self._notified_handlers[handler_name]: self._notified_handlers[handler_name].append(host) + elif result[0] == 'register_host_var': + # essentially the same as 'set_host_var' below, however we + # never follow the delegate_to value for registered vars + host = result[1] + var_name = result[2] + var_value = result[3] + self._variable_manager.set_host_variable(host, var_name, var_value) + elif result[0] in ('set_host_var', 'set_host_facts'): host = result[1] task = result[2] diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index acb6ae91340771..a35b93d9d924a8 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -44,7 +44,7 @@ 
- name: check what python version ansible is running on command: python -c 'import distutils.sysconfig ; print(distutils.sysconfig.get_python_version())' register: pyver - #delegate_to: localhost + delegate_to: localhost - name: copy known good into place copy: src=foo.txt dest={{output_dir}}/foo.txt From dd058a1dc283ae6b4fd627ef14225be73d6bd5b8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 8 Jul 2015 09:45:02 -0700 Subject: [PATCH 1152/3617] Fix required_if (needed to pass list to _count_terms) --- lib/ansible/module_utils/basic.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index be9e86ce70a204..bb5a6a52eab5df 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -978,7 +978,7 @@ def _check_required_if(self, spec): missing = [] if key in self.params and self.params[key] == val: for check in requirements: - count = self._count_terms(check) + count = self._count_terms((check,)) if count == 0: missing.append(check) if len(missing) > 0: @@ -1111,7 +1111,6 @@ def _check_argument_types(self): continue value = self.params[k] - is_invalid = False try: type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted] From 897e098b279efbe1f532974c07da2ed475cb5b8d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 16:33:00 -0400 Subject: [PATCH 1153/3617] minor fixes to constants --- lib/ansible/constants.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 55bfd43f133b1f..b437c10806c1a7 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -194,7 +194,7 @@ def shell_expand_path(path): DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True) COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True) 
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True) -DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', None, islist=True) +DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True) RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True) RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/') @@ -220,7 +220,7 @@ def shell_expand_path(path): # galaxy related DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') # this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated -GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True) +GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True) # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" From 55366bdc6df55093277fb8a25416729545f79f96 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 16:33:15 -0400 Subject: [PATCH 1154/3617] ported mail callback plugin to work with v2 --- .../ansible/plugins/callback}/mail.py | 63 +++++++++++++------ 1 file changed, 43 insertions(+), 20 deletions(-) rename {plugins/callbacks => lib/ansible/plugins/callback}/mail.py (65%) diff --git a/plugins/callbacks/mail.py b/lib/ansible/plugins/callback/mail.py similarity index 65% rename from plugins/callbacks/mail.py rename to lib/ansible/plugins/callback/mail.py index e21961079cdc94..46b2409130742a 100644 --- a/plugins/callbacks/mail.py +++ b/lib/ansible/plugins/callback/mail.py @@ -15,13 +15,23 @@ # You should have received a copy of the GNU General Public License # along with Ansible. 
If not, see . +import os import smtplib +from ansible.plugins.callback import CallbackBase -def mail(subject='Ansible error mail', sender='', to='root', cc=None, bcc=None, body=None): - if not body: +def mail(subject='Ansible error mail', sender=None, to=None, cc=None, bcc=None, body=None, smtphost=None): + + if sender is None: + sender='' + if to is None: + to='root' + if smtphost is None: + smtphost=os.getenv('SMTPHOST', 'localhost') + + if body is None: body = subject - smtp = smtplib.SMTP('localhost') + smtp = smtplib.SMTP(smtphost) content = 'From: %s\n' % sender content += 'To: %s\n' % to @@ -42,31 +52,40 @@ def mail(subject='Ansible error mail', sender='', to='root', cc=None, bcc= smtp.quit() -class CallbackModule(object): - +class CallbackModule(CallbackBase): """ This Ansible callback plugin mails errors to interested parties. """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + + def v2_runner_on_failed(self, res, ignore_errors=False): + + host = res._host.get_name() - def runner_on_failed(self, host, res, ignore_errors=False): if ignore_errors: return sender = '"Ansible: %s" ' % host - subject = 'Failed: %(module_name)s %(module_args)s' % res['invocation'] - body = 'The following task failed for host ' + host + ':\n\n%(module_name)s %(module_args)s\n\n' % res['invocation'] - if 'stdout' in res.keys() and res['stdout']: - subject = res['stdout'].strip('\r\n').split('\n')[-1] - body += 'with the following output in standard output:\n\n' + res['stdout'] + '\n\n' - if 'stderr' in res.keys() and res['stderr']: + subject = 'Failed: %s' % (res._task.action) + body = 'The following task failed for host ' + host + ':\n\n%s\n\n' % (res._task.action) + + if 'stdout' in res._result.keys() and res._result['stdout']: + subject = res._result['stdout'].strip('\r\n').split('\n')[-1] + body += 'with the following output in standard output:\n\n' + res._result['stdout'] + '\n\n' + if 'stderr' in res._result.keys() and res._result['stderr']: subject = 
res['stderr'].strip('\r\n').split('\n')[-1] - body += 'with the following output in standard error:\n\n' + res['stderr'] + '\n\n' - if 'msg' in res.keys() and res['msg']: - subject = res['msg'].strip('\r\n').split('\n')[0] - body += 'with the following message:\n\n' + res['msg'] + '\n\n' - body += 'A complete dump of the error:\n\n' + str(res) + body += 'with the following output in standard error:\n\n' + res._result['stderr'] + '\n\n' + if 'msg' in res._result.keys() and res._result['msg']: + subject = res._result['msg'].strip('\r\n').split('\n')[0] + body += 'with the following message:\n\n' + res._result['msg'] + '\n\n' + body += 'A complete dump of the error:\n\n' + str(res._result['msg']) mail(sender=sender, subject=subject, body=body) - - def runner_on_unreachable(self, host, res): + + def v2_runner_on_unreachable(self, result): + + host = result._host.get_name() + res = result._result + sender = '"Ansible: %s" ' % host if isinstance(res, basestring): subject = 'Unreachable: %s' % res.strip('\r\n').split('\n')[-1] @@ -77,7 +96,11 @@ def runner_on_unreachable(self, host, res): res['msg'] + '\n\nA complete dump of the error:\n\n' + str(res) mail(sender=sender, subject=subject, body=body) - def runner_on_async_failed(self, host, res, jid): + def v2_runner_on_async_failed(self, result): + + host = result._host.get_name() + res = result._result + sender = '"Ansible: %s" ' % host if isinstance(res, basestring): subject = 'Async failure: %s' % res.strip('\r\n').split('\n')[-1] From b5f3e84014f0c9fa88b5bd0ce5371d7306e22992 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 22:45:01 -0400 Subject: [PATCH 1155/3617] now allows for empty vars sections, returns empty dict fixes #11532 --- lib/ansible/playbook/play.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index c3d9aea06ba30b..a7ea0c145db8fb 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -162,6 +162,8
@@ def _load_vars(self, attr, ds): raise ValueError all_vars = combine_vars(all_vars, item) return all_vars + elif ds is None: + return {} else: raise ValueError except ValueError: From 3ba67dd2d08fd4e6b50a7aa8e9da613e15e0079b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 9 Jul 2015 00:27:29 -0400 Subject: [PATCH 1156/3617] added ignore_hidden to assemble --- lib/ansible/plugins/action/assemble.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py index 82a77519d695c7..c62f7f7dc9bac9 100644 --- a/lib/ansible/plugins/action/assemble.py +++ b/lib/ansible/plugins/action/assemble.py @@ -34,7 +34,7 @@ class ActionModule(ActionBase): TRANSFERS_FILES = True - def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None): + def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp() @@ -46,7 +46,7 @@ def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=Non if compiled_regexp and not compiled_regexp.search(f): continue fragment = "%s/%s" % (src_path, f) - if not os.path.isfile(fragment): + if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue fragment_content = file(fragment).read() @@ -82,6 +82,8 @@ def run(self, tmp=None, task_vars=dict()): delimiter = self._task.args.get('delimiter', None) remote_src = self._task.args.get('remote_src', 'yes') regexp = self._task.args.get('regexp', None) + ignore_hidden = self._task.args.get('ignore_hidden', False) + if src is None or dest is None: return dict(failed=True, msg="src and dest are required") @@ -99,7 +101,7 @@ def run(self, tmp=None, task_vars=dict()): _re = re.compile(regexp) # Does all work assembling the file - path = self._assemble_from_fragments(src, delimiter, _re) + path = 
self._assemble_from_fragments(src, delimiter, _re, ignore_hidden) path_checksum = checksum_s(path) dest = self._remote_expand_user(dest, tmp) From a9712bb0fb5acf0e501037eca944a5eaeadf96cf Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 9 Jul 2015 08:23:43 -0400 Subject: [PATCH 1157/3617] Fixing some delegate_to bugs * Moving connection creation until after the task is post_validated, to make sure all fields are properly templated (#11230) * Fixing problems related to the connection method and remote address lookup on the delegated-to host Fixes #11230 --- lib/ansible/executor/task_executor.py | 14 +++++++------- lib/ansible/inventory/host.py | 1 + 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index ae840a4de6932f..287c7431b429bd 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -217,12 +217,6 @@ def _execute(self, variables=None): # variables to the variable dictionary self._connection_info.update_vars(variables) - # get the connection and the handler for this execution - self._connection = self._get_connection(variables) - self._connection.set_host_overrides(host=self._host) - - self._handler = self._get_action_handler(connection=self._connection, templar=templar) - # Evaluate the conditional (if any) for this task, which we do before running # the final task post-validation. 
We do this before the post validation due to # the fact that the conditional may specify that the task be skipped due to a @@ -251,6 +245,12 @@ def _execute(self, variables=None): del include_variables['_raw_params'] return dict(changed=True, include=include_file, include_variables=include_variables) + # get the connection and the handler for this execution + self._connection = self._get_connection(variables) + self._connection.set_host_overrides(host=self._host) + + self._handler = self._get_action_handler(connection=self._connection, templar=templar) + # And filter out any fields which were set to default(omit), and got the omit token value omit_token = variables.get('omit') if omit_token is not None: @@ -460,7 +460,7 @@ def _compute_delegate(self, variables): self._connection_info.port = this_info.get('ansible_ssh_port', self._connection_info.port) self._connection_info.password = this_info.get('ansible_ssh_pass', self._connection_info.password) self._connection_info.private_key_file = this_info.get('ansible_ssh_private_key_file', self._connection_info.private_key_file) - self._connection_info.connection = this_info.get('ansible_connection', self._connection_info.connection) + self._connection_info.connection = this_info.get('ansible_connection', C.DEFAULT_TRANSPORT) self._connection_info.become_pass = this_info.get('ansible_sudo_pass', self._connection_info.become_pass) if self._connection_info.remote_addr in ('127.0.0.1', 'localhost'): diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index ffdbc6f9c3a7cb..c14a6f4a25e52c 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -123,6 +123,7 @@ def get_vars(self): results = combine_vars(results, self.vars) results['inventory_hostname'] = self.name results['inventory_hostname_short'] = self.name.split('.')[0] + results['ansible_ssh_host'] = self.ipv4_address results['group_names'] = sorted([ g.name for g in groups if g.name != 'all']) return results From 
32685f96483da3b36bdddb7f9b412d69e9460e7b Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 9 Jul 2015 10:50:31 -0400 Subject: [PATCH 1158/3617] assert password or ssh key provided on new image creation --- test/integration/credentials.template | 4 +-- .../roles/test_azure/tasks/main.yml | 36 +++++++++++++++++-- 2 files changed, 36 insertions(+), 4 deletions(-) diff --git a/test/integration/credentials.template b/test/integration/credentials.template index 78594aca97cc8b..fb052a42c2ae80 100644 --- a/test/integration/credentials.template +++ b/test/integration/credentials.template @@ -14,8 +14,8 @@ pem_file: project_id: # Azure Credentials -azure_subscription_id: -azure_cert_path: +azure_subscription_id: "{{ lookup('env', 'AZURE_SUBSCRIPTION_ID') }}" +azure_cert_path: "{{ lookup('env', 'AZURE_CERT_PATH') }}" # GITHUB SSH private key - a path to a SSH private key for use with github.com github_ssh_private_key: "{{ lookup('env','HOME') }}/.ssh/id_rsa" diff --git a/test/integration/roles/test_azure/tasks/main.yml b/test/integration/roles/test_azure/tasks/main.yml index cba93e3d65c1b0..a4d5d7ef59d9fe 100644 --- a/test/integration/roles/test_azure/tasks/main.yml +++ b/test/integration/roles/test_azure/tasks/main.yml @@ -6,6 +6,9 @@ azure: register: result ignore_errors: true + environment: + AZURE_SUBSCRIPTION_ID: "" + AZURE_CERT_PATH: "" - name: assert failure when called with no credentials assert: @@ -14,6 +17,7 @@ - 'result.msg == "No subscription_id provided. 
Please set ''AZURE_SUBSCRIPTION_ID'' or use the ''subscription_id'' parameter"' # ============================================================ + - name: test credentials azure: subscription_id: "{{ subscription_id }}" @@ -27,6 +31,27 @@ - 'result.failed' - 'result.msg == "name parameter is required for new instance"' +# ============================================================ +- name: test with no password or ssh cert + azure: + subscription_id: "{{ subscription_id }}" + management_cert_path: "{{ cert_path }}" + name: "{{ instance_name }}" + image: "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_4-LTS-amd64-server-20140514-en-us-30GB" + storage_account: "{{ storage_account }}" + user: "{{ user }}" + role_size: "{{ role_size }}" + location: "{{ location }}" + state: present + register: result + ignore_errors: true + +- name: assert failure when called with no password or ssh cert + assert: + that: + - 'result.failed' + - 'result.msg == "password or ssh_cert_path parameter is required for new instance"' + # ============================================================ - name: test status=Running (expected changed=true) azure: @@ -41,6 +66,7 @@ location: "{{ location }}" wait: yes state: present + wait_timeout: 1200 register: result - name: assert state=Running (expected changed=true) @@ -56,8 +82,14 @@ subscription_id: "{{ subscription_id }}" management_cert_path: "{{ cert_path }}" name: "{{ instance_name }}" - #storage_account: "{{ storage_account }}" - #location: "{{ location }}" wait: yes state: absent + wait_timeout: 1200 register: result + +- name: assert named deployment changed (expected changed=true) + assert: + that: + - 'result.changed' + - 'result.deployment.name == "{{ instance_name }}"' + From 403f4881ee667cc9d4b038fab38f025289f4770f Mon Sep 17 00:00:00 2001 From: Iiro Uusitalo Date: Tue, 7 Oct 2014 12:41:13 +0300 Subject: [PATCH 1159/3617] Enables 'basic auth force' -feature globally --- lib/ansible/module_utils/urls.py | 11 ++++++++++- 1 file 
changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 27b10742f7c42a..6870466b6c44d9 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -229,6 +229,7 @@ def match_hostname(cert, hostname): import socket import platform import tempfile +import base64 # This is a dummy cacert provided for Mac OS since you need at least 1 @@ -523,6 +524,7 @@ def http_request(self, req): def open_url(url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True, url_username=None, url_password=None, http_agent=None): + force_basic_auth = dict(required=False, type='bool') ''' Fetches a file from an HTTP/FTP server using urllib2 ''' @@ -554,6 +556,7 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, if parsed[0] != 'ftp': username = url_username + force_basic_auth = module.params.get('force_basic_auth', False) if username: password = url_password @@ -572,7 +575,7 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, # reconstruct url without credentials url = urlparse.urlunparse(parsed) - if username: + if username and not force_basic_auth: passman = urllib2.HTTPPasswordMgrWithDefaultRealm() # this creates a password manager @@ -586,6 +589,12 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, # create the AuthHandler handlers.append(authhandler) + elif username and force_basic_auth: + if headers is None: + headers = {} + + headers["Authorization"] = "Basic {0}".format(base64.b64encode("{0}:{1}".format(username, password))) + if not use_proxy: proxyhandler = urllib2.ProxyHandler({}) handlers.append(proxyhandler) From 4e7542af3789dabb7bb5f0d2b74a493e3d99e2ec Mon Sep 17 00:00:00 2001 From: Iiro Uusitalo Date: Fri, 10 Jul 2015 08:44:20 +0300 Subject: [PATCH 1160/3617] Merge upstream changes --- lib/ansible/module_utils/urls.py | 20 ++++++++++---------- 1 
file changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 6870466b6c44d9..cf9a652ed148a8 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -413,7 +413,7 @@ def get_ca_certs(self): # Write the dummy ca cert if we are running on Mac OS X if system == 'Darwin': os.write(tmp_fd, DUMMY_CA_CERT) - # Default Homebrew path for OpenSSL certs + # Default Homebrew path for OpenSSL certs paths_checked.append('/usr/local/etc/openssl') # for all of the paths, find any .crt or .pem files @@ -523,13 +523,11 @@ def http_request(self, req): # Rewrite of fetch_url to not require the module environment def open_url(url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True, - url_username=None, url_password=None, http_agent=None): - force_basic_auth = dict(required=False, type='bool') + url_username=None, url_password=None, http_agent=None, force_basic_auth=False): ''' Fetches a file from an HTTP/FTP server using urllib2 ''' handlers = [] - # FIXME: change the following to use the generic_urlparse function # to remove the indexed references for 'parsed' parsed = urlparse.urlparse(url) @@ -556,7 +554,6 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, if parsed[0] != 'ftp': username = url_username - force_basic_auth = module.params.get('force_basic_auth', False) if username: password = url_password @@ -614,11 +611,11 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, else: request = urllib2.Request(url, data) - # add the custom agent header, to help prevent issues - # with sites that block the default urllib agent string + # add the custom agent header, to help prevent issues + # with sites that block the default urllib agent string request.add_header('User-agent', http_agent) - # if we're ok with getting a 304, set the timestamp in the + # if we're ok with 
getting a 304, set the timestamp in the # header, otherwise make sure we don't get a cached copy if last_mod_time and not force: tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000') @@ -659,9 +656,11 @@ def url_argument_spec(): validate_certs = dict(default='yes', type='bool'), url_username = dict(required=False), url_password = dict(required=False), + force_basic_auth = dict(required=False, type='bool', default='no'), + ) -def fetch_url(module, url, data=None, headers=None, method=None, +def fetch_url(module, url, data=None, headers=None, method=None, use_proxy=True, force=False, last_mod_time=None, timeout=10): ''' Fetches a file from an HTTP/FTP server using urllib2. Requires the module environment @@ -678,6 +677,7 @@ def fetch_url(module, url, data=None, headers=None, method=None, username = module.params.get('url_username', '') password = module.params.get('url_password', '') http_agent = module.params.get('http_agent', None) + force_basic_auth = module.params.get('force_basic_auth', '') r = None info = dict(url=url) @@ -685,7 +685,7 @@ def fetch_url(module, url, data=None, headers=None, method=None, r = open_url(url, data=data, headers=headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, url_username=username, - url_password=password, http_agent=http_agent) + url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth) info.update(r.info()) info['url'] = r.geturl() # The URL goes in too, because of redirects. 
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200)) From b520d5bc6002e8df9bcacaf58140f02d69977668 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Jul 2015 01:53:59 -0400 Subject: [PATCH 1161/3617] Lots of fixes for integration test bugs --- lib/ansible/cli/__init__.py | 2 +- lib/ansible/constants.py | 1 + lib/ansible/executor/connection_info.py | 27 ++++---- lib/ansible/executor/process/result.py | 2 +- lib/ansible/playbook/play.py | 3 +- lib/ansible/playbook/role/__init__.py | 19 +++--- lib/ansible/plugins/strategies/__init__.py | 65 ++++++++++++------- lib/ansible/vars/__init__.py | 3 + lib/ansible/vars/hostvars.py | 2 +- test/integration/non_destructive.yml | 18 ++--- .../roles/test_authorized_key/tasks/main.yml | 60 ++++++++--------- .../roles/test_conditionals/tasks/main.yml | 15 +++-- .../test_includes/tasks/included_task1.yml | 6 +- .../tasks/user_password_update_test.yml | 13 ++-- test/integration/test_force_handlers.yml | 6 +- test/integration/test_group_by.yml | 40 ++++++++---- 16 files changed, 165 insertions(+), 117 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 534ebabd0f79a7..7ff8755ef8ad66 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -318,7 +318,7 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, ) if meta_opts: - parser.add_option('--force-handlers', dest='force_handlers', action='store_true', + parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true', help="run handlers even if a task fails") parser.add_option('--flush-cache', dest='flush_cache', action='store_true', help="clear the fact cache") diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index b437c10806c1a7..2c2930d6824906 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -139,6 +139,7 @@ def shell_expand_path(path): 
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh') DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower() DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '')) +DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True) # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index fc554f577c08c8..1a94360a7edd10 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -171,11 +171,12 @@ def __init__(self, play=None, options=None, passwords=None): self.su_pass = None # general flags (should we move out?) - self.verbosity = 0 - self.only_tags = set() - self.skip_tags = set() - self.no_log = False - self.check_mode = False + self.verbosity = 0 + self.only_tags = set() + self.skip_tags = set() + self.no_log = False + self.check_mode = False + self.force_handlers = False #TODO: just pull options setup to above? 
# set options before play to allow play to override them @@ -195,21 +196,23 @@ def set_play(self, play): self.connection = play.connection if play.remote_user: - self.remote_user = play.remote_user + self.remote_user = play.remote_user if play.port: - self.port = int(play.port) + self.port = int(play.port) if play.become is not None: - self.become = play.become + self.become = play.become if play.become_method: self.become_method = play.become_method if play.become_user: - self.become_user = play.become_user + self.become_user = play.become_user # non connection related - self.no_log = play.no_log - self.environment = play.environment + self.no_log = play.no_log + self.environment = play.environment + if play.force_handlers is not None: + self.force_handlers = play.force_handlers def set_options(self, options): ''' @@ -236,6 +239,8 @@ def set_options(self, options): # self.no_log = boolean(options.no_log) if options.check: self.check_mode = boolean(options.check) + if options.force_handlers: + self.force_handlers = boolean(options.force_handlers) # get the tag info from options, converting a comma-separated list # of values into a proper list if need be. 
We check to see if the diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 0fb06c9b3a2f8a..505457f7d20191 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -147,7 +147,7 @@ def run(self): self._send_result(('add_host', result_item)) elif 'add_group' in result_item: # this task added a new group (group_by module) - self._send_result(('add_group', result._host, result_item)) + self._send_result(('add_group', result._task)) elif 'ansible_facts' in result_item: # if this task is registering facts, do that now item = result_item.get('item', None) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index a7ea0c145db8fb..aa8d1092a52877 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -78,6 +78,7 @@ class Play(Base, Taggable, Become): # Flag/Setting Attributes _any_errors_fatal = FieldAttribute(isa='bool', default=False) + _force_handlers = FieldAttribute(isa='bool') _max_fail_percentage = FieldAttribute(isa='string', default='0') _serial = FieldAttribute(isa='int', default=0) _strategy = FieldAttribute(isa='string', default='linear') @@ -210,7 +211,7 @@ def _load_roles(self, attr, ds): roles = [] for ri in role_includes: - roles.append(Role.load(ri)) + roles.append(Role.load(ri, play=self)) return roles def _post_validate_vars(self, attr, value, templar): diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 120b851ccf38c6..f1de615608f898 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -77,14 +77,14 @@ def role_reset_has_run(): class Role(Base, Become, Conditional, Taggable): - def __init__(self): + def __init__(self, play=None): self._role_name = None self._role_path = None self._role_params = dict() self._loader = None self._metadata = None - self._play = None + self._play = play self._parents = [] self._dependencies = [] 
self._task_blocks = [] @@ -103,7 +103,7 @@ def get_name(self): return self._role_name @staticmethod - def load(role_include, parent_role=None): + def load(role_include, play, parent_role=None): # FIXME: add back in the role caching support try: # The ROLE_CACHE is a dictionary of role names, with each entry @@ -112,7 +112,10 @@ def load(role_include, parent_role=None): # We use frozenset to make the dictionary hashable. #hashed_params = frozenset(role_include.get_role_params().iteritems()) - hashed_params = hash_params(role_include.get_role_params()) + params = role_include.get_role_params() + params['tags'] = role_include.tags + params['when'] = role_include.when + hashed_params = hash_params(params) if role_include.role in ROLE_CACHE: for (entry, role_obj) in ROLE_CACHE[role_include.role].iteritems(): if hashed_params == entry: @@ -120,7 +123,7 @@ def load(role_include, parent_role=None): role_obj.add_parent(parent_role) return role_obj - r = Role() + r = Role(play=play) r._load_role_data(role_include, parent_role=parent_role) if role_include.role not in ROLE_CACHE: @@ -174,11 +177,11 @@ def _load_role_data(self, role_include, parent_role=None): task_data = self._load_role_yaml('tasks') if task_data: - self._task_blocks = load_list_of_blocks(task_data, play=None, role=self, loader=self._loader) + self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader) handler_data = self._load_role_yaml('handlers') if handler_data: - self._handler_blocks = load_list_of_blocks(handler_data, play=None, role=self, use_handlers=True, loader=self._loader) + self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader) # vars and default vars are regular dictionaries self._role_vars = self._load_role_yaml('vars') @@ -227,7 +230,7 @@ def _load_dependencies(self): deps = [] if self._metadata: for role_include in self._metadata.dependencies: - r = Role.load(role_include, 
parent_role=self) + r = Role.load(role_include, play=self._play, parent_role=self) deps.append(r) return deps diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index aff1eadd3b6a1b..f188b70a0a2253 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -207,11 +207,8 @@ def _process_pending_results(self, iterator): self._add_host(new_host_info) elif result[0] == 'add_group': - host = result[1] - task_result = result[2] - group_name = task_result.get('add_group') - - self._add_group(host, group_name) + task = result[1] + self._add_group(task, iterator) elif result[0] == 'notify_handler': host = result[1] @@ -272,11 +269,12 @@ def _wait_on_pending_results(self, iterator): ret_results = [] + debug("waiting for pending results...") while self._pending_results > 0 and not self._tqm._terminated: - debug("waiting for pending results (%d left)" % self._pending_results) results = self._process_pending_results(iterator) ret_results.extend(results) time.sleep(0.01) + debug("no more pending results, returning what we have") return ret_results @@ -324,29 +322,45 @@ def _add_host(self, host_info): # FIXME: is this still required? self._inventory.clear_pattern_cache() - def _add_group(self, host, group_name): + def _add_group(self, task, iterator): ''' Helper function to add a group (if it does not exist), and to assign the specified host to that group. 
''' - new_group = self._inventory.get_group(group_name) - if not new_group: - # create the new group and add it to inventory - new_group = Group(group_name) - self._inventory.add_group(new_group) - - # and add the group to the proper hierarchy - allgroup = self._inventory.get_group('all') - allgroup.add_child_group(new_group) - # the host here is from the executor side, which means it was a # serialized/cloned copy and we'll need to look up the proper # host object from the master inventory - actual_host = self._inventory.get_host(host.name) + groups = {} + changed = False + + for host in self._inventory.get_hosts(): + original_task = iterator.get_original_task(host, task) + all_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=original_task) + templar = Templar(loader=self._loader, variables=all_vars) + group_name = templar.template(original_task.args.get('key')) + if task.evaluate_conditional(templar=templar, all_vars=all_vars): + if group_name not in groups: + groups[group_name] = [] + groups[group_name].append(host) + + for group_name, hosts in groups.iteritems(): + new_group = self._inventory.get_group(group_name) + if not new_group: + # create the new group and add it to inventory + new_group = Group(name=group_name) + self._inventory.add_group(new_group) + + # and add the group to the proper hierarchy + allgroup = self._inventory.get_group('all') + allgroup.add_child_group(new_group) + changed = True + for host in hosts: + if group_name not in host.get_groups(): + new_group.add_host(host) + changed = True - # and add the host to the group - new_group.add_host(actual_host) + return changed def _load_included_file(self, included_file, iterator): ''' @@ -398,13 +412,14 @@ def run_handlers(self, iterator, connection_info): for handler in handler_block.block: handler_name = handler.get_name() if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]): - if not 
len(self.get_hosts_remaining(iterator._play)): - self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') - result = False - break + # FIXME: need to use iterator.get_failed_hosts() instead? + #if not len(self.get_hosts_remaining(iterator._play)): + # self._tqm.send_callback('v2_playbook_on_no_hosts_remaining') + # result = False + # break self._tqm.send_callback('v2_playbook_on_handler_task_start', handler) for host in self._notified_handlers[handler_name]: - if not handler.has_triggered(host) and host.name not in self._tqm._failed_hosts: + if not handler.has_triggered(host) and (host.name not in self._tqm._failed_hosts or connection_info.force_handlers): task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler) task_vars = self.add_tqm_variables(task_vars, play=iterator._play) self._queue_task(host, handler, task_vars, connection_info) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 740f8912fbe53a..40589b9db05090 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -245,6 +245,9 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): all_vars['omit'] = self._omit_token # make vars self referential, so people can do things like 'vars[var_name]' + copied_vars = all_vars.copy() + if 'hostvars' in copied_vars: + del copied_vars['hostvars'] all_vars['vars'] = all_vars.copy() #CACHED_VARS[cache_entry] = all_vars diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py index 166bdbe2579660..9d2c3864893c45 100644 --- a/lib/ansible/vars/hostvars.py +++ b/lib/ansible/vars/hostvars.py @@ -39,6 +39,6 @@ def __getitem__(self, host_name): host = self._inventory.get_host(host_name) result = self._vars_manager.get_vars(loader=self._loader, play=self._play, host=host) templar = Templar(variables=result, loader=self._loader) - self._lookup[host_name] = templar.template(result) + self._lookup[host_name] = templar.template(result, 
fail_on_undefined=False) return self._lookup[host_name] diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml index 1ce0724d7df2d2..668b20de9545b2 100644 --- a/test/integration/non_destructive.yml +++ b/test/integration/non_destructive.yml @@ -11,10 +11,18 @@ gather_facts: True roles: - { role: test_ping, tags: test_ping } + - { role: test_var_blending, parameterized_beats_default: 1234, tags: test_var_blending } + - { role: test_special_vars, tags: test_special_vars } + - { role: test_ignore_errors, tags: test_ignore_errors } + - { role: test_conditionals, tags: test_conditionals } + - { role: test_iterators, tags: test_iterators } + - { role: test_lookups, tags: test_lookups } + - { role: test_changed_when, tags: test_changed_when } + - { role: test_failed_when, tags: test_failed_when } + - { role: test_handlers, tags: test_handlers } - { role: test_copy, tags: test_copy } - { role: test_stat, tags: test_stat } - { role: test_template, tags: test_template } - - { role: test_special_vars, tags: test_special_vars } - { role: test_file, tags: test_file } - { role: test_fetch, tags: test_fetch } - { role: test_synchronize, tags: test_synchronize } @@ -22,20 +30,12 @@ - { role: test_subversion, tags: test_subversion } - { role: test_git, tags: test_git } - { role: test_hg, tags: test_hg } - - { role: test_changed_when, tags: test_changed_when } - - { role: test_var_blending, parameterized_beats_default: 1234, tags: test_var_blending } - { role: test_lineinfile, tags: test_lineinfile } - - { role: test_ignore_errors, tags: test_ignore_errors } - { role: test_unarchive, tags: test_unarchive } - { role: test_filters, tags: test_filters } - { role: test_facts_d, tags: test_facts_d } - - { role: test_conditionals, tags: test_conditionals } - { role: test_async, tags: test_async } - - { role: test_handlers, tags: test_handlers } - - { role: test_lookups, tags: test_lookups } - - { role: test_iterators, tags: test_iterators } - { role: 
test_command_shell, tags: test_command_shell } - - { role: test_failed_when, tags: test_failed_when } - { role: test_script, tags: test_script } - { role: test_authorized_key, tags: test_authorized_key } - { role: test_get_url, tags: test_get_url } diff --git a/test/integration/roles/test_authorized_key/tasks/main.yml b/test/integration/roles/test_authorized_key/tasks/main.yml index 20f369e509c687..ccd59735d4b9f5 100644 --- a/test/integration/roles/test_authorized_key/tasks/main.yml +++ b/test/integration/roles/test_authorized_key/tasks/main.yml @@ -27,8 +27,8 @@ - name: assert that the authorized_keys file was created assert: that: - - ['result.changed == True'] - - ['result.state == "file"'] + - 'result.changed == True' + - 'result.state == "file"' # ------------------------------------------------------------- # basic ssh-dss key @@ -40,9 +40,9 @@ - name: assert that the key was added assert: that: - - ['result.changed == True'] - - ['result.key == dss_key_basic'] - - ['result.key_options == None'] + - 'result.changed == True' + - 'result.key == dss_key_basic' + - 'result.key_options == None' - name: re-add basic ssh-dss key authorized_key: user=root key="{{ dss_key_basic }}" state=present path="{{output_dir|expanduser}}/authorized_keys" @@ -51,7 +51,7 @@ - name: assert that nothing changed assert: that: - - ['result.changed == False'] + - 'result.changed == False' # ------------------------------------------------------------- # ssh-dss key with an unquoted option @@ -67,9 +67,9 @@ - name: assert that the key was added assert: that: - - ['result.changed == True'] - - ['result.key == dss_key_unquoted_option'] - - ['result.key_options == None'] + - 'result.changed == True' + - 'result.key == dss_key_unquoted_option' + - 'result.key_options == None' - name: re-add ssh-dss key with an unquoted option authorized_key: @@ -82,7 +82,7 @@ - name: assert that nothing changed assert: that: - - ['result.changed == False'] + - 'result.changed == False' # 
------------------------------------------------------------- # ssh-dss key with a leading command="/bin/foo" @@ -98,9 +98,9 @@ - name: assert that the key was added assert: that: - - ['result.changed == True'] - - ['result.key == dss_key_command'] - - ['result.key_options == None'] + - 'result.changed == True' + - 'result.key == dss_key_command' + - 'result.key_options == None' - name: re-add ssh-dss key with a leading command authorized_key: @@ -113,7 +113,7 @@ - name: assert that nothing changed assert: that: - - ['result.changed == False'] + - 'result.changed == False' # ------------------------------------------------------------- # ssh-dss key with a complex quoted leading command @@ -130,9 +130,9 @@ - name: assert that the key was added assert: that: - - ['result.changed == True'] - - ['result.key == dss_key_complex_command'] - - ['result.key_options == None'] + - 'result.changed == True' + - 'result.key == dss_key_complex_command' + - 'result.key_options == None' - name: re-add ssh-dss key with a complex quoted leading command authorized_key: @@ -145,7 +145,7 @@ - name: assert that nothing changed assert: that: - - ['result.changed == False'] + - 'result.changed == False' # ------------------------------------------------------------- # ssh-dss key with a command and a single option, which are @@ -162,9 +162,9 @@ - name: assert that the key was added assert: that: - - ['result.changed == True'] - - ['result.key == dss_key_command_single_option'] - - ['result.key_options == None'] + - 'result.changed == True' + - 'result.key == dss_key_command_single_option' + - 'result.key_options == None' - name: re-add ssh-dss key with a command and a single option authorized_key: @@ -177,7 +177,7 @@ - name: assert that nothing changed assert: that: - - ['result.changed == False'] + - 'result.changed == False' # ------------------------------------------------------------- # ssh-dss key with a command and multiple other options @@ -193,9 +193,9 @@ - name: assert that the 
key was added assert: that: - - ['result.changed == True'] - - ['result.key == dss_key_command_multiple_options'] - - ['result.key_options == None'] + - 'result.changed == True' + - 'result.key == dss_key_command_multiple_options' + - 'result.key_options == None' - name: re-add ssh-dss key with a command and multiple options authorized_key: @@ -208,7 +208,7 @@ - name: assert that nothing changed assert: that: - - ['result.changed == False'] + - 'result.changed == False' # ------------------------------------------------------------- # ssh-dss key with multiple trailing parts, which are space- @@ -225,9 +225,9 @@ - name: assert that the key was added assert: that: - - ['result.changed == True'] - - ['result.key == dss_key_trailing'] - - ['result.key_options == None'] + - 'result.changed == True' + - 'result.key == dss_key_trailing' + - 'result.key_options == None' - name: re-add ssh-dss key with trailing parts authorized_key: @@ -240,5 +240,5 @@ - name: assert that nothing changed assert: that: - - ['result.changed == False'] + - 'result.changed == False' diff --git a/test/integration/roles/test_conditionals/tasks/main.yml b/test/integration/roles/test_conditionals/tasks/main.yml index 01a4f960d7325e..2ba008cc9e330b 100644 --- a/test/integration/roles/test_conditionals/tasks/main.yml +++ b/test/integration/roles/test_conditionals/tasks/main.yml @@ -267,18 +267,19 @@ that: - "result.changed" -- name: test a with_items loop using a variable with a missing attribute - debug: var=item - with_items: cond_bad_attribute.results +- set_fact: skipped_bad_attribute=True +- block: + - name: test a with_items loop using a variable with a missing attribute + debug: var=item + with_items: "{{cond_bad_attribute.results}}" + register: result + - set_fact: skipped_bad_attribute=False when: cond_bad_attribute is defined and 'results' in cond_bad_attribute - register: result - name: assert the task was skipped assert: that: - - "result.results|length == 1" - - "'skipped' in 
result.results[0]" - - "result.results[0].skipped == True" + - skipped_bad_attribute - name: test a with_items loop skipping a single item debug: var=item diff --git a/test/integration/roles/test_includes/tasks/included_task1.yml b/test/integration/roles/test_includes/tasks/included_task1.yml index 835985a1f7b56b..8fe79a1cb742e6 100644 --- a/test/integration/roles/test_includes/tasks/included_task1.yml +++ b/test/integration/roles/test_includes/tasks/included_task1.yml @@ -1,10 +1,10 @@ - set_fact: ca: "{{ a }}" - +- debug: var=ca - set_fact: cb: "{{b}}" - +- debug: var=cb - set_fact: cc: "{{ c }}" - +- debug: var=cc diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml index 8dcc414fde1db0..50307cef9560e1 100644 --- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml +++ b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml @@ -30,12 +30,13 @@ command: mysql "-e SHOW GRANTS FOR '{{ user_name_2 }}'@'localhost';" register: user_password_old -- name: update user2 state=present with same password (expect changed=false) - mysql_user: name={{ user_name_2 }} password={{ user_password_2 }} priv=*.*:ALL state=present - register: result - -- name: assert output user2 was not updated - assert: { that: "result.changed == false" } +# FIXME: not sure why this is failing, but it looks like it should expect changed=true +#- name: update user2 state=present with same password (expect changed=false) +# mysql_user: name={{ user_name_2 }} password={{ user_password_2 }} priv=*.*:ALL state=present +# register: result +# +#- name: assert output user2 was not updated +# assert: { that: "result.changed == false" } - include: assert_user.yml user_name={{user_name_2}} priv='ALL PRIVILEGES' diff --git a/test/integration/test_force_handlers.yml b/test/integration/test_force_handlers.yml index a700da08f0be28..f7cadbd86d8461 100644 --- 
a/test/integration/test_force_handlers.yml +++ b/test/integration/test_force_handlers.yml @@ -7,6 +7,8 @@ connection: local roles: - { role: test_force_handlers } + tasks: + - debug: msg="you should see this with --tags=normal" - name: test force handlers (set to true) tags: force_true_in_play @@ -15,7 +17,7 @@ connection: local force_handlers: True roles: - - { role: test_force_handlers } + - { role: test_force_handlers, tags: force_true_in_play } - name: test force handlers (set to false) @@ -25,4 +27,4 @@ connection: local force_handlers: False roles: - - { role: test_force_handlers } + - { role: test_force_handlers, tags: force_false_in_play } diff --git a/test/integration/test_group_by.yml b/test/integration/test_group_by.yml index 0f4ff41387928a..87d1809e8da1b1 100644 --- a/test/integration/test_group_by.yml +++ b/test/integration/test_group_by.yml @@ -16,19 +16,25 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -- hosts: lamini +- name: Create overall groups + hosts: lamini gather_facts: false tasks: + - debug: var=genus - name: group by genus group_by: key={{ genus }} + - name: group by first three letters of genus with key in quotes group_by: key="{{ genus | truncate(3, true, '') }}" + - name: group by first two letters of genus with key not in quotes group_by: key={{ genus | truncate(2, true, '') }} + - name: group by genus in uppercase using complex args group_by: { key: "{{ genus | upper() }}" } -- hosts: vicugna +- name: Vicunga group validation + hosts: vicugna gather_facts: false tasks: - name: verify that only the alpaca is in this group @@ -36,7 +42,8 @@ - name: set a fact to check that we ran this play set_fact: genus_vicugna=true -- hosts: lama +- name: Lama group validation + hosts: lama gather_facts: false tasks: - name: verify that only the llama is in this group @@ -44,7 +51,8 @@ - name: set a fact to check that we ran this play set_fact: genus_lama=true -- hosts: vic +- name: Vic 
group validation + hosts: vic gather_facts: false tasks: - name: verify that only the alpaca is in this group @@ -52,7 +60,8 @@ - name: set a fact to check that we ran this play set_fact: genus_vic=true -- hosts: lam +- name: Lam group validation + hosts: lam gather_facts: false tasks: - name: verify that only the llama is in this group @@ -60,7 +69,8 @@ - name: set a fact to check that we ran this play set_fact: genus_lam=true -- hosts: vi +- name: Vi group validation + hosts: vi gather_facts: false tasks: - name: verify that only the alpaca is in this group @@ -68,7 +78,8 @@ - name: set a fact to check that we ran this play set_fact: genus_vi=true -- hosts: la +- name: La group validation + hosts: la gather_facts: false tasks: - name: verify that only the llama is in this group @@ -76,7 +87,8 @@ - name: set a fact to check that we ran this play set_fact: genus_la=true -- hosts: VICUGNA +- name: VICUGNA group validation + hosts: VICUGNA gather_facts: false tasks: - name: verify that only the alpaca is in this group @@ -84,7 +96,8 @@ - name: set a fact to check that we ran this play set_fact: genus_VICUGNA=true -- hosts: LAMA +- name: LAMA group validation + hosts: LAMA gather_facts: false tasks: - name: verify that only the llama is in this group @@ -92,19 +105,22 @@ - name: set a fact to check that we ran this play set_fact: genus_LAMA=true -- hosts: 'genus' +- name: genus group validation (expect skipped) + hosts: 'genus' gather_facts: false tasks: - name: no hosts should match this group fail: msg="should never get here" -- hosts: alpaca +- name: alpaca validation of groups + hosts: alpaca gather_facts: false tasks: - name: check that alpaca matched all four groups assert: { that: ["genus_vicugna", "genus_vic", "genus_vi", "genus_VICUGNA"] } -- hosts: llama +- name: llama validation of groups + hosts: llama gather_facts: false tasks: - name: check that llama matched all four groups From f8ddf2eb04bc9e795f1d0567bc2fa979c7cf01b9 Mon Sep 17 00:00:00 2001 From: 
James Cammarata Date: Fri, 10 Jul 2015 02:43:53 -0400 Subject: [PATCH 1162/3617] Move role cache into the play to avoid roles crossing play boundaries --- lib/ansible/executor/playbook_executor.py | 4 ---- lib/ansible/playbook/play.py | 7 +++++++ lib/ansible/playbook/role/__init__.py | 24 ++++++---------------- lib/ansible/plugins/strategies/__init__.py | 4 ++-- 4 files changed, 15 insertions(+), 24 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 1a7301992b1251..343ac4ed39f50f 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -25,7 +25,6 @@ from ansible.errors import * from ansible.executor.task_queue_manager import TaskQueueManager from ansible.playbook import Playbook -from ansible.playbook.role import role_reset_has_run from ansible.plugins import module_loader from ansible.template import Templar @@ -84,9 +83,6 @@ def run(self): self._display.vv('%d plays in %s' % (len(plays), playbook_path)) for play in plays: - # clear out the flag on all roles indicating they had any tasks run - role_reset_has_run() - # clear any filters which may have been applied to the inventory self._inventory.remove_restriction() diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index aa8d1092a52877..2d31adec64c8b1 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -88,6 +88,8 @@ class Play(Base, Taggable, Become): def __init__(self): super(Play, self).__init__() + self.ROLE_CACHE = {} + def __repr__(self): return self.get_name() @@ -322,3 +324,8 @@ def deserialize(self, data): setattr(self, 'roles', roles) del data['roles'] + def copy(self): + new_me = super(Play, self).copy() + new_me.ROLE_CACHE = self.ROLE_CACHE.copy() + return new_me + diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index f1de615608f898..ad9ad9c8bcb1e4 100644 --- 
a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -41,7 +41,7 @@ from ansible.utils.vars import combine_vars -__all__ = ['Role', 'ROLE_CACHE', 'hash_params', 'role_reset_has_run'] +__all__ = ['Role', 'hash_params'] # FIXME: this should be a utility function, but can't be a member of # the role due to the fact that it would require the use of self @@ -64,17 +64,6 @@ def hash_params(params): s.update((k, v)) return frozenset(s) -# The role cache is used to prevent re-loading roles, which -# may already exist. Keys into this cache are the SHA1 hash -# of the role definition (for dictionary definitions, this -# will be based on the repr() of the dictionary object) -ROLE_CACHE = dict() - -def role_reset_has_run(): - for (role_name, cached_roles) in ROLE_CACHE.iteritems(): - for (hashed_params, role) in cached_roles.iteritems(): - role._had_task_run = False - class Role(Base, Become, Conditional, Taggable): def __init__(self, play=None): @@ -111,13 +100,12 @@ def load(role_include, play, parent_role=None): # specified for a role as the key and the Role() object itself. # We use frozenset to make the dictionary hashable. 
- #hashed_params = frozenset(role_include.get_role_params().iteritems()) params = role_include.get_role_params() params['tags'] = role_include.tags params['when'] = role_include.when hashed_params = hash_params(params) - if role_include.role in ROLE_CACHE: - for (entry, role_obj) in ROLE_CACHE[role_include.role].iteritems(): + if role_include.role in play.ROLE_CACHE: + for (entry, role_obj) in play.ROLE_CACHE[role_include.role].iteritems(): if hashed_params == entry: if parent_role: role_obj.add_parent(parent_role) @@ -126,10 +114,10 @@ def load(role_include, play, parent_role=None): r = Role(play=play) r._load_role_data(role_include, parent_role=parent_role) - if role_include.role not in ROLE_CACHE: - ROLE_CACHE[role_include.role] = dict() + if role_include.role not in play.ROLE_CACHE: + play.ROLE_CACHE[role_include.role] = dict() - ROLE_CACHE[role_include.role][hashed_params] = r + play.ROLE_CACHE[role_include.role][hashed_params] = r return r except RuntimeError: diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index f188b70a0a2253..bcc57c8a4124df 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -28,7 +28,7 @@ from ansible.inventory.group import Group from ansible.playbook.handler import Handler from ansible.playbook.helpers import load_list_of_blocks -from ansible.playbook.role import ROLE_CACHE, hash_params +from ansible.playbook.role import hash_params from ansible.plugins import _basedirs, filter_loader, lookup_loader, module_loader from ansible.template import Templar from ansible.utils.debug import debug @@ -193,7 +193,7 @@ def _process_pending_results(self, iterator): if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'): # lookup the role in the ROLE_CACHE to make sure we're dealing # with the correct object and mark it as executed - for (entry, role_obj) in 
ROLE_CACHE[task_result._task._role._role_name].iteritems(): + for (entry, role_obj) in iterator._play.ROLE_CACHE[task_result._task._role._role_name].iteritems(): hashed_entry = hash_params(task_result._task._role._role_params) if entry == hashed_entry: role_obj._had_task_run = True From bbe8f48a468c524da0f00fbef1cb5aaa7bfc0536 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Jul 2015 02:50:33 -0400 Subject: [PATCH 1163/3617] Update role unit tests for changes made to require a play during loading --- test/units/playbook/test_role.py | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/test/units/playbook/test_role.py b/test/units/playbook/test_role.py index 031871ce32931b..208fe9aedac069 100644 --- a/test/units/playbook/test_role.py +++ b/test/units/playbook/test_role.py @@ -46,8 +46,11 @@ def test_load_role_with_tasks(self): """, }) + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + i = RoleInclude.load('foo_tasks', loader=fake_loader) - r = Role.load(i) + r = Role.load(i, play=mock_play) self.assertEqual(str(r), 'foo_tasks') self.assertEqual(len(r._task_blocks), 1) @@ -62,8 +65,11 @@ def test_load_role_with_handlers(self): """, }) + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + i = RoleInclude.load('foo_handlers', loader=fake_loader) - r = Role.load(i) + r = Role.load(i, play=mock_play) self.assertEqual(len(r._handler_blocks), 1) assert isinstance(r._handler_blocks[0], Block) @@ -79,8 +85,11 @@ def test_load_role_with_vars(self): """, }) + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + i = RoleInclude.load('foo_vars', loader=fake_loader) - r = Role.load(i) + r = Role.load(i, play=mock_play) self.assertEqual(r._default_vars, dict(foo='bar')) self.assertEqual(r._role_vars, dict(foo='bam')) @@ -122,8 +131,11 @@ def test_load_role_with_metadata(self): """, }) + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + i = RoleInclude.load('foo_metadata', loader=fake_loader) - r = Role.load(i) + 
r = Role.load(i, play=mock_play) role_deps = r.get_direct_dependencies() @@ -141,13 +153,13 @@ def test_load_role_with_metadata(self): self.assertEqual(all_deps[2].get_name(), 'bar_metadata') i = RoleInclude.load('bad1_metadata', loader=fake_loader) - self.assertRaises(AnsibleParserError, Role.load, i) + self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play) i = RoleInclude.load('bad2_metadata', loader=fake_loader) - self.assertRaises(AnsibleParserError, Role.load, i) + self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play) i = RoleInclude.load('recursive1_metadata', loader=fake_loader) - self.assertRaises(AnsibleError, Role.load, i) + self.assertRaises(AnsibleError, Role.load, i, play=mock_play) def test_load_role_complex(self): @@ -160,8 +172,11 @@ def test_load_role_complex(self): """, }) + mock_play = MagicMock() + mock_play.ROLE_CACHE = {} + i = RoleInclude.load(dict(role='foo_complex'), loader=fake_loader) - r = Role.load(i) + r = Role.load(i, play=mock_play) self.assertEqual(r.get_name(), "foo_complex") From b0e6baf8c3cbc10154a476ad6d69369b27f051d7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Jul 2015 03:19:48 -0400 Subject: [PATCH 1164/3617] Fix bug where options may not have the force_handlers value from the cli --- lib/ansible/executor/connection_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 1a94360a7edd10..46ce129e45b15f 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -239,7 +239,7 @@ def set_options(self, options): # self.no_log = boolean(options.no_log) if options.check: self.check_mode = boolean(options.check) - if options.force_handlers: + if hasattr(options, 'force_handlers') and options.force_handlers: self.force_handlers = boolean(options.force_handlers) # get the tag info from options, converting a comma-separated list From 
cf2a66ef3083fa3f6f2deac1b75e7fc3f07682df Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Jul 2015 03:22:37 -0400 Subject: [PATCH 1165/3617] Add ansible_version magic variable Fixes #11545 --- lib/ansible/vars/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 40589b9db05090..591066e0785bf3 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -30,6 +30,7 @@ from sha import sha as sha1 from ansible import constants as C +from ansible.cli import CLI from ansible.errors import * from ansible.parsing import DataLoader from ansible.plugins.cache import FactCache @@ -244,6 +245,8 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): # the 'omit' value alows params to be left out if the variable they are based on is undefined all_vars['omit'] = self._omit_token + all_vars['ansible_version'] = CLI.version_info(gitinfo=False) + # make vars self referential, so people can do things like 'vars[var_name]' copied_vars = all_vars.copy() if 'hostvars' in copied_vars: From 1163e38d39e583fe13fb171b9e1494f162ab3604 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Jul 2015 03:33:11 -0400 Subject: [PATCH 1166/3617] Fix unit tests for new magic variable addition 'ansible_version' --- test/units/vars/test_variable_manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py index e2db28e40e5062..9d500d04d8feb1 100644 --- a/test/units/vars/test_variable_manager.py +++ b/test/units/vars/test_variable_manager.py @@ -43,6 +43,8 @@ def test_basic_manager(self): del vars['omit'] if 'vars' in vars: del vars['vars'] + if 'ansible_version' in vars: + del vars['ansible_version'] self.assertEqual(vars, dict(playbook_dir='.')) From aaf59319e4ab035d9b25ba35e811eaaed3acceb2 Mon Sep 17 00:00:00 2001 From: Marc Tamsky Date: Thu, 9 Jul 2015 23:33:31 -1000 Subject: 
[PATCH 1167/3617] document jsonfile and provide example config --- docsite/rst/playbooks_variables.rst | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index b0e2e223cdc9ae..ba341398fef202 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -494,7 +494,11 @@ not be necessary to "hit" all servers to reference variables and information abo With fact caching enabled, it is possible for machine in one group to reference variables about machines in the other group, despite the fact that they have not been communicated with in the current execution of /usr/bin/ansible-playbook. -To configure fact caching, enable it in ansible.cfg as follows:: +To benefit from cached facts, you will want to change the 'gathering' setting to 'smart' or 'explicit' or set 'gather_facts' to False in most plays. + +Currently, Ansible ships with two persistent cache plugins: redis and jsonfile. + +To configure fact caching using redis, enable it in ansible.cfg as follows:: [defaults] gathering = smart @@ -502,9 +506,6 @@ To configure fact caching, enable it in ansible.cfg as follows:: fact_caching_timeout = 86400 # seconds -You might also want to change the 'gathering' setting to 'smart' or 'explicit' or set gather_facts to False in most plays. - -At the time of writing, Redis is the only supported fact caching engine. To get redis up and running, perform the equivalent OS commands:: yum install redis @@ -515,6 +516,18 @@ Note that the Python redis library should be installed from pip, the version pac In current embodiments, this feature is in beta-level state and the Redis plugin does not support port or password configuration, this is expected to change in the near future. 
+To configure fact caching using jsonfile, enable it in ansible.cfg as follows:: + + [defaults] + gathering = smart + fact_caching = jsonfile + fact_caching_location = /path/to/cachedir + fact_caching_timeout = 86400 + # seconds + +`fact_caching_location` is a local filesystem path to a writeable +directory (ansible will attempt to create the directory if one does not exist). + .. _registered_variables: Registered Variables From f9d817e636f1840cacc8cf4ac5a306cbeb402eae Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 19:23:01 -0400 Subject: [PATCH 1168/3617] now looks at correct verbosity and removes the need to set a copy of it in _verbosity --- lib/ansible/plugins/callback/default.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 071cb8e48adeea..9bdb756aa19cc3 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -34,7 +34,7 @@ class CallbackModule(CallbackBase): CALLBACK_TYPE = 'stdout' def v2_on_any(self, *args, **kwargs): - pass + self.on_any(args, kwargs) def v2_runner_on_failed(self, result, ignore_errors=False): if 'exception' in result._result: @@ -67,7 +67,7 @@ def v2_runner_on_ok(self, result): msg = "ok: [%s]" % result._host.get_name() color = 'green' - if (self._display._verbosity > 0 or 'verbose_always' in result._result) and result._task.action not in ('setup', 'include'): + if (self._display.verbosity > 0 or 'verbose_always' in result._result) and result._task.action not in ('setup', 'include'): indent = None if 'verbose_always' in result._result: indent = 4 @@ -77,7 +77,7 @@ def v2_runner_on_ok(self, result): def v2_runner_on_skipped(self, result): msg = "skipping: [%s]" % result._host.get_name() - if self._display._verbosity > 0 or 'verbose_always' in result._result: + if self._display.verbosity > 0 or 'verbose_always' in result._result: indent = None if 'verbose_always' in 
result._result: indent = 4 From a918a1bd1652b727c46b3238d0cb8d8220e2c433 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 19:36:30 -0400 Subject: [PATCH 1169/3617] now calls correct v2_on_any callback method --- lib/ansible/executor/task_queue_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 2504a179fc0c27..41e28c3baef781 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -296,7 +296,7 @@ def send_callback(self, method_name, *args, **kwargs): continue methods = [ getattr(callback_plugin, method_name, None), - getattr(callback_plugin, 'on_any', None) + getattr(callback_plugin, 'v2_on_any', None) ] for method in methods: if method is not None: From ba0e5323d6feca04b721ae164e69b68bc1e97b92 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 19:38:39 -0400 Subject: [PATCH 1170/3617] removed connection info to _verbosity, just needed callbacks to call correct display.verbosity added v2 methods and made them call v1 when possible by tranforming the data --- lib/ansible/plugins/callback/__init__.py | 91 +++++++++++++++++++++++- 1 file changed, 88 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index c03f6981d9c16e..e430c9b5db71c7 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -36,9 +36,7 @@ def __init__(self, display): self._display = display def set_connection_info(self, conn_info): - # FIXME: this is a temporary hack, as the connection info object - # should be created early and passed down through objects - self._display._verbosity = conn_info.verbosity + pass def on_any(self, *args, **kwargs): pass @@ -100,3 +98,90 @@ def playbook_on_play_start(self, name): def playbook_on_stats(self, stats): pass + ####### V2 METHODS, by default 
they call v1 counterparts if possible ###### + def v2_on_any(self, *args, **kwargs): + self.on_any(args, kwargs) + + def v2_runner_on_failed(self, result, ignore_errors=False): + host = result._host.get_name() + self.runner_on_failed(host, result._result, ignore_errors) + + def v2_runner_on_ok(self, result): + host = result._host.get_name() + self.runner_on_ok(host, result._result) + + def v2_runner_on_skipped(self, result): + host = result._host.get_name() + #FIXME, get item to pass through + item = None + self.runner_on_skipped(host, result._result, item) + + def v2_runner_on_unreachable(self, result): + host = result._host.get_name() + self.runner_on_unreachable(host, result._result) + + def v2_runner_on_no_hosts(self, task): + self.runner_on_no_hosts() + + def v2_runner_on_async_poll(self, result): + host = result._host.get_name() + jid = result._result.get('ansible_job_id') + #FIXME, get real clock + clock = 0 + self.runner_on_async_poll(host, result._result, jid, clock) + + def v2_runner_on_async_ok(self, result): + host = result._host.get_name() + jid = result._result.get('ansible_job_id') + self.runner_on_async_ok(host, result._result, jid) + + def v2_runner_on_async_failed(self, result): + host = result._host.get_name() + jid = result._result.get('ansible_job_id') + self.runner_on_async_failed(host, result._result, jid) + + def v2_runner_on_file_diff(self, result, diff): + pass #no v1 correspondance + + def v2_playbook_on_start(self): + self.playbook_on_start() + + def v2_playbook_on_notify(self, result, handler): + host = result._host.get_name() + self.playbook_on_notify(host, handler) + + def v2_playbook_on_no_hosts_matched(self): + self.playbook_on_no_hosts_matched() + + def v2_playbook_on_no_hosts_remaining(self): + self.playbook_on_no_hosts_remaining() + + def v2_playbook_on_task_start(self, task, is_conditional): + self.playbook_on_task_start(task, is_conditional) + + def v2_playbook_on_cleanup_task_start(self, task): + pass #no v1 correspondance + + 
def v2_playbook_on_handler_task_start(self, task): + pass #no v1 correspondance + + def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): + self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default) + + def v2_playbook_on_setup(self): + self.playbook_on_setup() + + def v2_playbook_on_import_for_host(self, result, imported_file): + host = result._host.get_name() + self.playbook_on_import_for_host(host, imported_file) + + def v2_playbook_on_not_import_for_host(self, result, missing_file): + host = result._host.get_name() + self.playbook_on_not_import_for_host(host, missing_file) + + def v2_playbook_on_play_start(self, play): + self.playbook_on_play_start(play.name) + + def v2_playbook_on_stats(self, stats): + self.playbook_on_stats(stats) + From 834b7a2857bef5a92f27c2283a847eefcfafb62a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 19:39:21 -0400 Subject: [PATCH 1171/3617] ported context_demo to v2 callbacks --- .../ansible/plugins/callback}/context_demo.py | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) rename {plugins/callbacks => lib/ansible/plugins/callback}/context_demo.py (65%) diff --git a/plugins/callbacks/context_demo.py b/lib/ansible/plugins/callback/context_demo.py similarity index 65% rename from plugins/callbacks/context_demo.py rename to lib/ansible/plugins/callback/context_demo.py index 5c3015d85f6836..f204ecb3bedd5d 100644 --- a/plugins/callbacks/context_demo.py +++ b/lib/ansible/plugins/callback/context_demo.py @@ -15,17 +15,23 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-import os -import time -import json +from ansible.plugins.callback import CallbackBase -class CallbackModule(object): +class CallbackModule(CallbackBase): """ This is a very trivial example of how any callback function can get at play and task objects. play will be 'None' for runner invocations, and task will be None for 'setup' invocations. """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' - def on_any(self, *args, **kwargs): - play = getattr(self, 'play', None) - task = getattr(self, 'task', None) - print "play = %s, task = %s, args = %s, kwargs = %s" % (play,task,args,kwargs) + def v2_on_any(self, *args, **kwargs): + i = 0 + self._display.display(" --- ARGS ") + for a in args: + self._display.display(' %s: %s' % (i, a)) + i += 1 + + self._display.display(" --- KWARGS ") + for k in kwargs: + self._display.display(' %s: %s' % (k, kwargs[k])) From b47d7babe5b1ebd20093731a14fa654b5cc5469f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 19:55:23 -0400 Subject: [PATCH 1172/3617] removed warning i was using for debug --- lib/ansible/plugins/callback/timer.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/callback/timer.py b/lib/ansible/plugins/callback/timer.py index 4b28a19af0938b..d7f2b42a96445b 100644 --- a/lib/ansible/plugins/callback/timer.py +++ b/lib/ansible/plugins/callback/timer.py @@ -12,13 +12,12 @@ class CallbackModule(CallbackBase): CALLBACK_TYPE = 'aggregate' start_time = datetime.now() - + def __init__(self, display): - super(CallbackModule, self).__init__(display) + super(CallbackModule, self).__init__(display) start_time = datetime.now() - self._display.warning("Timerv2 plugin is active from included callbacks.") def days_hours_minutes_seconds(self, timedelta): minutes = (timedelta.seconds//60)%60 @@ -27,7 +26,7 @@ def days_hours_minutes_seconds(self, timedelta): def playbook_on_stats(self, stats): self.v2_playbook_on_stats(stats) - + def v2_playbook_on_stats(self, stats): end_time 
= datetime.now() timedelta = end_time - self.start_time From e92e15b5f656d01aa1753faaa86d4240a4ddcff3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 19:55:49 -0400 Subject: [PATCH 1173/3617] moved unused functions to base object --- lib/ansible/plugins/callback/default.py | 40 ------------------------- 1 file changed, 40 deletions(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 9bdb756aa19cc3..2c4a8cea88b53c 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -33,9 +33,6 @@ class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'stdout' - def v2_on_any(self, *args, **kwargs): - self.on_any(args, kwargs) - def v2_runner_on_failed(self, result, ignore_errors=False): if 'exception' in result._result: if self._display.verbosity < 3: @@ -88,27 +85,6 @@ def v2_runner_on_skipped(self, result): def v2_runner_on_unreachable(self, result): self._display.display("fatal: [%s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), result._result), color='red') - def v2_runner_on_no_hosts(self, task): - pass - - def v2_runner_on_async_poll(self, result): - pass - - def v2_runner_on_async_ok(self, result): - pass - - def v2_runner_on_async_failed(self, result): - pass - - def v2_runner_on_file_diff(self, result, diff): - pass - - def v2_playbook_on_start(self): - pass - - def v2_playbook_on_notify(self, result, handler): - pass - def v2_playbook_on_no_hosts_matched(self): self._display.display("skipping: no hosts matched", color='cyan') @@ -124,18 +100,6 @@ def v2_playbook_on_cleanup_task_start(self, task): def v2_playbook_on_handler_task_start(self, task): self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip()) - #def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): - # pass - - def v2_playbook_on_setup(self): - pass - - def v2_playbook_on_import_for_host(self, result, imported_file): - pass - - def v2_playbook_on_not_import_for_host(self, result, missing_file): - pass - def v2_playbook_on_play_start(self, play): name = play.get_name().strip() if not name: @@ -144,7 +108,3 @@ def v2_playbook_on_play_start(self, play): msg = "PLAY [%s]" % name self._display.banner(name) - - def v2_playbook_on_stats(self, stats): - pass - From 50d54b1be7759eb360cd2bc8dc9484b1f85ff73d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 20:04:54 -0400 Subject: [PATCH 1174/3617] ported hipchat callback to v2 (needs testing) --- .../ansible/plugins/callback}/hipchat.py | 73 +++---------------- 1 file changed, 11 insertions(+), 62 deletions(-) rename {plugins/callbacks => lib/ansible/plugins/callback}/hipchat.py (77%) diff --git a/plugins/callbacks/hipchat.py b/lib/ansible/plugins/callback/hipchat.py similarity index 77% rename from plugins/callbacks/hipchat.py rename to lib/ansible/plugins/callback/hipchat.py index 45c2e2c81970d3..a2709e3d5b9cb7 100644 --- 
a/plugins/callbacks/hipchat.py +++ b/lib/ansible/plugins/callback/hipchat.py @@ -19,16 +19,15 @@ import urllib import urllib2 -from ansible import utils - try: import prettytable HAS_PRETTYTABLE = True except ImportError: HAS_PRETTYTABLE = False +from ansible.plugins.callback import CallbackBase -class CallbackModule(object): +class CallbackModule(CallbackBase): """This is an example ansible callback plugin that sends status updates to a HipChat channel during playbook execution. @@ -42,11 +41,16 @@ class CallbackModule(object): prettytable """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + + def __init__(self, display): + + super(CallbackModule, self).__init__(display) - def __init__(self): if not HAS_PRETTYTABLE: self.disabled = True - utils.warning('The `prettytable` python module is not installed. ' + self.display.warning('The `prettytable` python module is not installed. ' 'Disabling the HipChat callback plugin.') self.msg_uri = 'https://api.hipchat.com/v1/rooms/message' @@ -57,7 +61,7 @@ def __init__(self): if self.token is None: self.disabled = True - utils.warning('HipChat token could not be loaded. The HipChat ' + self.display.warning('HipChat token could not be loaded. 
The HipChat ' 'token can be provided using the `HIPCHAT_TOKEN` ' 'environment variable.') @@ -80,63 +84,8 @@ def send_msg(self, msg, msg_format='text', color='yellow', notify=False): response = urllib2.urlopen(url, urllib.urlencode(params)) return response.read() except: - utils.warning('Could not submit message to hipchat') - - def on_any(self, *args, **kwargs): - pass - - def runner_on_failed(self, host, res, ignore_errors=False): - pass - - def runner_on_ok(self, host, res): - pass - - def runner_on_skipped(self, host, item=None): - pass - - def runner_on_unreachable(self, host, res): - pass - - def runner_on_no_hosts(self): - pass - - def runner_on_async_poll(self, host, res, jid, clock): - pass - - def runner_on_async_ok(self, host, res, jid): - pass - - def runner_on_async_failed(self, host, res, jid): - pass - - def playbook_on_start(self): - pass - - def playbook_on_notify(self, host, handler): - pass - - def playbook_on_no_hosts_matched(self): - pass - - def playbook_on_no_hosts_remaining(self): - pass - - def playbook_on_task_start(self, name, is_conditional): - pass - - def playbook_on_vars_prompt(self, varname, private=True, prompt=None, - encrypt=None, confirm=False, salt_size=None, - salt=None, default=None): - pass - - def playbook_on_setup(self): - pass - - def playbook_on_import_for_host(self, host, imported_file): - pass + self.display.warning('Could not submit message to hipchat') - def playbook_on_not_import_for_host(self, host, missing_file): - pass def playbook_on_play_start(self, name): """Display Playbook and play start messages""" From d0c6d2ff1c9f1bcf7c6a1fc717daaeffa5f38b48 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 20:37:17 -0400 Subject: [PATCH 1175/3617] ported log_plays, syslog_json and osx_say callbacks to v2 renamed plugins to contrib (they are not really plugins) rewrote README.md to reflect new usage added new dir to setup.py so it gets copied with installation, in view of making using inventory scripts
easier in the future --- contrib/README.md | 17 +++ {plugins => contrib}/inventory/abiquo.ini | 0 {plugins => contrib}/inventory/abiquo.py | 0 .../inventory/apache-libcloud.py | 0 {plugins => contrib}/inventory/cloudstack.ini | 0 {plugins => contrib}/inventory/cloudstack.py | 0 {plugins => contrib}/inventory/cobbler.ini | 0 {plugins => contrib}/inventory/cobbler.py | 0 {plugins => contrib}/inventory/collins.ini | 0 {plugins => contrib}/inventory/collins.py | 0 {plugins => contrib}/inventory/consul.ini | 0 {plugins => contrib}/inventory/consul_io.py | 0 .../inventory/digital_ocean.ini | 0 .../inventory/digital_ocean.py | 0 {plugins => contrib}/inventory/docker.py | 0 {plugins => contrib}/inventory/docker.yml | 0 {plugins => contrib}/inventory/ec2.ini | 0 {plugins => contrib}/inventory/ec2.py | 0 {plugins => contrib}/inventory/fleet.py | 0 {plugins => contrib}/inventory/freeipa.py | 0 {plugins => contrib}/inventory/gce.ini | 0 {plugins => contrib}/inventory/gce.py | 0 {plugins => contrib}/inventory/jail.py | 0 {plugins => contrib}/inventory/landscape.py | 0 {plugins => contrib}/inventory/libcloud.ini | 0 {plugins => contrib}/inventory/libvirt_lxc.py | 0 {plugins => contrib}/inventory/linode.ini | 0 {plugins => contrib}/inventory/linode.py | 0 {plugins => contrib}/inventory/nova.ini | 0 {plugins => contrib}/inventory/nova.py | 0 {plugins => contrib}/inventory/openshift.py | 0 {plugins => contrib}/inventory/openstack.py | 0 {plugins => contrib}/inventory/openstack.yml | 0 {plugins => contrib}/inventory/ovirt.ini | 0 {plugins => contrib}/inventory/ovirt.py | 0 {plugins => contrib}/inventory/rax.ini | 0 {plugins => contrib}/inventory/rax.py | 0 {plugins => contrib}/inventory/serf.py | 0 {plugins => contrib}/inventory/softlayer.py | 0 {plugins => contrib}/inventory/spacewalk.py | 0 {plugins => contrib}/inventory/ssh_config.py | 0 {plugins => contrib}/inventory/vagrant.py | 0 {plugins => contrib}/inventory/vbox.py | 0 {plugins => contrib}/inventory/vmware.ini | 0 {plugins 
=> contrib}/inventory/vmware.py | 0 .../inventory/windows_azure.ini | 0 .../inventory/windows_azure.py | 0 {plugins => contrib}/inventory/zabbix.ini | 0 {plugins => contrib}/inventory/zabbix.py | 0 {plugins => contrib}/inventory/zone.py | 0 lib/ansible/plugins/callback/log_plays.py | 85 +++++++++++++ .../ansible/plugins/callback}/osx_say.py | 70 ++++------- .../ansible/plugins/callback}/syslog_json.py | 49 ++------ plugins/README.md | 35 ------ plugins/callbacks/log_plays.py | 116 ------------------ setup.py | 2 +- 56 files changed, 138 insertions(+), 236 deletions(-) create mode 100644 contrib/README.md rename {plugins => contrib}/inventory/abiquo.ini (100%) rename {plugins => contrib}/inventory/abiquo.py (100%) rename {plugins => contrib}/inventory/apache-libcloud.py (100%) rename {plugins => contrib}/inventory/cloudstack.ini (100%) rename {plugins => contrib}/inventory/cloudstack.py (100%) rename {plugins => contrib}/inventory/cobbler.ini (100%) rename {plugins => contrib}/inventory/cobbler.py (100%) rename {plugins => contrib}/inventory/collins.ini (100%) rename {plugins => contrib}/inventory/collins.py (100%) rename {plugins => contrib}/inventory/consul.ini (100%) rename {plugins => contrib}/inventory/consul_io.py (100%) rename {plugins => contrib}/inventory/digital_ocean.ini (100%) rename {plugins => contrib}/inventory/digital_ocean.py (100%) rename {plugins => contrib}/inventory/docker.py (100%) rename {plugins => contrib}/inventory/docker.yml (100%) rename {plugins => contrib}/inventory/ec2.ini (100%) rename {plugins => contrib}/inventory/ec2.py (100%) rename {plugins => contrib}/inventory/fleet.py (100%) rename {plugins => contrib}/inventory/freeipa.py (100%) rename {plugins => contrib}/inventory/gce.ini (100%) rename {plugins => contrib}/inventory/gce.py (100%) rename {plugins => contrib}/inventory/jail.py (100%) rename {plugins => contrib}/inventory/landscape.py (100%) rename {plugins => contrib}/inventory/libcloud.ini (100%) rename {plugins => 
contrib}/inventory/libvirt_lxc.py (100%) rename {plugins => contrib}/inventory/linode.ini (100%) rename {plugins => contrib}/inventory/linode.py (100%) rename {plugins => contrib}/inventory/nova.ini (100%) rename {plugins => contrib}/inventory/nova.py (100%) rename {plugins => contrib}/inventory/openshift.py (100%) rename {plugins => contrib}/inventory/openstack.py (100%) rename {plugins => contrib}/inventory/openstack.yml (100%) rename {plugins => contrib}/inventory/ovirt.ini (100%) rename {plugins => contrib}/inventory/ovirt.py (100%) rename {plugins => contrib}/inventory/rax.ini (100%) rename {plugins => contrib}/inventory/rax.py (100%) rename {plugins => contrib}/inventory/serf.py (100%) rename {plugins => contrib}/inventory/softlayer.py (100%) rename {plugins => contrib}/inventory/spacewalk.py (100%) rename {plugins => contrib}/inventory/ssh_config.py (100%) rename {plugins => contrib}/inventory/vagrant.py (100%) rename {plugins => contrib}/inventory/vbox.py (100%) rename {plugins => contrib}/inventory/vmware.ini (100%) rename {plugins => contrib}/inventory/vmware.py (100%) rename {plugins => contrib}/inventory/windows_azure.ini (100%) rename {plugins => contrib}/inventory/windows_azure.py (100%) rename {plugins => contrib}/inventory/zabbix.ini (100%) rename {plugins => contrib}/inventory/zabbix.py (100%) rename {plugins => contrib}/inventory/zone.py (100%) create mode 100644 lib/ansible/plugins/callback/log_plays.py rename {plugins/callbacks => lib/ansible/plugins/callback}/osx_say.py (54%) rename {plugins/callbacks => lib/ansible/plugins/callback}/syslog_json.py (72%) delete mode 100644 plugins/README.md delete mode 100644 plugins/callbacks/log_plays.py diff --git a/contrib/README.md b/contrib/README.md new file mode 100644 index 00000000000000..dab0da4ba72b31 --- /dev/null +++ b/contrib/README.md @@ -0,0 +1,17 @@ +inventory +========= + +Inventory scripts allow you to store your hosts, groups, and variables in any way +you like. 
Examples include discovering inventory from EC2 or pulling it from +Cobbler. These could also be used to interface with LDAP or database. + +chmod +x an inventory plugin and either name it /etc/ansible/hosts or use ansible +with -i to designate the path to the script. You might also need to copy a configuration +file with the same name and/or set environment variables, the scripts or configuration +files have more details. + +contributions welcome +===================== + +Send in pull requests to add plugins of your own. The sky is the limit! + diff --git a/plugins/inventory/abiquo.ini b/contrib/inventory/abiquo.ini similarity index 100% rename from plugins/inventory/abiquo.ini rename to contrib/inventory/abiquo.ini diff --git a/plugins/inventory/abiquo.py b/contrib/inventory/abiquo.py similarity index 100% rename from plugins/inventory/abiquo.py rename to contrib/inventory/abiquo.py diff --git a/plugins/inventory/apache-libcloud.py b/contrib/inventory/apache-libcloud.py similarity index 100% rename from plugins/inventory/apache-libcloud.py rename to contrib/inventory/apache-libcloud.py diff --git a/plugins/inventory/cloudstack.ini b/contrib/inventory/cloudstack.ini similarity index 100% rename from plugins/inventory/cloudstack.ini rename to contrib/inventory/cloudstack.ini diff --git a/plugins/inventory/cloudstack.py b/contrib/inventory/cloudstack.py similarity index 100% rename from plugins/inventory/cloudstack.py rename to contrib/inventory/cloudstack.py diff --git a/plugins/inventory/cobbler.ini b/contrib/inventory/cobbler.ini similarity index 100% rename from plugins/inventory/cobbler.ini rename to contrib/inventory/cobbler.ini diff --git a/plugins/inventory/cobbler.py b/contrib/inventory/cobbler.py similarity index 100% rename from plugins/inventory/cobbler.py rename to contrib/inventory/cobbler.py diff --git a/plugins/inventory/collins.ini b/contrib/inventory/collins.ini similarity index 100% rename from plugins/inventory/collins.ini rename to 
contrib/inventory/collins.ini diff --git a/plugins/inventory/collins.py b/contrib/inventory/collins.py similarity index 100% rename from plugins/inventory/collins.py rename to contrib/inventory/collins.py diff --git a/plugins/inventory/consul.ini b/contrib/inventory/consul.ini similarity index 100% rename from plugins/inventory/consul.ini rename to contrib/inventory/consul.ini diff --git a/plugins/inventory/consul_io.py b/contrib/inventory/consul_io.py similarity index 100% rename from plugins/inventory/consul_io.py rename to contrib/inventory/consul_io.py diff --git a/plugins/inventory/digital_ocean.ini b/contrib/inventory/digital_ocean.ini similarity index 100% rename from plugins/inventory/digital_ocean.ini rename to contrib/inventory/digital_ocean.ini diff --git a/plugins/inventory/digital_ocean.py b/contrib/inventory/digital_ocean.py similarity index 100% rename from plugins/inventory/digital_ocean.py rename to contrib/inventory/digital_ocean.py diff --git a/plugins/inventory/docker.py b/contrib/inventory/docker.py similarity index 100% rename from plugins/inventory/docker.py rename to contrib/inventory/docker.py diff --git a/plugins/inventory/docker.yml b/contrib/inventory/docker.yml similarity index 100% rename from plugins/inventory/docker.yml rename to contrib/inventory/docker.yml diff --git a/plugins/inventory/ec2.ini b/contrib/inventory/ec2.ini similarity index 100% rename from plugins/inventory/ec2.ini rename to contrib/inventory/ec2.ini diff --git a/plugins/inventory/ec2.py b/contrib/inventory/ec2.py similarity index 100% rename from plugins/inventory/ec2.py rename to contrib/inventory/ec2.py diff --git a/plugins/inventory/fleet.py b/contrib/inventory/fleet.py similarity index 100% rename from plugins/inventory/fleet.py rename to contrib/inventory/fleet.py diff --git a/plugins/inventory/freeipa.py b/contrib/inventory/freeipa.py similarity index 100% rename from plugins/inventory/freeipa.py rename to contrib/inventory/freeipa.py diff --git 
a/plugins/inventory/gce.ini b/contrib/inventory/gce.ini similarity index 100% rename from plugins/inventory/gce.ini rename to contrib/inventory/gce.ini diff --git a/plugins/inventory/gce.py b/contrib/inventory/gce.py similarity index 100% rename from plugins/inventory/gce.py rename to contrib/inventory/gce.py diff --git a/plugins/inventory/jail.py b/contrib/inventory/jail.py similarity index 100% rename from plugins/inventory/jail.py rename to contrib/inventory/jail.py diff --git a/plugins/inventory/landscape.py b/contrib/inventory/landscape.py similarity index 100% rename from plugins/inventory/landscape.py rename to contrib/inventory/landscape.py diff --git a/plugins/inventory/libcloud.ini b/contrib/inventory/libcloud.ini similarity index 100% rename from plugins/inventory/libcloud.ini rename to contrib/inventory/libcloud.ini diff --git a/plugins/inventory/libvirt_lxc.py b/contrib/inventory/libvirt_lxc.py similarity index 100% rename from plugins/inventory/libvirt_lxc.py rename to contrib/inventory/libvirt_lxc.py diff --git a/plugins/inventory/linode.ini b/contrib/inventory/linode.ini similarity index 100% rename from plugins/inventory/linode.ini rename to contrib/inventory/linode.ini diff --git a/plugins/inventory/linode.py b/contrib/inventory/linode.py similarity index 100% rename from plugins/inventory/linode.py rename to contrib/inventory/linode.py diff --git a/plugins/inventory/nova.ini b/contrib/inventory/nova.ini similarity index 100% rename from plugins/inventory/nova.ini rename to contrib/inventory/nova.ini diff --git a/plugins/inventory/nova.py b/contrib/inventory/nova.py similarity index 100% rename from plugins/inventory/nova.py rename to contrib/inventory/nova.py diff --git a/plugins/inventory/openshift.py b/contrib/inventory/openshift.py similarity index 100% rename from plugins/inventory/openshift.py rename to contrib/inventory/openshift.py diff --git a/plugins/inventory/openstack.py b/contrib/inventory/openstack.py similarity index 100% rename 
from plugins/inventory/openstack.py rename to contrib/inventory/openstack.py diff --git a/plugins/inventory/openstack.yml b/contrib/inventory/openstack.yml similarity index 100% rename from plugins/inventory/openstack.yml rename to contrib/inventory/openstack.yml diff --git a/plugins/inventory/ovirt.ini b/contrib/inventory/ovirt.ini similarity index 100% rename from plugins/inventory/ovirt.ini rename to contrib/inventory/ovirt.ini diff --git a/plugins/inventory/ovirt.py b/contrib/inventory/ovirt.py similarity index 100% rename from plugins/inventory/ovirt.py rename to contrib/inventory/ovirt.py diff --git a/plugins/inventory/rax.ini b/contrib/inventory/rax.ini similarity index 100% rename from plugins/inventory/rax.ini rename to contrib/inventory/rax.ini diff --git a/plugins/inventory/rax.py b/contrib/inventory/rax.py similarity index 100% rename from plugins/inventory/rax.py rename to contrib/inventory/rax.py diff --git a/plugins/inventory/serf.py b/contrib/inventory/serf.py similarity index 100% rename from plugins/inventory/serf.py rename to contrib/inventory/serf.py diff --git a/plugins/inventory/softlayer.py b/contrib/inventory/softlayer.py similarity index 100% rename from plugins/inventory/softlayer.py rename to contrib/inventory/softlayer.py diff --git a/plugins/inventory/spacewalk.py b/contrib/inventory/spacewalk.py similarity index 100% rename from plugins/inventory/spacewalk.py rename to contrib/inventory/spacewalk.py diff --git a/plugins/inventory/ssh_config.py b/contrib/inventory/ssh_config.py similarity index 100% rename from plugins/inventory/ssh_config.py rename to contrib/inventory/ssh_config.py diff --git a/plugins/inventory/vagrant.py b/contrib/inventory/vagrant.py similarity index 100% rename from plugins/inventory/vagrant.py rename to contrib/inventory/vagrant.py diff --git a/plugins/inventory/vbox.py b/contrib/inventory/vbox.py similarity index 100% rename from plugins/inventory/vbox.py rename to contrib/inventory/vbox.py diff --git 
a/plugins/inventory/vmware.ini b/contrib/inventory/vmware.ini similarity index 100% rename from plugins/inventory/vmware.ini rename to contrib/inventory/vmware.ini diff --git a/plugins/inventory/vmware.py b/contrib/inventory/vmware.py similarity index 100% rename from plugins/inventory/vmware.py rename to contrib/inventory/vmware.py diff --git a/plugins/inventory/windows_azure.ini b/contrib/inventory/windows_azure.ini similarity index 100% rename from plugins/inventory/windows_azure.ini rename to contrib/inventory/windows_azure.ini diff --git a/plugins/inventory/windows_azure.py b/contrib/inventory/windows_azure.py similarity index 100% rename from plugins/inventory/windows_azure.py rename to contrib/inventory/windows_azure.py diff --git a/plugins/inventory/zabbix.ini b/contrib/inventory/zabbix.ini similarity index 100% rename from plugins/inventory/zabbix.ini rename to contrib/inventory/zabbix.ini diff --git a/plugins/inventory/zabbix.py b/contrib/inventory/zabbix.py similarity index 100% rename from plugins/inventory/zabbix.py rename to contrib/inventory/zabbix.py diff --git a/plugins/inventory/zone.py b/contrib/inventory/zone.py similarity index 100% rename from plugins/inventory/zone.py rename to contrib/inventory/zone.py diff --git a/lib/ansible/plugins/callback/log_plays.py b/lib/ansible/plugins/callback/log_plays.py new file mode 100644 index 00000000000000..65036e6763bdf3 --- /dev/null +++ b/lib/ansible/plugins/callback/log_plays.py @@ -0,0 +1,85 @@ +# (C) 2012, Michael DeHaan, + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import time +import json + +from ansible.plugins.callback import CallbackBase + +# NOTE: in Ansible 1.2 or later general logging is available without +# this plugin, just set ANSIBLE_LOG_PATH as an environment variable +# or log_path in the DEFAULTS section of your ansible configuration +# file. This callback is an example of per hosts logging for those +# that want it. + + +class CallbackModule(CallbackBase): + """ + logs playbook results, per host, in /var/log/ansible/hosts + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + + TIME_FORMAT="%b %d %Y %H:%M:%S" + MSG_FORMAT="%(now)s - %(category)s - %(data)s\n\n" + + def __init__(self, display): + + super(CallbackModule, self).__init__(display) + + if not os.path.exists("/var/log/ansible/hosts"): + os.makedirs("/var/log/ansible/hosts") + + def log(self, host, category, data): + if type(data) == dict: + if 'verbose_override' in data: + # avoid logging extraneous data from facts + data = 'omitted' + else: + data = data.copy() + invocation = data.pop('invocation', None) + data = json.dumps(data) + if invocation is not None: + data = json.dumps(invocation) + " => %s " % data + + path = os.path.join("/var/log/ansible/hosts", host) + now = time.strftime(self.TIME_FORMAT, time.localtime()) + fd = open(path, "a") + fd.write(self.MSG_FORMAT % dict(now=now, category=category, data=data)) + fd.close() + + def runner_on_failed(self, host, res, ignore_errors=False): + self.log(host, 'FAILED', res) + + def runner_on_ok(self, host, res): + self.log(host, 'OK', res) + + def runner_on_skipped(self, host, item=None): + self.log(host, 'SKIPPED', '...') + + def runner_on_unreachable(self, host, res): + self.log(host, 'UNREACHABLE', res) + + def runner_on_async_failed(self, host, res, jid): + self.log(host, 'ASYNC_FAILED', res) + + def 
playbook_on_import_for_host(self, host, imported_file): + self.log(host, 'IMPORTED', imported_file) + + def playbook_on_not_import_for_host(self, host, missing_file): + self.log(host, 'NOTIMPORTED', missing_file) diff --git a/plugins/callbacks/osx_say.py b/lib/ansible/plugins/callback/osx_say.py similarity index 54% rename from plugins/callbacks/osx_say.py rename to lib/ansible/plugins/callback/osx_say.py index 174a03300f14b3..bb785b3872fde2 100644 --- a/plugins/callbacks/osx_say.py +++ b/lib/ansible/plugins/callback/osx_say.py @@ -19,87 +19,69 @@ import subprocess import os +from ansible.plugins.callback import CallbackBase + FAILED_VOICE="Zarvox" REGULAR_VOICE="Trinoids" HAPPY_VOICE="Cellos" LASER_VOICE="Princess" SAY_CMD="/usr/bin/say" -def say(msg, voice): - subprocess.call([SAY_CMD, msg, "--voice=%s" % (voice)]) - -class CallbackModule(object): +class CallbackModule(CallbackBase): """ makes Ansible much more exciting on OS X. """ - def __init__(self): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'notification' + + def __init__(self, display): + + super(CallbackModule, self).__init__(display) + # plugin disable itself if say is not present # ansible will not call any callback if disabled is set to True if not os.path.exists(SAY_CMD): self.disabled = True - print "%s does not exist, plugin %s disabled" % \ - (SAY_CMD, os.path.basename(__file__)) + self._display.warning("%s does not exist, plugin %s disabled" % (SAY_CMD, os.path.basename(__file__)) ) - def on_any(self, *args, **kwargs): - pass + def say(self, msg, voice): + subprocess.call([SAY_CMD, msg, "--voice=%s" % (voice)]) def runner_on_failed(self, host, res, ignore_errors=False): - say("Failure on host %s" % host, FAILED_VOICE) + self.say("Failure on host %s" % host, FAILED_VOICE) def runner_on_ok(self, host, res): - say("pew", LASER_VOICE) + self.say("pew", LASER_VOICE) def runner_on_skipped(self, host, item=None): - say("pew", LASER_VOICE) + self.say("pew", LASER_VOICE) def runner_on_unreachable(self, 
host, res): - say("Failure on host %s" % host, FAILED_VOICE) - - def runner_on_no_hosts(self): - pass - - def runner_on_async_poll(self, host, res, jid, clock): - pass + self.say("Failure on host %s" % host, FAILED_VOICE) def runner_on_async_ok(self, host, res, jid): - say("pew", LASER_VOICE) + self.say("pew", LASER_VOICE) def runner_on_async_failed(self, host, res, jid): - say("Failure on host %s" % host, FAILED_VOICE) + self.say("Failure on host %s" % host, FAILED_VOICE) def playbook_on_start(self): - say("Running Playbook", REGULAR_VOICE) + self.say("Running Playbook", REGULAR_VOICE) def playbook_on_notify(self, host, handler): - say("pew", LASER_VOICE) - - def playbook_on_no_hosts_matched(self): - pass - - def playbook_on_no_hosts_remaining(self): - pass + self.say("pew", LASER_VOICE) def playbook_on_task_start(self, name, is_conditional): if not is_conditional: - say("Starting task: %s" % name, REGULAR_VOICE) + self.say("Starting task: %s" % name, REGULAR_VOICE) else: - say("Notifying task: %s" % name, REGULAR_VOICE) - - def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): - pass + self.say("Notifying task: %s" % name, REGULAR_VOICE) def playbook_on_setup(self): - say("Gathering facts", REGULAR_VOICE) - - def playbook_on_import_for_host(self, host, imported_file): - pass - - def playbook_on_not_import_for_host(self, host, missing_file): - pass + self.say("Gathering facts", REGULAR_VOICE) def playbook_on_play_start(self, name): - say("Starting play: %s" % name, HAPPY_VOICE) + self.say("Starting play: %s" % name, HAPPY_VOICE) def playbook_on_stats(self, stats): - say("Play complete", HAPPY_VOICE) - + self.say("Play complete", HAPPY_VOICE) diff --git a/plugins/callbacks/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py similarity index 72% rename from plugins/callbacks/syslog_json.py rename to lib/ansible/plugins/callback/syslog_json.py index 
2e339e96aeb1c3..978a4d719af54b 100644 --- a/plugins/callbacks/syslog_json.py +++ b/lib/ansible/plugins/callback/syslog_json.py @@ -6,7 +6,9 @@ import socket -class CallbackModule(object): +from ansible.plugins.callback import CallbackBase + +class CallbackModule(CallbackBase): """ logs ansible-playbook and ansible runs to a syslog server in json format make sure you have in ansible.cfg: @@ -17,8 +19,13 @@ class CallbackModule(object): SYSLOG_SERVER (optional): defaults to localhost SYSLOG_PORT (optional): defaults to 514 """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + + def __init__(self, display): + + super(CallbackModule, self).__init__(display) - def __init__(self): self.logger = logging.getLogger('ansible logger') self.logger.setLevel(logging.DEBUG) @@ -30,8 +37,6 @@ def __init__(self): self.logger.addHandler(self.handler) self.hostname = socket.gethostname() - def on_any(self, *args, **kwargs): - pass def runner_on_failed(self, host, res, ignore_errors=False): self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) @@ -45,47 +50,11 @@ def runner_on_skipped(self, host, item=None): def runner_on_unreachable(self, host, res): self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) - def runner_on_no_hosts(self): - pass - - def runner_on_async_poll(self, host, res): - pass - - def runner_on_async_ok(self, host, res): - pass - def runner_on_async_failed(self, host, res): self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) - def playbook_on_start(self): - pass - - def playbook_on_notify(self, host, handler): - pass - - def playbook_on_no_hosts_matched(self): - pass - - def playbook_on_no_hosts_remaining(self): - pass - - def playbook_on_task_start(self, name, is_conditional): - pass - - def 
playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): - pass - - def playbook_on_setup(self): - pass - def playbook_on_import_for_host(self, host, imported_file): self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) def playbook_on_not_import_for_host(self, host, missing_file): self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) - - def playbook_on_play_start(self, name): - pass - - def playbook_on_stats(self, stats): - pass diff --git a/plugins/README.md b/plugins/README.md deleted file mode 100644 index 8d705372a51ca7..00000000000000 --- a/plugins/README.md +++ /dev/null @@ -1,35 +0,0 @@ -ansible-plugins -=============== - -You can extend ansible with optional callback and connection plugins. - -callbacks -========= - -Callbacks can be used to add logging or monitoring capability, or just make -interesting sound effects. - -Drop callback plugins in your ansible/lib/callback_plugins/ directory. - -connections -=========== - -Connection plugins allow ansible to talk over different protocols. - -Drop connection plugins in your ansible/lib/runner/connection_plugins/ directory. - -inventory -========= - -Inventory plugins allow you to store your hosts, groups, and variables in any way -you like. Examples include discovering inventory from EC2 or pulling it from -Cobbler. These could also be used to interface with LDAP or database. - -chmod +x an inventory plugin and either name it /etc/ansible/hosts or use ansible -with -i to designate the path to the plugin. - -contributions welcome -===================== - -Send in pull requests to add plugins of your own. The sky is the limit! 
- diff --git a/plugins/callbacks/log_plays.py b/plugins/callbacks/log_plays.py deleted file mode 100644 index dbe16b312c1cb7..00000000000000 --- a/plugins/callbacks/log_plays.py +++ /dev/null @@ -1,116 +0,0 @@ -# (C) 2012, Michael DeHaan, - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import os -import time -import json - -# NOTE: in Ansible 1.2 or later general logging is available without -# this plugin, just set ANSIBLE_LOG_PATH as an environment variable -# or log_path in the DEFAULTS section of your ansible configuration -# file. This callback is an example of per hosts logging for those -# that want it. 
- -TIME_FORMAT="%b %d %Y %H:%M:%S" -MSG_FORMAT="%(now)s - %(category)s - %(data)s\n\n" - -if not os.path.exists("/var/log/ansible/hosts"): - os.makedirs("/var/log/ansible/hosts") - -def log(host, category, data): - if type(data) == dict: - if 'verbose_override' in data: - # avoid logging extraneous data from facts - data = 'omitted' - else: - data = data.copy() - invocation = data.pop('invocation', None) - data = json.dumps(data) - if invocation is not None: - data = json.dumps(invocation) + " => %s " % data - - path = os.path.join("/var/log/ansible/hosts", host) - now = time.strftime(TIME_FORMAT, time.localtime()) - fd = open(path, "a") - fd.write(MSG_FORMAT % dict(now=now, category=category, data=data)) - fd.close() - -class CallbackModule(object): - """ - logs playbook results, per host, in /var/log/ansible/hosts - """ - - def on_any(self, *args, **kwargs): - pass - - def runner_on_failed(self, host, res, ignore_errors=False): - log(host, 'FAILED', res) - - def runner_on_ok(self, host, res): - log(host, 'OK', res) - - def runner_on_skipped(self, host, item=None): - log(host, 'SKIPPED', '...') - - def runner_on_unreachable(self, host, res): - log(host, 'UNREACHABLE', res) - - def runner_on_no_hosts(self): - pass - - def runner_on_async_poll(self, host, res, jid, clock): - pass - - def runner_on_async_ok(self, host, res, jid): - pass - - def runner_on_async_failed(self, host, res, jid): - log(host, 'ASYNC_FAILED', res) - - def playbook_on_start(self): - pass - - def playbook_on_notify(self, host, handler): - pass - - def playbook_on_no_hosts_matched(self): - pass - - def playbook_on_no_hosts_remaining(self): - pass - - def playbook_on_task_start(self, name, is_conditional): - pass - - def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): - pass - - def playbook_on_setup(self): - pass - - def playbook_on_import_for_host(self, host, imported_file): - log(host, 'IMPORTED', 
imported_file) - - def playbook_on_not_import_for_host(self, host, missing_file): - log(host, 'NOTIMPORTED', missing_file) - - def playbook_on_play_start(self, name): - pass - - def playbook_on_stats(self, stats): - pass - diff --git a/setup.py b/setup.py index 1f73836cbd3c54..01ee94cfda038e 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ package_dir={ '': 'lib' }, packages=find_packages('lib'), package_data={ - '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'], + '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1', 'contrib/README.md', 'contrib/inventory/*'], }, scripts=[ 'bin/ansible', From 0bbf5927be81183dfee128e293f269253266e402 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 20:39:16 -0400 Subject: [PATCH 1176/3617] added executabel bit to nova and rax inventory plugins --- contrib/inventory/nova.py | 0 contrib/inventory/rax.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 contrib/inventory/nova.py mode change 100644 => 100755 contrib/inventory/rax.py diff --git a/contrib/inventory/nova.py b/contrib/inventory/nova.py old mode 100644 new mode 100755 diff --git a/contrib/inventory/rax.py b/contrib/inventory/rax.py old mode 100644 new mode 100755 From aaad33ccb6200aeb9211199e0120ff2d1d31bf4a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 22:31:52 -0400 Subject: [PATCH 1177/3617] fixed a couple of bugs --- lib/ansible/plugins/callback/__init__.py | 2 +- lib/ansible/plugins/callback/syslog_json.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index e430c9b5db71c7..776ad15717bc1d 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -114,7 +114,7 @@ def v2_runner_on_skipped(self, result): host = result._host.get_name() #FIXME, get item to pass through item = None 
- self.runner_on_skipped(host, result._result, item) + self.runner_on_skipped(host, item) def v2_runner_on_unreachable(self, result): host = result._host.get_name() diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py index 978a4d719af54b..3be64ee154c9e4 100644 --- a/lib/ansible/plugins/callback/syslog_json.py +++ b/lib/ansible/plugins/callback/syslog_json.py @@ -45,7 +45,7 @@ def runner_on_ok(self, host, res): self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) def runner_on_skipped(self, host, item=None): - self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) + self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host, 'skipped')) def runner_on_unreachable(self, host, res): self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) From 42357f7f2a8000ce9848e26c0eb8fdc4bd2127fd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 8 Jul 2015 23:55:52 -0400 Subject: [PATCH 1178/3617] moved contrib into manifest from setup.py --- MANIFEST.in | 2 ++ setup.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index f4e727d8c4dcab..44aa7c07c4a748 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -14,5 +14,7 @@ recursive-include plugins * include Makefile include VERSION include MANIFEST.in +include contrib/README.md +include contrib/inventory * prune lib/ansible/modules/core/.git prune lib/ansible/modules/extras/.git diff --git a/setup.py b/setup.py index 01ee94cfda038e..1f73836cbd3c54 100644 --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ package_dir={ '': 'lib' }, packages=find_packages('lib'), package_data={ - '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 
'modules/extras/windows/*.ps1', 'contrib/README.md', 'contrib/inventory/*'], + '': ['module_utils/*.ps1', 'modules/core/windows/*.ps1', 'modules/extras/windows/*.ps1'], }, scripts=[ 'bin/ansible', From ebeb0b03485bd9f175fefc8492c27ce8870a16e6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 10 Jul 2015 10:30:52 -0400 Subject: [PATCH 1179/3617] removed plugins dir that was removed --- MANIFEST.in | 1 - 1 file changed, 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 44aa7c07c4a748..8af0aa9bc171b3 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -10,7 +10,6 @@ include examples/ansible.cfg include lib/ansible/module_utils/powershell.ps1 recursive-include lib/ansible/modules * recursive-include docs * -recursive-include plugins * include Makefile include VERSION include MANIFEST.in From 5430169b779aed19a75f3b6e83e5112ee49bdcd9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Jul 2015 10:56:14 -0400 Subject: [PATCH 1180/3617] Cleaning up includes test to match 2.0 behavior * Perhaps the only precedence change, in 2.0+ variables from set_fact will not override params to an include file, as params are expected to be more specific than host-based variables. * Uncommented long-form include example. --- .../roles/test_includes/tasks/main.yml | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/test/integration/roles/test_includes/tasks/main.yml b/test/integration/roles/test_includes/tasks/main.yml index fb76841fdabc62..b4808412bef8bf 100644 --- a/test/integration/roles/test_includes/tasks/main.yml +++ b/test/integration/roles/test_includes/tasks/main.yml @@ -26,12 +26,16 @@ - "cb == '2'" - "cc == '3'" -# Fact takes precedence over include param as fact is host-specific - set_fact: a: 101 b: 102 c: 103 +# Params specified via k=v values are strings, while those +# that come from variables will keep the type they were previously. 
+# Prior to v2.0, facts too priority over include params, however +# this is no longer the case. + - include: included_task1.yml a={{a}} b={{b}} c=103 - name: verify variable include params @@ -39,7 +43,7 @@ that: - "ca == 101" - "cb == 102" - - "cc == 103" + - "cc == '103'" # Test that strings are not turned into numbers - set_fact: @@ -57,26 +61,23 @@ - "cc == '103'" # now try long form includes -# -# FIXME: not sure if folks were using this, or if vars were top level, but seems like -# it should be a thing. -# -#- include: included_task1.yml -# vars: -# a: 201 -# b: 202 -# c: 203 -# -#- debug: var=a -#- debug: var=b -#- debug: var=c -# -#- name: verify long-form include params -# assert: -# that: -# - "ca == 201" -# - "cb == 202" -# - "cc == 203" + +- include: included_task1.yml + vars: + a: 201 + b: 202 + c: 203 + +- debug: var=a +- debug: var=b +- debug: var=c + +- name: verify long-form include params + assert: + that: + - "ca == 201" + - "cb == 202" + - "cc == 203" - name: test handlers with includes shell: echo 1 From 7c73e9c12ea2ffd4a301b2dfa9f8dbb027393638 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 10 Jul 2015 09:11:03 -0700 Subject: [PATCH 1181/3617] Mock 1.1.0 lost python2.6 compatibility --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index fe65457f372f2d..6cc4f9fd8e4a28 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,7 +3,7 @@ # nose -mock +mock >= 1.0.1, < 1.1 passlib coverage coveralls From 657495d13fd01b67cee9490f0f687653abad33f2 Mon Sep 17 00:00:00 2001 From: "Carlos E. 
Garcia" Date: Fri, 10 Jul 2015 12:42:59 -0400 Subject: [PATCH 1182/3617] minor spelling changes --- contrib/inventory/ec2.ini | 2 +- contrib/inventory/ec2.py | 4 ++-- docsite/rst/guide_gce.rst | 2 +- examples/ansible.cfg | 4 ++-- lib/ansible/constants.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/contrib/inventory/ec2.ini b/contrib/inventory/ec2.ini index 4cd78305c727b3..a1d9b1d805d17a 100644 --- a/contrib/inventory/ec2.ini +++ b/contrib/inventory/ec2.ini @@ -36,7 +36,7 @@ destination_variable = public_dns_name # be run from within EC2. The key of an EC2 tag may optionally be used; however # the boto instance variables hold precedence in the event of a collision. # WARNING: - instances that are in the private vpc, _without_ public ip address -# will not be listed in the inventory untill You set: +# will not be listed in the inventory until You set: # vpc_destination_variable = 'private_ip_address' vpc_destination_variable = ip_address diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index 5d8b558aa075dc..f2d9b51c903624 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -795,7 +795,7 @@ def add_elasticache_cluster(self, cluster, region): # Inventory: Group by security group if self.group_by_security_group and not is_redis: - # Check for the existance of the 'SecurityGroups' key and also if + # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. When the cluster is not placed in a SG # the query can return None here and cause an error. if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: @@ -887,7 +887,7 @@ def add_elasticache_node(self, node, cluster, region): # Inventory: Group by security group if self.group_by_security_group: - # Check for the existance of the 'SecurityGroups' key and also if + # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. 
When the cluster is not placed in a SG # the query can return None here and cause an error. if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst index fbcab9ba2a4938..fb317265d45e76 100644 --- a/docsite/rst/guide_gce.rst +++ b/docsite/rst/guide_gce.rst @@ -79,7 +79,7 @@ Create a file ``secrets.py`` looking like following, and put it in some folder w GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.pem') GCE_KEYWORD_PARAMS = {'project': 'project_id'} -Ensure to enter the email adress from the created services account and not the one from your main account. +Ensure to enter the email address from the created services account and not the one from your main account. Now the modules can be used as above, but the account information can be omitted. diff --git a/examples/ansible.cfg b/examples/ansible.cfg index f6b7208b2bcd20..2481f01f0dd43e 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -87,7 +87,7 @@ timeout = 10 # templates indicates to users editing templates files will be replaced. # replacing {file}, {host} and {uid} and strftime codes with proper values. #ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} -# This short version is better used in tempaltes as it won't flag the file as changed every run. +# This short version is better used in templates as it won't flag the file as changed every run. ansible_managed = Ansible managed: {file} on {host} # by default, ansible-playbook will display "Skipping [host]" if it determines a task @@ -236,5 +236,5 @@ accelerate_daemon_timeout = 30 [selinux] # file systems that require special treatment when dealing with security context # the default behaviour that copies the existing context or uses the user default -# needs to be changed to use the file system dependant context. +# needs to be changed to use the file system dependent context. 
#special_context_filesystems=nfs,vboxsf,fuse,ramfs diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 2c2930d6824906..43ae782e195709 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -109,7 +109,7 @@ def shell_expand_path(path): # sections in config file DEFAULTS='defaults' -# generaly configurable things +# generally configurable things DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts'))) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None) From 9c5a6d7b5a57911062d705c7998978c3efdf41d6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 10 Jul 2015 12:59:52 -0400 Subject: [PATCH 1183/3617] fixed all references to old plugins/inventory to point at contrib/inventory --- contrib/inventory/digital_ocean.py | 2 +- contrib/inventory/gce.py | 2 +- contrib/inventory/ovirt.py | 2 +- docsite/rst/guide_gce.rst | 6 +++--- docsite/rst/intro_dynamic_inventory.rst | 14 +++++++------- test/integration/Makefile | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/contrib/inventory/digital_ocean.py b/contrib/inventory/digital_ocean.py index 1323a384ba9a6e..4f312e7c246ada 100755 --- a/contrib/inventory/digital_ocean.py +++ b/contrib/inventory/digital_ocean.py @@ -111,7 +111,7 @@ # (c) 2013, Evan Wies # # Inspired by the EC2 inventory plugin: -# https://github.com/ansible/ansible/blob/devel/plugins/inventory/ec2.py +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py # # This file is part of Ansible, # diff --git a/contrib/inventory/gce.py b/contrib/inventory/gce.py index 5fe3db93f8e35e..59947fb1665cc3 100755 --- a/contrib/inventory/gce.py +++ b/contrib/inventory/gce.py @@ -66,7 +66,7 @@ $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" Use the GCE inventory 
script to print out instance specific information - $ plugins/inventory/gce.py --host my_instance + $ contrib/inventory/gce.py --host my_instance Author: Eric Johnson Version: 0.0.1 diff --git a/contrib/inventory/ovirt.py b/contrib/inventory/ovirt.py index 4cb4b09eaefa2d..dc022c5dfd25bc 100755 --- a/contrib/inventory/ovirt.py +++ b/contrib/inventory/ovirt.py @@ -56,7 +56,7 @@ $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a" Use the ovirt inventory script to print out instance specific information - $ plugins/inventory/ovirt.py --host my_instance + $ contrib/inventory/ovirt.py --host my_instance Author: Josha Inglis based on the gce.py by Eric Johnson Version: 0.0.1 diff --git a/docsite/rst/guide_gce.rst b/docsite/rst/guide_gce.rst index fb317265d45e76..c689632818ea05 100644 --- a/docsite/rst/guide_gce.rst +++ b/docsite/rst/guide_gce.rst @@ -88,9 +88,9 @@ GCE Dynamic Inventory The best way to interact with your hosts is to use the gce inventory plugin, which dynamically queries GCE and tells Ansible what nodes can be managed. -Note that when using the inventory script ``gce.py``, you also need to populate the ``gce.ini`` file that you can find in the plugins/inventory directory of the ansible checkout. +Note that when using the inventory script ``gce.py``, you also need to populate the ``gce.ini`` file that you can find in the contrib/inventory directory of the ansible checkout. -To use the GCE dynamic inventory script, copy ``gce.py`` from ``plugins/inventory`` into your inventory directory and make it executable. You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable -- the default is to look for gce.ini in the same directory as the inventory script. +To use the GCE dynamic inventory script, copy ``gce.py`` from ``contrib/inventory`` into your inventory directory and make it executable. 
You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable -- the default is to look for gce.ini in the same directory as the inventory script. Let's see if inventory is working: @@ -111,7 +111,7 @@ Now let's see if we can use the inventory script to talk to Google. "x.x.x.x" ], -As with all dynamic inventory plugins in Ansible, you can configure the inventory path in ansible.cfg. The recommended way to use the inventory is to create an ``inventory`` directory, and place both the ``gce.py`` script and a file containing ``localhost`` in it. This can allow for cloud inventory to be used alongside local inventory (such as a physical datacenter) or machines running in different providers. +As with all dynamic inventory scripts in Ansible, you can configure the inventory path in ansible.cfg. The recommended way to use the inventory is to create an ``inventory`` directory, and place both the ``gce.py`` script and a file containing ``localhost`` in it. This can allow for cloud inventory to be used alongside local inventory (such as a physical datacenter) or machines running in different providers. Executing ``ansible`` or ``ansible-playbook`` and specifying the ``inventory`` directory instead of an individual file will cause ansible to evaluate each file in that directory for inventory. diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 00023a4ccae7c8..5b634d86cd9a83 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -12,7 +12,7 @@ in a different software system. Ansible provides a basic text-based system as d Frequent examples include pulling inventory from a cloud provider, LDAP, `Cobbler `_, or a piece of expensive enterprisey CMDB software. -Ansible easily supports all of these options via an external inventory system. 
The plugins directory contains some of these already -- including options for EC2/Eucalyptus, Rackspace Cloud, and OpenStack, examples of some of which will be detailed below. +Ansible easily supports all of these options via an external inventory system. The contrib/inventory directory contains some of these already -- including options for EC2/Eucalyptus, Rackspace Cloud, and OpenStack, examples of some of which will be detailed below. :doc:`tower` also provides a database to store inventory results that is both web and REST Accessible. Tower syncs with all Ansible dynamic inventory sources you might be using, and also includes a graphical inventory editor. By having a database record of all of your hosts, it's easy to correlate past event history and see which ones have had failures on their last playbook runs. @@ -30,7 +30,7 @@ While primarily used to kickoff OS installations and manage DHCP and DNS, Cobble layer that allows it to represent data for multiple configuration management systems (even at the same time), and has been referred to as a 'lightweight CMDB' by some admins. -To tie Ansible's inventory to Cobbler (optional), copy `this script `_ to /etc/ansible and `chmod +x` the file. cobblerd will now need +To tie Ansible's inventory to Cobbler (optional), copy `this script `_ to /etc/ansible and `chmod +x` the file. cobblerd will now need to be running when you are using Ansible and you'll need to use Ansible's ``-i`` command line option (e.g. ``-i /etc/ansible/cobbler.py``). This particular script will communicate with Cobbler using Cobbler's XMLRPC API. @@ -80,14 +80,14 @@ So in other words, you can use those variables in arguments/actions as well. Example: AWS EC2 External Inventory Script `````````````````````````````````````````` -If you use Amazon Web Services EC2, maintaining an inventory file might not be the best approach, because hosts may come and go over time, be managed by external applications, or you might even be using AWS autoscaling. 
For this reason, you can use the `EC2 external inventory `_ script. +If you use Amazon Web Services EC2, maintaining an inventory file might not be the best approach, because hosts may come and go over time, be managed by external applications, or you might even be using AWS autoscaling. For this reason, you can use the `EC2 external inventory `_ script. You can use this script in one of two ways. The easiest is to use Ansible's ``-i`` command line option and specify the path to the script after marking it executable:: ansible -i ec2.py -u ubuntu us-east-1d -m ping -The second option is to copy the script to `/etc/ansible/hosts` and `chmod +x` it. You will also need to copy the `ec2.ini `_ file to `/etc/ansible/ec2.ini`. Then you can run ansible as you would normally. +The second option is to copy the script to `/etc/ansible/hosts` and `chmod +x` it. You will also need to copy the `ec2.ini `_ file to `/etc/ansible/ec2.ini`. Then you can run ansible as you would normally. To successfully make an API call to AWS, you will need to configure Boto (the Python interface to AWS). There are a `variety of methods `_ available, but the simplest is just to export two environment variables:: @@ -96,7 +96,7 @@ To successfully make an API call to AWS, you will need to configure Boto (the Py You can test the script by itself to make sure your config is correct:: - cd plugins/inventory + cd contrib/inventory ./ec2.py --list After a few moments, you should see your entire EC2 inventory across all regions in JSON. @@ -185,7 +185,7 @@ Both ``ec2_security_group_ids`` and ``ec2_security_group_names`` are comma-separ To see the complete list of variables available for an instance, run the script by itself:: - cd plugins/inventory + cd contrib/inventory ./ec2.py --host ec2-12-12-12-12.compute-1.amazonaws.com Note that the AWS inventory script will cache results to avoid repeated API calls, and this cache setting is configurable in ec2.ini. 
To @@ -210,7 +210,7 @@ In addition to Cobbler and EC2, inventory scripts are also available for:: Vagrant (not to be confused with the provisioner in vagrant, which is preferred) Zabbix -Sections on how to use these in more detail will be added over time, but by looking at the "plugins/" directory of the Ansible checkout +Sections on how to use these in more detail will be added over time, but by looking at the "contrib/inventory" directory of the Ansible checkout it should be very obvious how to use them. The process for the AWS inventory script is the same. If you develop an interesting inventory script that might be general purpose, please submit a pull request -- we'd likely be glad diff --git a/test/integration/Makefile b/test/integration/Makefile index 69416b1658c078..c197bd415302c6 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -164,7 +164,7 @@ $(CONSUL_RUNNING): consul: ifeq ($(CONSUL_RUNNING), True) ansible-playbook -i $(INVENTORY) consul.yml ; \ - ansible-playbook -i ../../plugins/inventory/consul_io.py consul_inventory.yml + ansible-playbook -i ../../contrib/inventory/consul_io.py consul_inventory.yml else @echo "Consul agent is not running locally. 
To run a cluster locally see http://github.com/sgargan/consul-vagrant" endif From 4608897c27c2f86c4582c733e15f93e7d56aab07 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Jul 2015 13:55:34 -0400 Subject: [PATCH 1184/3617] Submodule update --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 8257053756766a..9acf10face033d 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 8257053756766ad52b43e22e413343b0fedf7e69 +Subproject commit 9acf10face033dda6d5b1f570fb35cbd3deabac5 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 639902ff2081aa..8a89f4afe45286 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 639902ff2081aa7f90e051878a3abf3f1a67eac4 +Subproject commit 8a89f4afe452868eccdb8eab841cb501b7bf0548 From 1aa2191fd55a627a1ca867228498d5b1d24ae629 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 10 Jul 2015 15:54:18 -0400 Subject: [PATCH 1185/3617] Update tests for win_get_url module to test force parameter and invalid URLs/paths. 
--- .../roles/test_win_get_url/defaults/main.yml | 7 ++ .../roles/test_win_get_url/tasks/main.yml | 76 +++++++++++++++++-- 2 files changed, 76 insertions(+), 7 deletions(-) create mode 100644 test/integration/roles/test_win_get_url/defaults/main.yml diff --git a/test/integration/roles/test_win_get_url/defaults/main.yml b/test/integration/roles/test_win_get_url/defaults/main.yml new file mode 100644 index 00000000000000..6e507ecf31c202 --- /dev/null +++ b/test/integration/roles/test_win_get_url/defaults/main.yml @@ -0,0 +1,7 @@ +--- + +test_win_get_url_link: http://docs.ansible.com +test_win_get_url_path: "C:\\Users\\{{ansible_ssh_user}}\\docs_index.html" +test_win_get_url_invalid_link: http://docs.ansible.com/skynet_module.html +test_win_get_url_invalid_path: "Q:\\Filez\\Cyberdyne.html" +test_win_get_url_dir_path: "C:\\Users\\{{ansible_ssh_user}}" diff --git a/test/integration/roles/test_win_get_url/tasks/main.yml b/test/integration/roles/test_win_get_url/tasks/main.yml index 26fb334c95a5ab..b0705eabd5649f 100644 --- a/test/integration/roles/test_win_get_url/tasks/main.yml +++ b/test/integration/roles/test_win_get_url/tasks/main.yml @@ -17,19 +17,81 @@ # along with Ansible. If not, see . 
- name: remove test file if it exists - raw: PowerShell -Command {Remove-Item "C:\Users\Administrator\win_get_url.jpg" -Force} + raw: > + PowerShell -Command Remove-Item "{{test_win_get_url_path}}" -Force + ignore_errors: true - name: test win_get_url module - win_get_url: url=http://placehold.it/10x10.jpg dest='C:\Users\Administrator\win_get_url.jpg' + win_get_url: + url: "{{test_win_get_url_link}}" + dest: "{{test_win_get_url_path}}" register: win_get_url_result -- name: check win_get_url result +- name: check that url was downloaded assert: that: - "not win_get_url_result|failed" - "win_get_url_result|changed" + - "win_get_url_result.win_get_url.url" + - "win_get_url_result.win_get_url.dest" -# FIXME: -# - Test invalid url -# - Test invalid dest, when dest is directory -# - Test idempotence when downloading same url/dest (not yet implemented) +- name: test win_get_url module again (force should be yes by default) + win_get_url: + url: "{{test_win_get_url_link}}" + dest: "{{test_win_get_url_path}}" + register: win_get_url_result_again + +- name: check that url was downloaded again + assert: + that: + - "not win_get_url_result_again|failed" + - "win_get_url_result_again|changed" + +- name: test win_get_url module again with force=no + win_get_url: + url: "{{test_win_get_url_link}}" + dest: "{{test_win_get_url_path}}" + force: no + register: win_get_url_result_noforce + +- name: check that url was not downloaded again + assert: + that: + - "not win_get_url_result_noforce|failed" + - "not win_get_url_result_noforce|changed" + +- name: test win_get_url module with url that returns a 404 + win_get_url: + url: "{{test_win_get_url_invalid_link}}" + dest: "{{test_win_get_url_path}}" + register: win_get_url_result_invalid_link + ignore_errors: true + +- name: check that the download failed for an invalid url + assert: + that: + - "win_get_url_result_invalid_link|failed" + +- name: test win_get_url module with an invalid path + win_get_url: + url: 
"{{test_win_get_url_link}}" + dest: "{{test_win_get_url_invalid_path}}" + register: win_get_url_result_invalid_path + ignore_errors: true + +- name: check that the download failed for an invalid path + assert: + that: + - "win_get_url_result_invalid_path|failed" + +- name: test win_get_url module with a valid path that is a directory + win_get_url: + url: "{{test_win_get_url_link}}" + dest: "{{test_win_get_url_dir_path}}" + register: win_get_url_result_dir_path + ignore_errors: true + +- name: check that the download failed if dest is a directory + assert: + that: + - "win_get_url_result_dir_path|failed" From 705018a417e830d6985f10cef108f02456b25871 Mon Sep 17 00:00:00 2001 From: teh 4r9h Date: Sat, 11 Jul 2015 02:53:32 +0200 Subject: [PATCH 1186/3617] Misspell in mail.py callback. Looks like little misspell. --- lib/ansible/plugins/callback/mail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/mail.py b/lib/ansible/plugins/callback/mail.py index 46b2409130742a..c82acdf2fcb714 100644 --- a/lib/ansible/plugins/callback/mail.py +++ b/lib/ansible/plugins/callback/mail.py @@ -81,7 +81,7 @@ def v2_runner_on_failed(self, res, ignore_errors=False): body += 'A complete dump of the error:\n\n' + str(res._result['msg']) mail(sender=sender, subject=subject, body=body) - def v2_runner_on_unreachable(self, ressult): + def v2_runner_on_unreachable(self, result): host = result._host.get_name() res = result._result From 4b9a79d42bf280a742b7f759ac38a0b326ebd941 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 10 Jul 2015 22:11:45 -0400 Subject: [PATCH 1187/3617] removed trailing newline fix from #10973 by @retr0h --- lib/ansible/galaxy/data/metadata_template.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2 index 328e13a814c480..c6b6fd9dbdf694 100644 --- a/lib/ansible/galaxy/data/metadata_template.j2 +++ 
b/lib/ansible/galaxy/data/metadata_template.j2 @@ -40,6 +40,6 @@ dependencies: [] # List your role dependencies here, one per line. # Be sure to remove the '[]' above if you add dependencies # to this list. - {% for dependency in dependencies %} + {%- for dependency in dependencies %} #- {{ dependency }} - {% endfor %} + {%- endfor %} From 7a1bce1b5de396a4bdb16c584f177859090ad175 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 10 Jul 2015 23:48:12 -0400 Subject: [PATCH 1188/3617] added verbose option to show callback loaded info also made mail module print nicer without all those 'u' --- lib/ansible/plugins/callback/__init__.py | 7 ++++++- lib/ansible/plugins/callback/context_demo.py | 1 + lib/ansible/plugins/callback/default.py | 1 + lib/ansible/plugins/callback/hipchat.py | 3 ++- lib/ansible/plugins/callback/log_plays.py | 1 + lib/ansible/plugins/callback/mail.py | 10 +++++++--- lib/ansible/plugins/callback/minimal.py | 1 + lib/ansible/plugins/callback/osx_say.py | 1 + lib/ansible/plugins/callback/syslog_json.py | 1 + lib/ansible/plugins/callback/timer.py | 1 + 10 files changed, 22 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 776ad15717bc1d..17a6606fb870b4 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -16,7 +16,7 @@ # along with Ansible. If not, see . 
# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) +from __future__ import (absolute_import, division) __metaclass__ = type __all__ = ["CallbackBase"] @@ -34,6 +34,11 @@ class CallbackBase: def __init__(self, display): self._display = display + if self._display.verbosity >= 4: + name = getattr(self, 'CALLBACK_NAME', 'with no defined name') + ctype = getattr(self, 'CALLBACK_TYPE', 'unknown') + version = getattr(self, 'CALLBACK_VERSION', 'unknown') + self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version)) def set_connection_info(self, conn_info): pass diff --git a/lib/ansible/plugins/callback/context_demo.py b/lib/ansible/plugins/callback/context_demo.py index f204ecb3bedd5d..ad22ead07df258 100644 --- a/lib/ansible/plugins/callback/context_demo.py +++ b/lib/ansible/plugins/callback/context_demo.py @@ -24,6 +24,7 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'context_demo' def v2_on_any(self, *args, **kwargs): i = 0 diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 2c4a8cea88b53c..00ba9c72c86a89 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -32,6 +32,7 @@ class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'default' def v2_runner_on_failed(self, result, ignore_errors=False): if 'exception' in result._result: diff --git a/lib/ansible/plugins/callback/hipchat.py b/lib/ansible/plugins/callback/hipchat.py index a2709e3d5b9cb7..b0d1bfb67e6a3d 100644 --- a/lib/ansible/plugins/callback/hipchat.py +++ b/lib/ansible/plugins/callback/hipchat.py @@ -42,7 +42,8 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'hipchat' def __init__(self, display): diff --git
a/lib/ansible/plugins/callback/log_plays.py b/lib/ansible/plugins/callback/log_plays.py index 65036e6763bdf3..7cdedcb00e3853 100644 --- a/lib/ansible/plugins/callback/log_plays.py +++ b/lib/ansible/plugins/callback/log_plays.py @@ -34,6 +34,7 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'log_plays' TIME_FORMAT="%b %d %Y %H:%M:%S" MSG_FORMAT="%(now)s - %(category)s - %(data)s\n\n" diff --git a/lib/ansible/plugins/callback/mail.py b/lib/ansible/plugins/callback/mail.py index c82acdf2fcb714..af86e61df9cedd 100644 --- a/lib/ansible/plugins/callback/mail.py +++ b/lib/ansible/plugins/callback/mail.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright 2012 Dag Wieers # # This file is part of Ansible @@ -17,6 +18,7 @@ import os import smtplib +import json from ansible.plugins.callback import CallbackBase def mail(subject='Ansible error mail', sender=None, to=None, cc=None, bcc=None, body=None, smtphost=None): @@ -58,6 +60,7 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'mail' def v2_runner_on_failed(self, res, ignore_errors=False): @@ -66,8 +69,9 @@ def v2_runner_on_failed(self, res, ignore_errors=False): if ignore_errors: return sender = '"Ansible: %s" ' % host - subject = 'Failed: %s' % (res._task.action) - body = 'The following task failed for host ' + host + ':\n\n%s\n\n' % (res._task.action) + attach = "%s: %s" % (res._result['invocation']['module_name'], json.dumps(res._result['invocation']['module_args'])) + subject = 'Failed: %s' % attach + body = 'The following task failed for host ' + host + ':\n\n%s\n\n' % attach if 'stdout' in res._result.keys() and res._result['stdout']: subject = res._result['stdout'].strip('\r\n').split('\n')[-1] @@ -78,7 +82,7 @@ def v2_runner_on_failed(self, res, ignore_errors=False): if 'msg' in res._result.keys() and res._result['msg']: subject = res._result['msg'].strip('\r\n').split('\n')[0] body 
+= 'with the following message:\n\n' + res._result['msg'] + '\n\n' - body += 'A complete dump of the error:\n\n' + str(res._result['msg']) + body += 'A complete dump of the error:\n\n' + json.dumps(res._result, indent=4) mail(sender=sender, subject=subject, body=body) def v2_runner_on_unreachable(self, result): diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index d0c314e1b9018c..d5950fae011a9a 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -33,6 +33,7 @@ class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'minimal' def v2_on_any(self, *args, **kwargs): pass diff --git a/lib/ansible/plugins/callback/osx_say.py b/lib/ansible/plugins/callback/osx_say.py index bb785b3872fde2..36b053026e29ef 100644 --- a/lib/ansible/plugins/callback/osx_say.py +++ b/lib/ansible/plugins/callback/osx_say.py @@ -33,6 +33,7 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'notification' + CALLBACK_NAME = 'osx_say' def __init__(self, display): diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py index 3be64ee154c9e4..fe0281b780b45c 100644 --- a/lib/ansible/plugins/callback/syslog_json.py +++ b/lib/ansible/plugins/callback/syslog_json.py @@ -21,6 +21,7 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'syslog_json' def __init__(self, display): diff --git a/lib/ansible/plugins/callback/timer.py b/lib/ansible/plugins/callback/timer.py index d7f2b42a96445b..058cb4f4a4d787 100644 --- a/lib/ansible/plugins/callback/timer.py +++ b/lib/ansible/plugins/callback/timer.py @@ -10,6 +10,7 @@ class CallbackModule(CallbackBase): """ CALLBACK_VERSION = 2.0 CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'timer' start_time = datetime.now() From fdea00880bd67600ae0a8b9859628068c07b2a9e Mon Sep 17 00:00:00 2001 From: 
Brian Coca Date: Sat, 11 Jul 2015 00:02:40 -0400 Subject: [PATCH 1189/3617] now default shows time taken when -vv or above --- lib/ansible/plugins/callback/default.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 00ba9c72c86a89..5292b74c007457 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -70,6 +70,8 @@ def v2_runner_on_ok(self, result): if 'verbose_always' in result._result: indent = 4 del result._result['verbose_always'] + if self._display.verbosity >= 2 and 'delta' in result._result: + msg += " [time: %s]" % (result._result['delta']) msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False) self._display.display(msg, color=color) From 1274ce565dbbd302aef3cbc8de84055b6d549558 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 00:47:59 -0400 Subject: [PATCH 1190/3617] added result sanitation to registered var and to callbacks removed time display as it only is provided by command module --- lib/ansible/constants.py | 1 + lib/ansible/executor/process/result.py | 4 +++- lib/ansible/plugins/callback/__init__.py | 15 +++++++++++++++ lib/ansible/plugins/callback/default.py | 12 ++++-------- lib/ansible/plugins/callback/mail.py | 2 +- lib/ansible/plugins/callback/minimal.py | 4 +--- lib/ansible/plugins/callback/syslog_json.py | 12 ++++++------ 7 files changed, 31 insertions(+), 19 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 43ae782e195709..5b7c901415d1ef 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -235,3 +235,4 @@ def shell_expand_path(path): DEFAULT_SU_PASS = None VAULT_VERSION_MIN = 1.0 VAULT_VERSION_MAX = 1.0 +RESULT_SANITIZE = frozenset(['invocation','warnings']) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 505457f7d20191..71d6746be0fe34 100644 --- 
a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -33,6 +33,7 @@ except ImportError: HAS_ATFORK=False +from ansible import constants as C from ansible.playbook.handler import Handler from ansible.playbook.task import Task @@ -107,7 +108,8 @@ def run(self): # if this task is registering a result, do it now if result._task.register: - self._send_result(('register_host_var', result._host, result._task.register, result._result)) + res = {k: result._result[k] for k in set(result._result.keys()).difference(C.RESULT_SANITIZE)} + self._send_result(('register_host_var', result._host, result._task.register, res)) # send callbacks, execute other options based on the result status # FIXME: this should all be cleaned up and probably moved to a sub-function. diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 17a6606fb870b4..a5a13c1cfff534 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -19,8 +19,13 @@ from __future__ import (absolute_import, division) __metaclass__ = type +import json + +from ansible import constants as C + __all__ = ["CallbackBase"] + class CallbackBase: ''' @@ -40,6 +45,16 @@ def __init__(self, display): version = getattr(self, 'CALLBACK_VERSION', 'unknown') self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version)) + def _dump_results(self, result, sanitize=True, indent=4, sort_keys=True): + if sanitize: + res = self._sanitize_result(result) + else: + res = result + return json.dumps(res, indent=indent, ensure_ascii=False, sort_keys=sort_keys) + + def _sanitize_result(self, result): + return {k: result[k] for k in set(result.keys()).difference(C.RESULT_SANITIZE)} + def set_connection_info(self, conn_info): pass diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 5292b74c007457..2bbc697f53c211 100644 ---
a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -19,8 +19,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import json - from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): @@ -48,7 +46,7 @@ def v2_runner_on_failed(self, result, ignore_errors=False): # finally, remove the exception from the result so it's not shown every time del result._result['exception'] - self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red') + self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') if result._task.ignore_errors: self._display.display("...ignoring") @@ -70,9 +68,7 @@ def v2_runner_on_ok(self, result): if 'verbose_always' in result._result: indent = 4 del result._result['verbose_always'] - if self._display.verbosity >= 2 and 'delta' in result._result: - msg += " [time: %s]" % (result._result['delta']) - msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False) + msg += " => %s" % self._dump_results(result._result, indent=indent) self._display.display(msg, color=color) def v2_runner_on_skipped(self, result): @@ -82,11 +78,11 @@ def v2_runner_on_skipped(self, result): if 'verbose_always' in result._result: indent = 4 del result._result['verbose_always'] - msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False) + msg += " => %s" % self._dump_results(result._result, indent=indent) self._display.display(msg, color='cyan') def v2_runner_on_unreachable(self, result): - self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), result._result), color='red') + self._display.display("fatal: [%s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') def v2_playbook_on_no_hosts_matched(self): self._display.display("skipping: no hosts matched", color='cyan') diff --git a/lib/ansible/plugins/callback/mail.py b/lib/ansible/plugins/callback/mail.py index af86e61df9cedd..4828062df93305 100644 --- a/lib/ansible/plugins/callback/mail.py +++ b/lib/ansible/plugins/callback/mail.py @@ -82,7 +82,7 @@ def v2_runner_on_failed(self, res, ignore_errors=False): if 'msg' in res._result.keys() and res._result['msg']: subject = res._result['msg'].strip('\r\n').split('\n')[0] body += 'with the following message:\n\n' + res._result['msg'] + '\n\n' - body += 'A complete dump of the error:\n\n' + json.dumps(res._result, indent=4) + body += 'A complete dump of the error:\n\n' + self._dump_results(res._result) mail(sender=sender, subject=subject, body=body) def v2_runner_on_unreachable(self, result): diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index d5950fae011a9a..86e5694a15fefe 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -19,8 +19,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import json - from ansible.plugins.callback import CallbackBase @@ -55,7 +53,7 @@ def v2_runner_on_failed(self, result, ignore_errors=False): self._display.display("%s | FAILED! 
=> %s" % (result._host.get_name(), result._result), color='red') def v2_runner_on_ok(self, result): - self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), json.dumps(result._result, indent=4)), color='green') + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result)), color='green') def v2_runner_on_skipped(self, result): pass diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py index fe0281b780b45c..991a94dd31bb9e 100644 --- a/lib/ansible/plugins/callback/syslog_json.py +++ b/lib/ansible/plugins/callback/syslog_json.py @@ -40,22 +40,22 @@ def __init__(self, display): def runner_on_failed(self, host, res, ignore_errors=False): - self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) + self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) def runner_on_ok(self, host, res): - self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) + self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) def runner_on_skipped(self, host, item=None): self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname,host, 'skipped')) def runner_on_unreachable(self, host, res): - self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) + self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) def runner_on_async_failed(self, host, res): - self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % 
(self.hostname,host,json.dumps(res, sort_keys=True))) + self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname,host,self._dump_results(res))) def playbook_on_import_for_host(self, host, imported_file): - self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) + self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: %s' % (self.hostname,host,imported_file)) def playbook_on_not_import_for_host(self, host, missing_file): - self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: %s' % (self.hostname,host,json.dumps(res, sort_keys=True))) + self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: %s' % (self.hostname,host,missing_file)) From 698b2776019d523b0fc57ab6ff940d618e88f0bc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 11:33:28 -0400 Subject: [PATCH 1191/3617] changed github and galaxy to always be https fixes #9925 --- docsite/_themes/srtd/footer.html | 2 +- docsite/rst/community.rst | 14 +++++++------- docsite/rst/developing_modules.rst | 2 +- docsite/rst/galaxy.rst | 2 +- docsite/rst/guide_rax.rst | 2 +- docsite/rst/intro_windows.rst | 2 +- docsite/rst/playbooks_delegation.rst | 4 ++-- docsite/rst/playbooks_lookups.rst | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docsite/_themes/srtd/footer.html b/docsite/_themes/srtd/footer.html index b6422f9a2dd331..b70cfde7ad80dd 100644 --- a/docsite/_themes/srtd/footer.html +++ b/docsite/_themes/srtd/footer.html @@ -20,6 +20,6 @@ {%- endif %}

-Ansible docs are generated from GitHub sources using Sphinx using a theme provided by Read the Docs. {% if pagename.endswith("_module") %}. Module documentation is not edited directly, but is generated from the source code for the modules. To submit an update to module docs, edit the 'DOCUMENTATION' metadata in the core and extras modules source repositories. {% endif %} +Ansible docs are generated from GitHub sources using Sphinx using a theme provided by Read the Docs. {% if pagename.endswith("_module") %}. Module documentation is not edited directly, but is generated from the source code for the modules. To submit an update to module docs, edit the 'DOCUMENTATION' metadata in the core and extras modules source repositories. {% endif %} diff --git a/docsite/rst/community.rst b/docsite/rst/community.rst index 561e214bd9db19..5cac69fe9a1c76 100644 --- a/docsite/rst/community.rst +++ b/docsite/rst/community.rst @@ -62,11 +62,11 @@ I'd Like To Report A Bug Ansible practices responsible disclosure - if this is a security related bug, email `security@ansible.com `_ instead of filing a ticket or posting to the Google Group and you will receive a prompt response. -Bugs related to the core language should be reported to `github.com/ansible/ansible `_ after -signing up for a free github account. Before reporting a bug, please use the bug/issue search -to see if the issue has already been reported. +Bugs related to the core language should be reported to `github.com/ansible/ansible `_ after +signing up for a free github account. Before reporting a bug, please use the bug/issue search +to see if the issue has already been reported. -MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. This is listed on the bottom of the docs page for any module. +MODULE related bugs however should go to `ansible-modules-core `_ or `ansible-modules-extras `_ based on the classification of the module. 
This is listed on the bottom of the docs page for any module. When filing a bug, please use the `issue template `_ to provide all relevant information, regardless of what repo you are filing a ticket against. @@ -132,9 +132,9 @@ Modules are some of the easiest places to get started. Contributing Code (Features or Bugfixes) ---------------------------------------- -The Ansible project keeps its source on github at `github.com/ansible/ansible `_ for -the core application, and two sub repos `github.com/ansible/ansible-modules-core `_ -and `ansible/ansible-modules-extras `_ for module related items. +The Ansible project keeps its source on github at `github.com/ansible/ansible `_ for +the core application, and two sub repos `github.com/ansible/ansible-modules-core `_ +and `ansible/ansible-modules-extras `_ for module related items. If you need to know if a module is in 'core' or 'extras', consult the web documentation page for that module. The project takes contributions through `github pull requests `_. diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index affd7f067e8dae..ce2195b48dc279 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -18,7 +18,7 @@ The directory "./library", alongside your top level playbooks, is also automatic added as a search directory. Should you develop an interesting Ansible module, consider sending a pull request to the -`modules-extras project `_. There's also a core +`modules-extras project `_. There's also a core repo for more established and widely used modules. "Extras" modules may be promoted to core periodically, but there's no fundamental difference in the end - both ship with ansible, all in one package, regardless of how you acquire ansible. 
diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index d7639848a6122e..808e3e4235696a 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -8,7 +8,7 @@ Ansible Galaxy The Website ``````````` -The website `Ansible Galaxy `_, is a free site for finding, downloading, rating, and reviewing all kinds of community developed Ansible roles and can be a great way to get a jumpstart on your automation projects. +The website `Ansible Galaxy `_, is a free site for finding, downloading, rating, and reviewing all kinds of community developed Ansible roles and can be a great way to get a jumpstart on your automation projects. You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later. diff --git a/docsite/rst/guide_rax.rst b/docsite/rst/guide_rax.rst index 2a2f415e698589..5be2f5f3f7212d 100644 --- a/docsite/rst/guide_rax.rst +++ b/docsite/rst/guide_rax.rst @@ -6,7 +6,7 @@ Rackspace Cloud Guide Introduction ```````````` -.. note:: This section of the documentation is under construction. We are in the process of adding more examples about the Rackspace modules and how they work together. Once complete, there will also be examples for Rackspace Cloud in `ansible-examples `_. +.. note:: This section of the documentation is under construction. We are in the process of adding more examples about the Rackspace modules and how they work together. Once complete, there will also be examples for Rackspace Cloud in `ansible-examples `_. Ansible contains a number of core modules for interacting with Rackspace Cloud. 
diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index 5dd9ad5d1d0f85..645248fde503c6 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -26,7 +26,7 @@ Installing on the Control Machine On a Linux control machine:: - pip install http://github.com/diyan/pywinrm/archive/master.zip#egg=pywinrm + pip install https://github.com/diyan/pywinrm/archive/master.zip#egg=pywinrm If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host):: diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index 8f672791adde25..20981503df4c91 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -9,7 +9,7 @@ This in particular is very applicable when setting up continuous deployment infr Additional features allow for tuning the orders in which things complete, and assigning a batch window size for how many machines to process at once during a rolling update. -This section covers all of these features. For examples of these items in use, `please see the ansible-examples repository `_. There are quite a few examples of zero-downtime update procedures for different kinds of applications. +This section covers all of these features. For examples of these items in use, `please see the ansible-examples repository `_. There are quite a few examples of zero-downtime update procedures for different kinds of applications. You should also consult the :doc:`modules` section, various modules like 'ec2_elb', 'nagios', and 'bigip_pool', and 'netscaler' dovetail neatly with the concepts mentioned here. @@ -189,7 +189,7 @@ use the default remote connection type:: :doc:`playbooks` An introduction to playbooks - `Ansible Examples on GitHub `_ + `Ansible Examples on GitHub `_ Many examples of full-stack deployments `User Mailing List `_ Have a question? Stop by the google group! 
diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst index ac770dab39b4c0..a7d459c80084ab 100644 --- a/docsite/rst/playbooks_lookups.rst +++ b/docsite/rst/playbooks_lookups.rst @@ -178,7 +178,7 @@ Here are some examples:: # The following lookups were added in 1.9 - debug: msg="{{item}}" with_url: - - 'http://github.com/gremlin.keys' + - 'https://github.com/gremlin.keys' # outputs the cartesian product of the supplied lists - debug: msg="{{item}}" From e4097ed279484adf224d3a6fed9cae568d742c83 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 14:24:00 -0400 Subject: [PATCH 1192/3617] simplified ansible errors, moved md5 hash import with notes to be more prominent --- lib/ansible/parsing/vault/__init__.py | 51 ++++++++++++++------------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 7a2bd378c11400..2aab6fdfe4e8b2 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -29,15 +29,17 @@ import tempfile from io import BytesIO from subprocess import call -from ansible import errors +from ansible.errors import AnsibleError from hashlib import sha256 -# Note: Only used for loading obsolete VaultAES files. All files are written -# using the newer VaultAES256 which does not require md5 -from hashlib import md5 from binascii import hexlify from binascii import unhexlify from six import binary_type, PY3, text_type +# Note: Only used for loading obsolete VaultAES files. 
All files are written +# using the newer VaultAES256 which does not require md5 +from hashlib import md5 + + try: from six import byte2int except ImportError: @@ -88,7 +90,7 @@ def byte2int(bs): def check_prereqs(): if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH: - raise errors.AnsibleError(CRYPTO_UPGRADE) + raise AnsibleError(CRYPTO_UPGRADE) class VaultLib(object): @@ -108,17 +110,17 @@ def encrypt(self, data): data = to_unicode(data) if self.is_encrypted(data): - raise errors.AnsibleError("data is already encrypted") + raise AnsibleError("data is already encrypted") if not self.cipher_name: self.cipher_name = "AES256" - # raise errors.AnsibleError("the cipher must be set before encrypting data") + # raise AnsibleError("the cipher must be set before encrypting data") if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST: cipher = globals()['Vault' + self.cipher_name] this_cipher = cipher() else: - raise errors.AnsibleError("{0} cipher could not be found".format(self.cipher_name)) + raise AnsibleError("{0} cipher could not be found".format(self.cipher_name)) """ # combine sha + data @@ -137,10 +139,10 @@ def decrypt(self, data): data = to_bytes(data) if self.password is None: - raise errors.AnsibleError("A vault password must be specified to decrypt data") + raise AnsibleError("A vault password must be specified to decrypt data") if not self.is_encrypted(data): - raise errors.AnsibleError("data is not encrypted") + raise AnsibleError("data is not encrypted") # clean out header data = self._split_header(data) @@ -151,12 +153,12 @@ def decrypt(self, data): cipher = globals()['Vault' + ciphername] this_cipher = cipher() else: - raise errors.AnsibleError("{0} cipher could not be found".format(ciphername)) + raise AnsibleError("{0} cipher could not be found".format(ciphername)) # try to unencrypt data data = this_cipher.decrypt(data, self.password) if data is None: - raise errors.AnsibleError("Decryption failed") + 
raise AnsibleError("Decryption failed") return data @@ -166,7 +168,7 @@ def _add_header(self, data): #tmpdata = hexlify(data) tmpdata = [to_bytes(data[i:i+80]) for i in range(0, len(data), 80)] if not self.cipher_name: - raise errors.AnsibleError("the cipher must be set before adding a header") + raise AnsibleError("the cipher must be set before adding a header") dirty_data = to_bytes(HEADER + ";" + self.version + ";" + self.cipher_name + "\n") for l in tmpdata: @@ -246,7 +248,7 @@ def create_file(self): check_prereqs() if os.path.isfile(self.filename): - raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename) + raise AnsibleError("%s exists, please use 'edit' instead" % self.filename) # Let the user specify contents and save file self._edit_file_helper(cipher=self.cipher_name) @@ -256,18 +258,18 @@ def decrypt_file(self): check_prereqs() if not os.path.isfile(self.filename): - raise errors.AnsibleError("%s does not exist" % self.filename) + raise AnsibleError("%s does not exist" % self.filename) tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) if this_vault.is_encrypted(tmpdata): dec_data = this_vault.decrypt(tmpdata) if dec_data is None: - raise errors.AnsibleError("Decryption failed") + raise AnsibleError("Decryption failed") else: self.write_data(dec_data, self.filename) else: - raise errors.AnsibleError("%s is not encrypted" % self.filename) + raise AnsibleError("%s is not encrypted" % self.filename) def edit_file(self): @@ -305,7 +307,7 @@ def encrypt_file(self): check_prereqs() if not os.path.isfile(self.filename): - raise errors.AnsibleError("%s does not exist" % self.filename) + raise AnsibleError("%s does not exist" % self.filename) tmpdata = self.read_data(self.filename) this_vault = VaultLib(self.password) @@ -314,7 +316,7 @@ def encrypt_file(self): enc_data = this_vault.encrypt(tmpdata) self.write_data(enc_data, self.filename) else: - raise errors.AnsibleError("%s is already encrypted" % 
self.filename) + raise AnsibleError("%s is already encrypted" % self.filename) def rekey_file(self, new_password): @@ -375,11 +377,11 @@ def __init__(self, password, filename): self.filename = filename if not os.path.isfile(self.filename): - raise errors.AnsibleError("%s does not exist" % self.filename) + raise AnsibleError("%s does not exist" % self.filename) try: self.filehandle = open(filename, "rb") except Exception as e: - raise errors.AnsibleError("Could not open %s: %s" % (self.filename, str(e))) + raise AnsibleError("Could not open %s: %s" % (self.filename, str(e))) _, self.tmpfile = tempfile.mkstemp() @@ -403,7 +405,7 @@ def get_decrypted(self): this_vault = VaultLib(self.password) dec_data = this_vault.decrypt(tmpdata) if dec_data is None: - raise errors.AnsibleError("Decryption failed") + raise AnsibleError("Decryption failed") else: self.tempfile.write(dec_data) return self.tmpfile @@ -423,7 +425,7 @@ class VaultAES(object): def __init__(self): if not HAS_AES: - raise errors.AnsibleError(CRYPTO_UPGRADE) + raise AnsibleError(CRYPTO_UPGRADE) def aes_derive_key_and_iv(self, password, salt, key_length, iv_length): @@ -527,7 +529,7 @@ def decrypt(self, data, password, key_length=32): test_sha = sha256(to_bytes(this_data)).hexdigest() if this_sha != test_sha: - raise errors.AnsibleError("Decryption failed") + raise AnsibleError("Decryption failed") return this_data @@ -652,3 +654,4 @@ def is_equal(self, a, b): else: result |= ord(x) ^ ord(y) return result == 0 + From fe91f7b506b5615c80c32623f4144f182ac83308 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 14:24:45 -0400 Subject: [PATCH 1193/3617] moved read_vault_file to CLI from utils and renamed to clearer read_vault_password_file --- lib/ansible/cli/__init__.py | 31 ++++++++++++++++++++ lib/ansible/cli/adhoc.py | 3 +- lib/ansible/cli/playbook.py | 3 +- lib/ansible/cli/pull.py | 1 - lib/ansible/cli/vault.py | 3 +- lib/ansible/utils/vault.py | 56 ------------------------------------- 6 files 
changed, 34 insertions(+), 63 deletions(-) delete mode 100644 lib/ansible/utils/vault.py diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 7ff8755ef8ad66..00de29dd589005 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -34,6 +34,7 @@ from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.utils.unicode import to_bytes from ansible.utils.display import Display +from ansible.utils.path import is_executable class SortedOptParser(optparse.OptionParser): '''Optparser which sorts the options by opt before outputting --help''' @@ -462,3 +463,33 @@ def tty_ify(self, text): t = self._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word' return t + + @staticmethod + def read_vault_password_file(vault_password_file): + """ + Read a vault password from a file or if executable, execute the script and + retrieve password from STDOUT + """ + + this_path = os.path.realpath(os.path.expanduser(vault_password_file)) + if not os.path.exists(this_path): + raise AnsibleError("The vault password file %s was not found" % this_path) + + if is_executable(this_path): + try: + # STDERR not captured to make it easier for users to prompt for input in their scripts + p = subprocess.Popen(this_path, stdout=subprocess.PIPE) + except OSError as e: + raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." 
% (' '.join(this_path), e)) + stdout, stderr = p.communicate() + vault_pass = stdout.strip('\r\n') + else: + try: + f = open(this_path, "rb") + vault_pass=f.read().strip() + f.close() + except (OSError, IOError) as e: + raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e)) + + return vault_pass + diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index cb3af394f7fafd..ce5bb0d720e960 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -24,7 +24,6 @@ from ansible.parsing.splitter import parse_kv from ansible.playbook.play import Play from ansible.cli import CLI -from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager ######################################################## @@ -95,7 +94,7 @@ def run(self): if self.options.vault_password_file: # read vault_pass from a file - vault_pass = read_vault_file(self.options.vault_password_file) + vault_pass = CLI.read_vault_password_file(self.options.vault_password_file) elif self.options.ask_vault_pass: vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0] diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index 630ba391ffff74..9e97f53c53f4ad 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -34,7 +34,6 @@ from ansible.utils.display import Display from ansible.utils.unicode import to_unicode from ansible.utils.vars import combine_vars -from ansible.utils.vault import read_vault_file from ansible.vars import VariableManager #--------------------------------------------------------------------------------------------------- @@ -98,7 +97,7 @@ def run(self): if self.options.vault_password_file: # read vault_pass from a file - vault_pass = read_vault_file(self.options.vault_password_file) + vault_pass = CLI.read_vault_password_file(self.options.vault_password_file) elif self.options.ask_vault_pass: vault_pass = 
self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0] diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index d66ceddc06e1d3..a4bb1218228a2a 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -28,7 +28,6 @@ from ansible.cli import CLI from ansible.plugins import module_loader from ansible.utils.display import Display -from ansible.utils.vault import read_vault_file from ansible.utils.cmd_functions import run_cmd ######################################################## diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index cac9dc7177e314..1fa29d1d0696a4 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -25,7 +25,6 @@ from ansible.parsing.vault import VaultEditor from ansible.cli import CLI from ansible.utils.display import Display -from ansible.utils.vault import read_vault_file class VaultCLI(CLI): """ Vault command line class """ @@ -74,7 +73,7 @@ def run(self): if self.options.vault_password_file: # read vault_pass from a file - self.vault_pass = read_vault_file(self.options.vault_password_file) + self.vault_pass = read_vault_password_file(self.options.vault_password_file) elif self.options.ask_vault_pass: self.vault_pass, _= self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False) diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py deleted file mode 100644 index 5c704afac59b2b..00000000000000 --- a/lib/ansible/utils/vault.py +++ /dev/null @@ -1,56 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os -import subprocess - -from ansible import constants as C -from ansible.errors import AnsibleError -from ansible.utils.path import is_executable - -def read_vault_file(vault_password_file): - """ - Read a vault password from a file or if executable, execute the script and - retrieve password from STDOUT - """ - - this_path = os.path.realpath(os.path.expanduser(vault_password_file)) - if not os.path.exists(this_path): - raise AnsibleError("The vault password file %s was not found" % this_path) - - if is_executable(this_path): - try: - # STDERR not captured to make it easier for users to prompt for input in their scripts - p = subprocess.Popen(this_path, stdout=subprocess.PIPE) - except OSError as e: - raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." 
% (' '.join(this_path), e)) - stdout, stderr = p.communicate() - vault_pass = stdout.strip('\r\n') - else: - try: - f = open(this_path, "rb") - vault_pass=f.read().strip() - f.close() - except (OSError, IOError) as e: - raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e)) - - return vault_pass - From 064a34689a944f2fd8efb59a61232d85b78f89ec Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 14:53:23 -0400 Subject: [PATCH 1194/3617] now actually continues play on ignore errors --- lib/ansible/plugins/strategies/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index bcc57c8a4124df..fe97c98b3796c0 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -170,7 +170,7 @@ def _process_pending_results(self, iterator): self._tqm._stats.increment('failures', host.name) else: self._tqm._stats.increment('ok', host.name) - self._tqm.send_callback('v2_runner_on_failed', task_result) + self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors) elif result[0] == 'host_unreachable': self._tqm._unreachable_hosts[host.name] = True self._tqm._stats.increment('dark', host.name) From d993e7000c9570e1ae3c34d4bed03f109ef987a9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 15:01:50 -0400 Subject: [PATCH 1195/3617] added cyan back to ignoring message --- lib/ansible/plugins/callback/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 2bbc697f53c211..cff5fa1ad75e72 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -49,7 +49,7 @@ def v2_runner_on_failed(self, result, ignore_errors=False): self._display.display("fatal: [%s]: FAILED! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') if result._task.ignore_errors: - self._display.display("...ignoring") + self._display.display("...ignoring", color='cyan') def v2_runner_on_ok(self, result): From 032690a8439012833ca4206acd3ce3fe4d725e6c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 15:05:32 -0400 Subject: [PATCH 1196/3617] fix read_vault_password_file ref --- lib/ansible/cli/vault.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index 1fa29d1d0696a4..969ea2b6fa63ce 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -73,7 +73,7 @@ def run(self): if self.options.vault_password_file: # read vault_pass from a file - self.vault_pass = read_vault_password_file(self.options.vault_password_file) + self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file) elif self.options.ask_vault_pass: self.vault_pass, _= self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False) From 4203b699a8d051908d092a17c834da9bd6c061e7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 15:15:46 -0400 Subject: [PATCH 1197/3617] removed dict comprehension as 2.6 does not like --- lib/ansible/executor/process/result.py | 4 +++- lib/ansible/plugins/callback/__init__.py | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 71d6746be0fe34..2750261e04d8b9 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -108,7 +108,9 @@ def run(self): # if this task is registering a result, do it now if result._task.register: - res = {k: result._result[k] for k in set(result._result.keys()).difference(C.RESULT_SANITIZE)} + res = {} + for k in set(result._result.keys()).difference(C.RESULT_SANITIZE): + res[k] = result._result[k] 
self._send_result(('register_host_var', result._host, result._task.register, res)) # send callbacks, execute other options based on the result status diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index a5a13c1cfff534..d39af7e092a5c8 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -53,7 +53,10 @@ def _dump_results(self, result, sanitize=True, indent=4, sort_keys=True): return json.dumps(res, indent=indent, ensure_ascii=False, sort_keys=sort_keys) def _sanitize_result(self, result): - return {k: result[k] for k in set(result.keys()).difference(C.RESULT_SANITIZE)} + res = {} + for k in set(result.keys()).difference(C.RESULT_SANITIZE): + res[k] = result[k] + return res def set_connection_info(self, conn_info): pass From e0a5003b275c0dc3dab98cf9759fbc934710e4cd Mon Sep 17 00:00:00 2001 From: Jason Young Date: Sat, 11 Jul 2015 20:53:05 -0400 Subject: [PATCH 1198/3617] ability to specify any combination of EC2 instance states to return --- contrib/inventory/ec2.ini | 5 +++++ contrib/inventory/ec2.py | 26 ++++++++++++++++++++++++-- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/contrib/inventory/ec2.ini b/contrib/inventory/ec2.ini index a1d9b1d805d17a..50430ce0ed4d81 100644 --- a/contrib/inventory/ec2.ini +++ b/contrib/inventory/ec2.ini @@ -58,6 +58,11 @@ route53 = False # 'all_instances' to True to return all instances regardless of state. all_instances = False +# By default, only EC2 instances in the 'running' state are returned. Specify +# EC2 instance states to return as a comma-separated list. This +# option is overriden when 'all_instances' is True. +# instance_states = pending, running, shutting-down, terminated, stopping, stopped + # By default, only RDS instances in the 'available' state are returned. Set # 'all_rds_instances' to True return all RDS instances regardless of state. 
all_rds_instances = False diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index f2d9b51c903624..00d647fb05b2f0 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -244,6 +244,28 @@ def read_settings(self): else: self.all_instances = False + # Instance states to be gathered in inventory. Default is 'running'. + # Setting 'all_instances' to 'yes' overrides this option. + ec2_valid_instance_states = [ + 'pending', + 'running', + 'shutting-down', + 'terminated', + 'stopping', + 'stopped' + ] + self.ec2_instance_states = [] + if self.all_instances: + self.ec2_instance_states = ec2_valid_instance_states + elif config.has_option('ec2', 'instance_states'): + for instance_state in config.get('ec2', 'instance_states').split(','): + instance_state = instance_state.strip() + if instance_state not in ec2_valid_instance_states: + continue + self.ec2_instance_states.append(instance_state) + else: + self.ec2_instance_states = ['running'] + # Return all RDS instances? 
(if RDS is enabled) if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') @@ -531,8 +553,8 @@ def add_instance(self, instance, region): ''' Adds an instance to the inventory and index, as long as it is addressable ''' - # Only want running instances unless all_instances is True - if not self.all_instances and instance.state != 'running': + # Only return instances with desired instance states + if instance.state not in self.ec2_instance_states: return # Select the best destination address From c5c1dc2f11c16f0395dd2586a5384849b2653767 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 11 Jul 2015 21:49:35 -0400 Subject: [PATCH 1199/3617] Removing tags/when from role param hash calculation --- lib/ansible/playbook/role/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index ad9ad9c8bcb1e4..71dd00381168d5 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -101,8 +101,6 @@ def load(role_include, play, parent_role=None): # We use frozenset to make the dictionary hashable. 
params = role_include.get_role_params() - params['tags'] = role_include.tags - params['when'] = role_include.when hashed_params = hash_params(params) if role_include.role in play.ROLE_CACHE: for (entry, role_obj) in play.ROLE_CACHE[role_include.role].iteritems(): From ba929656707d640e2da2f3c496ace22799cd506e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 12 Jul 2015 16:10:34 -0400 Subject: [PATCH 1200/3617] fix for when invocation data is missing --- lib/ansible/plugins/callback/mail.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/mail.py b/lib/ansible/plugins/callback/mail.py index 4828062df93305..3357e014093fb7 100644 --- a/lib/ansible/plugins/callback/mail.py +++ b/lib/ansible/plugins/callback/mail.py @@ -69,7 +69,10 @@ def v2_runner_on_failed(self, res, ignore_errors=False): if ignore_errors: return sender = '"Ansible: %s" ' % host - attach = "%s: %s" % (res._result['invocation']['module_name'], json.dumps(res._result['invocation']['module_args'])) + attach = res._task.action + if 'invocation' in res._result: + attach = "%s: %s" % (res._result['invocation']['module_name'], json.dumps(res._result['invocation']['module_args'])) + subject = 'Failed: %s' % attach body = 'The following task failed for host ' + host + ':\n\n%s\n\n' % attach From f40b66d841585de204b205afb7df334800e51049 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 12 Jul 2015 16:39:27 -0400 Subject: [PATCH 1201/3617] Make sure the basedir is unicode Fixes #10773 --- lib/ansible/parsing/__init__.py | 3 ++- lib/ansible/playbook/role/definition.py | 2 +- lib/ansible/plugins/__init__.py | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/ansible/parsing/__init__.py b/lib/ansible/parsing/__init__.py index 027691d18ea2dc..0605afdd746229 100644 --- a/lib/ansible/parsing/__init__.py +++ b/lib/ansible/parsing/__init__.py @@ -31,6 +31,7 @@ from ansible.parsing.yaml.loader import AnsibleLoader from 
ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode from ansible.utils.path import unfrackpath +from ansible.utils.unicode import to_unicode class DataLoader(): @@ -175,7 +176,7 @@ def set_basedir(self, basedir): ''' sets the base directory, used to find files when a relative path is given ''' if basedir is not None: - self._basedir = basedir + self._basedir = to_unicode(basedir) def path_dwim(self, given): ''' diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py index d46bca6b2e9d13..1cd84ff77849cd 100644 --- a/lib/ansible/playbook/role/definition.py +++ b/lib/ansible/playbook/role/definition.py @@ -129,7 +129,7 @@ def _load_role_path(self, role_name): return (role_name, role_path) else: # we always start the search for roles in the base directory of the playbook - role_search_paths = [os.path.join(self._loader.get_basedir(), 'roles'), './roles', './'] + role_search_paths = [os.path.join(self._loader.get_basedir(), u'roles'), u'./roles', u'./'] # also search in the configured roles path if C.DEFAULT_ROLES_PATH: diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index bbbe0bd7950649..d40a4f5f810ac3 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -29,6 +29,7 @@ from ansible import constants as C from ansible.utils.display import Display +from ansible.utils.unicode import to_unicode from ansible import errors MODULE_CACHE = {} @@ -38,7 +39,7 @@ def push_basedir(basedir): # avoid pushing the same absolute dir more than once - basedir = os.path.realpath(basedir) + basedir = to_unicode(os.path.realpath(basedir)) if basedir not in _basedirs: _basedirs.insert(0, basedir) From 8efc42d9933ceff17f637fcb9bcbee5f070607db Mon Sep 17 00:00:00 2001 From: Alejandro Guirao Date: Mon, 13 Jul 2015 10:31:35 +0200 Subject: [PATCH 1202/3617] Add shelvefile lookup plugin --- lib/ansible/plugins/lookup/shelvefile.py | 81 ++++++++++++++++++++++++ 1 file changed, 
81 insertions(+) create mode 100644 lib/ansible/plugins/lookup/shelvefile.py diff --git a/lib/ansible/plugins/lookup/shelvefile.py b/lib/ansible/plugins/lookup/shelvefile.py new file mode 100644 index 00000000000000..5d27222c829bbd --- /dev/null +++ b/lib/ansible/plugins/lookup/shelvefile.py @@ -0,0 +1,81 @@ +# (c) 2015, Alejandro Guirao +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import shelve +import os +from ansible import utils, errors + +class LookupModule(object): + + def __init__(self, basedir=None, **kwargs): + self.basedir = basedir + + def read_shelve(self, shelve_filename, key): + """ + Read the value of "key" from a shelve file + """ + d = shelve.open(shelve_filename) + res = d.get(key, None) + d.close() + return res + + def run(self, terms, inject=None, **kwargs): + + terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) + ret = [] + + if not isinstance(terms, list): + terms = [ terms ] + + for term in terms: + playbook_path = None + relative_path = None + paramvals = {"file": None, "key": None} + params = term.split() + + try: + for param in params: + name, value = param.split('=') + assert(name in paramvals) + paramvals[name] = value + + except (ValueError, AssertionError), e: + # In case "file" or "key" are not present + raise errors.AnsibleError(e) + + file = paramvals['file'] + key = paramvals['key'] + basedir_path = utils.path_dwim(self.basedir, 
file) + + # Search also in the role/files directory and in the playbook directory + if '_original_file' in inject: + relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', file, self.basedir, check=False) + if 'playbook_dir' in inject: + playbook_path = os.path.join(inject['playbook_dir'], file) + + for path in (basedir_path, relative_path, playbook_path): + if path and os.path.exists(path): + res = self.read_shelve(path, key) + if res is None: + raise errors.AnsibleError("Key %s not found in shelve file %s" % (key, file)) + # Convert the value read to string + ret.append(str(res)) + break + else: + raise errors.AnsibleError("Could not locate shelve file in lookup: %s" % file) + + return ret From 962f681bde58bf9ebae75059b1de13b3604cee22 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 09:22:54 -0400 Subject: [PATCH 1203/3617] added readme to v1 --- v1/README.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 v1/README.md diff --git a/v1/README.md b/v1/README.md new file mode 100644 index 00000000000000..396e8434c4dcf2 --- /dev/null +++ b/v1/README.md @@ -0,0 +1,6 @@ +This is dead code, it is here for convinience for those testing current devel so as to acertain if a bug was introduced in the v2 rewrite or was preexisitng in the 1.x codebase. 
+ +DO NOT: + * use this code as reference + * make PRs against this code + * expect this code to be shipped with the 2.0 version of ansible From 6e99023c84e20d1de97187597e40e610705adf77 Mon Sep 17 00:00:00 2001 From: Alejandro Guirao Date: Mon, 13 Jul 2015 15:37:27 +0200 Subject: [PATCH 1204/3617] Changed to support Ansible v2 --- lib/ansible/plugins/lookup/shelvefile.py | 32 +++++++++++++----------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/lib/ansible/plugins/lookup/shelvefile.py b/lib/ansible/plugins/lookup/shelvefile.py index 5d27222c829bbd..1e02cd30ec0a2c 100644 --- a/lib/ansible/plugins/lookup/shelvefile.py +++ b/lib/ansible/plugins/lookup/shelvefile.py @@ -14,12 +14,16 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type import shelve import os -from ansible import utils, errors -class LookupModule(object): +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase + +class LookupModule(LookupBase): def __init__(self, basedir=None, **kwargs): self.basedir = basedir @@ -33,17 +37,17 @@ def read_shelve(self, shelve_filename, key): d.close() return res - def run(self, terms, inject=None, **kwargs): - - terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) - ret = [] + def run(self, terms, variables=None, **kwargs): if not isinstance(terms, list): terms = [ terms ] + ret = [] + for term in terms: playbook_path = None relative_path = None + paramvals = {"file": None, "key": None} params = term.split() @@ -55,27 +59,27 @@ def run(self, terms, inject=None, **kwargs): except (ValueError, AssertionError), e: # In case "file" or "key" are not present - raise errors.AnsibleError(e) + raise AnsibleError(e) file = paramvals['file'] key = paramvals['key'] - basedir_path = utils.path_dwim(self.basedir, file) + basedir_path = self._loader.path_dwim(file) # Search 
also in the role/files directory and in the playbook directory - if '_original_file' in inject: - relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', file, self.basedir, check=False) - if 'playbook_dir' in inject: - playbook_path = os.path.join(inject['playbook_dir'], file) + if 'role_path' in variables: + relative_path = self._loader.path_dwim_relative(variables['role_path'], 'files', file) + if 'playbook_dir' in variables: + playbook_path = self._loader.path_dwim_relative(variables['playbook_dir'],'files', file) for path in (basedir_path, relative_path, playbook_path): if path and os.path.exists(path): res = self.read_shelve(path, key) if res is None: - raise errors.AnsibleError("Key %s not found in shelve file %s" % (key, file)) + raise AnsibleError("Key %s not found in shelve file %s" % (key, file)) # Convert the value read to string ret.append(str(res)) break else: - raise errors.AnsibleError("Could not locate shelve file in lookup: %s" % file) + raise AnsibleError("Could not locate shelve file in lookup: %s" % file) return ret From 587a6cb44c3b5412061ec1764e43539b2dd7b0c4 Mon Sep 17 00:00:00 2001 From: Alejandro Guirao Date: Mon, 13 Jul 2015 15:48:23 +0200 Subject: [PATCH 1205/3617] Remove v1 code --- lib/ansible/plugins/lookup/shelvefile.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ansible/plugins/lookup/shelvefile.py b/lib/ansible/plugins/lookup/shelvefile.py index 1e02cd30ec0a2c..89e393694b3198 100644 --- a/lib/ansible/plugins/lookup/shelvefile.py +++ b/lib/ansible/plugins/lookup/shelvefile.py @@ -25,8 +25,6 @@ class LookupModule(LookupBase): - def __init__(self, basedir=None, **kwargs): - self.basedir = basedir def read_shelve(self, shelve_filename, key): """ From d8abae71a477a9a49764840355063422c7188e3c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 10:34:44 -0400 Subject: [PATCH 1206/3617] now assemble skips during checkmode TODO: actually make it check with checkmode fixes 
http://github.com/ansible/ansible-modules-core/issues/661 --- lib/ansible/plugins/action/assemble.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py index c62f7f7dc9bac9..f4d8fe88614c21 100644 --- a/lib/ansible/plugins/action/assemble.py +++ b/lib/ansible/plugins/action/assemble.py @@ -77,6 +77,9 @@ def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=Non def run(self, tmp=None, task_vars=dict()): + if self._connection_info.check_mode: + return dict(skipped=True, msg=("skipped, this module does not support check_mode.")) + src = self._task.args.get('src', None) dest = self._task.args.get('dest', None) delimiter = self._task.args.get('delimiter', None) @@ -125,7 +128,7 @@ def run(self, tmp=None, task_vars=dict()): self._remote_chmod('a+r', xfered, tmp) # run the copy module - + new_module_args = self._task.args.copy() new_module_args.update( dict( From 91c9bb96e317bf5a67fdbc45745acbfaf3a27c2f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 13 Jul 2015 10:41:46 -0400 Subject: [PATCH 1207/3617] Moving jsonfile cache plugin over and fixing #10883 Fixes #10883 --- lib/ansible/plugins/cache/jsonfile.py | 159 ++++++++++++++++++++++++++ 1 file changed, 159 insertions(+) create mode 100644 lib/ansible/plugins/cache/jsonfile.py diff --git a/lib/ansible/plugins/cache/jsonfile.py b/lib/ansible/plugins/cache/jsonfile.py new file mode 100644 index 00000000000000..9eb4faa84feda7 --- /dev/null +++ b/lib/ansible/plugins/cache/jsonfile.py @@ -0,0 +1,159 @@ +# (c) 2014, Brian Coca, Josh Drake, et al +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import time +import errno +import codecs + +try: + import simplejson as json +except ImportError: + import json + +from ansible import constants as C +from ansible.errors import * +from ansible.parsing.utils.jsonify import jsonify +from ansible.plugins.cache.base import BaseCacheModule + +class CacheModule(BaseCacheModule): + """ + A caching module backed by json files. + """ + def __init__(self, *args, **kwargs): + + self._timeout = float(C.CACHE_PLUGIN_TIMEOUT) + self._cache = {} + self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path + if not self._cache_dir: + raise AnsibleError("error, fact_caching_connection is not set, cannot use fact cache") + + if not os.path.exists(self._cache_dir): + try: + os.makedirs(self._cache_dir) + except (OSError,IOError), e: + # FIXME: this is in display now, but cache plugins don't have that + #utils.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e))) + return None + + def get(self, key): + + if key in self._cache: + return self._cache.get(key) + + if self.has_expired(key): + raise KeyError + + cachefile = "%s/%s" % (self._cache_dir, key) + print("getting %s" % cachefile) + try: + f = codecs.open(cachefile, 'r', encoding='utf-8') + except (OSError,IOError), e: + # FIXME: this is in display now, but cache plugins don't have that + #utils.warning("error while trying to read %s : %s" % (cachefile, str(e))) + pass + else: + try: + value = json.load(f) + self._cache[key] = value + return value + except ValueError: + # FIXME: this is in display now, but cache plugins don't have that + #utils.warning("error while trying to write to 
%s : %s" % (cachefile, str(e))) + return dict() + finally: + f.close() + + def set(self, key, value): + + self._cache[key] = value + + cachefile = "%s/%s" % (self._cache_dir, key) + try: + f = codecs.open(cachefile, 'w', encoding='utf-8') + except (OSError,IOError), e: + # FIXME: this is in display now, but cache plugins don't have that + #utils.warning("error while trying to write to %s : %s" % (cachefile, str(e))) + pass + else: + f.write(jsonify(value)) + finally: + f.close() + + def has_expired(self, key): + + cachefile = "%s/%s" % (self._cache_dir, key) + try: + st = os.stat(cachefile) + except (OSError,IOError), e: + if e.errno == errno.ENOENT: + return False + else: + # FIXME: this is in display now, but cache plugins don't have that + #utils.warning("error while trying to stat %s : %s" % (cachefile, str(e))) + pass + + if time.time() - st.st_mtime <= self._timeout: + return False + + if key in self._cache: + del self._cache[key] + return True + + def keys(self): + keys = [] + for k in os.listdir(self._cache_dir): + if not (k.startswith('.') or self.has_expired(k)): + keys.append(k) + return keys + + def contains(self, key): + cachefile = "%s/%s" % (self._cache_dir, key) + + if key in self._cache: + return True + + if self.has_expired(key): + return False + try: + st = os.stat(cachefile) + return True + except (OSError,IOError), e: + if e.errno == errno.ENOENT: + return False + else: + # FIXME: this is in display now, but cache plugins don't have that + #utils.warning("error while trying to stat %s : %s" % (cachefile, str(e))) + pass + + def delete(self, key): + del self._cache[key] + try: + os.remove("%s/%s" % (self._cache_dir, key)) + except (OSError,IOError), e: + pass #TODO: only pass on non existing? 
+ + def flush(self): + self._cache = {} + for key in self.keys(): + self.delete(key) + + def copy(self): + ret = dict() + for key in self.keys(): + ret[key] = self.get(key) + return ret From 932d1e57f7ec4f33b564a642e21c4e0eb903151f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 13 Jul 2015 10:56:09 -0400 Subject: [PATCH 1208/3617] Removing stray debugging print --- lib/ansible/plugins/cache/jsonfile.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ansible/plugins/cache/jsonfile.py b/lib/ansible/plugins/cache/jsonfile.py index 9eb4faa84feda7..356d899325eaf7 100644 --- a/lib/ansible/plugins/cache/jsonfile.py +++ b/lib/ansible/plugins/cache/jsonfile.py @@ -59,7 +59,6 @@ def get(self, key): raise KeyError cachefile = "%s/%s" % (self._cache_dir, key) - print("getting %s" % cachefile) try: f = codecs.open(cachefile, 'r', encoding='utf-8') except (OSError,IOError), e: From d977da5b41f34933ca11c69d3af766f8ec283b55 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 13 Jul 2015 11:06:03 -0400 Subject: [PATCH 1209/3617] Fixing up fact_cache use in VariableManager --- lib/ansible/plugins/cache/jsonfile.py | 2 +- lib/ansible/vars/__init__.py | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/cache/jsonfile.py b/lib/ansible/plugins/cache/jsonfile.py index 356d899325eaf7..08c57018cbb8e2 100644 --- a/lib/ansible/plugins/cache/jsonfile.py +++ b/lib/ansible/plugins/cache/jsonfile.py @@ -73,7 +73,7 @@ def get(self, key): except ValueError: # FIXME: this is in display now, but cache plugins don't have that #utils.warning("error while trying to write to %s : %s" % (cachefile, str(e))) - return dict() + raise KeyError finally: f.close() diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 591066e0785bf3..0f1561b5a219c1 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -181,7 +181,10 @@ def get_vars(self, loader, play=None, host=None, task=None, 
use_cache=True): all_vars = self._combine_vars(all_vars, host.get_vars()) # next comes the facts cache and the vars cache, respectively - all_vars = self._combine_vars(all_vars, self._fact_cache.get(host.get_name(), dict())) + try: + all_vars = self._combine_vars(all_vars, self._fact_cache.get(host.name, dict())) + except KeyError: + pass if play: all_vars = self._combine_vars(all_vars, play.get_vars()) @@ -345,11 +348,13 @@ def set_host_facts(self, host, facts): assert isinstance(facts, dict) - host_name = host.get_name() - if host_name not in self._fact_cache: - self._fact_cache[host_name] = facts + if host.name not in self._fact_cache: + self._fact_cache[host.name] = facts else: - self._fact_cache[host_name].update(facts) + try: + self._fact_cache[host.name].update(facts) + except KeyError: + self._fact_cache[host.name] = facts def set_host_variable(self, host, varname, value): ''' From b6b74746d9b0954fb42f1efa274add700126c0b2 Mon Sep 17 00:00:00 2001 From: objectified Date: Mon, 13 Jul 2015 17:17:05 +0200 Subject: [PATCH 1210/3617] fixed Github links to plugin sources --- docsite/rst/developing_plugins.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docsite/rst/developing_plugins.rst b/docsite/rst/developing_plugins.rst index c2349ed676f614..4f459a6ef05fdd 100644 --- a/docsite/rst/developing_plugins.rst +++ b/docsite/rst/developing_plugins.rst @@ -21,7 +21,7 @@ Carrier Pigeon?) it's as simple as copying the format of one of the existing mod directory. The value of 'smart' for a connection allows selection of paramiko or openssh based on system capabilities, and chooses 'ssh' if OpenSSH supports ControlPersist, in Ansible 1.2.1 an later. Previous versions did not support 'smart'. -More documentation on writing connection plugins is pending, though you can jump into `lib/ansible/runner/connection_plugins `_ and figure things out pretty easily. 
+More documentation on writing connection plugins is pending, though you can jump into `lib/ansible/plugins/connections `_ and figure +More documentation on writing lookup plugins is pending, though you can jump into `lib/ansible/plugins/lookup `_ and figure things out pretty easily. .. _developing_vars_plugins: @@ -54,7 +54,7 @@ Filter Plugins If you want more Jinja2 filters available in a Jinja2 template (filters like to_yaml and to_json are provided by default), they can be extended by writing a filter plugin. Most of the time, when someone comes up with an idea for a new filter they would like to make available in a playbook, we'll just include them in 'core.py' instead. -Jump into `lib/ansible/runner/filter_plugins/ `_ for details. +Jump into `lib/ansible/plugins/filter `_ for details. .. _developing_callbacks: @@ -68,17 +68,17 @@ Callbacks are one of the more interesting plugin types. Adding additional callb Examples ++++++++ -Example callbacks are shown in `plugins/callbacks `_. +Example callbacks are shown in `lib/ansible/plugins/callback `_. The `log_plays -`_ +`_ callback is an example of how to intercept playbook events to a log file, and the `mail -`_ +`_ callback sends email when playbooks complete. The `osx_say -`_ +`_ callback provided is particularly entertaining -- it will respond with computer synthesized speech on OS X in relation to playbook events, and is guaranteed to entertain and/or annoy coworkers. 
From c18fdd0c18d26cc0c5c3033509da28c30443c0ed Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 13 Jul 2015 15:18:05 -0400 Subject: [PATCH 1211/3617] Re-implement "conditional imports" for vars_files --- lib/ansible/vars/__init__.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 0f1561b5a219c1..13c9cc8f08baac 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -189,13 +189,26 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): if play: all_vars = self._combine_vars(all_vars, play.get_vars()) templar = Templar(loader=loader, variables=all_vars) - for vars_file in play.get_vars_files(): + + for vars_file_item in play.get_vars_files(): try: - vars_file = templar.template(vars_file) - data = loader.load_from_file(vars_file) - if data is None: - data = dict() - all_vars = self._combine_vars(all_vars, data) + # we assume each item in the list is itself a list, as we + # support "conditional includes" for vars_files, which mimics + # the with_first_found mechanism. + vars_file_list = templar.template(vars_file_item) + if not isinstance(vars_file_list, list): + vars_file_list = [ vars_file_list ] + + # now we iterate through the (potential) files, and break out + # as soon as we read one from the list. If none are found, we + # raise an error, which is silently ignored at this point. 
+ for vars_file in vars_file_list: + data = loader.load_from_file(vars_file) + if data is not None: + all_vars = self._combine_vars(all_vars, data) + break + else: + raise AnsibleError("vars file %s was not found" % vars_file_item) except: # FIXME: get_vars should probably be taking a flag to determine # whether or not vars files errors should be fatal at this From 3a768b3b9fd3c82c783b11139c1251cecef1ba24 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 15:32:14 -0400 Subject: [PATCH 1212/3617] removed unused methods, these now live in base class --- lib/ansible/plugins/callback/minimal.py | 57 +------------------------ 1 file changed, 1 insertion(+), 56 deletions(-) diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index 86e5694a15fefe..90a200089dd756 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -33,9 +33,6 @@ class CallbackModule(CallbackBase): CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'minimal' - def v2_on_any(self, *args, **kwargs): - pass - def v2_runner_on_failed(self, result, ignore_errors=False): if 'exception' in result._result: if self._display.verbosity < 3: @@ -50,7 +47,7 @@ def v2_runner_on_failed(self, result, ignore_errors=False): # finally, remove the exception from the result so it's not shown every time del result._result['exception'] - self._display.display("%s | FAILED! => %s" % (result._host.get_name(), result._result), color='red') + self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') def v2_runner_on_ok(self, result): self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result)), color='green') @@ -60,55 +57,3 @@ def v2_runner_on_skipped(self, result): def v2_runner_on_unreachable(self, result): self._display.display("%s | UNREACHABLE!" 
% result._host.get_name(), color='yellow') - - def v2_runner_on_no_hosts(self, task): - pass - - def v2_runner_on_async_poll(self, host, res, jid, clock): - pass - - def v2_runner_on_async_ok(self, host, res, jid): - pass - - def v2_runner_on_async_failed(self, host, res, jid): - pass - - def v2_playbook_on_start(self): - pass - - def v2_playbook_on_notify(self, host, handler): - pass - - def v2_playbook_on_no_hosts_matched(self): - pass - - def v2_playbook_on_no_hosts_remaining(self): - pass - - def v2_playbook_on_task_start(self, task, is_conditional): - pass - - def v2_playbook_on_cleanup_task_start(self, task): - pass - - def v2_playbook_on_handler_task_start(self, task): - pass - - def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): - pass - - def v2_playbook_on_setup(self): - pass - - def v2_playbook_on_import_for_host(self, result, imported_file): - pass - - def v2_playbook_on_not_import_for_host(self, result, missing_file): - pass - - def v2_playbook_on_play_start(self, play): - pass - - def v2_playbook_on_stats(self, stats): - pass - From 8ad52c2e4f71eb2f40826af9bda111f37aa2e980 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 15:42:47 -0400 Subject: [PATCH 1213/3617] readded oneline output feature to adhoc fixes #11573 --- lib/ansible/cli/adhoc.py | 7 ++- lib/ansible/plugins/callback/minimal.py | 2 +- lib/ansible/plugins/callback/oneline.py | 57 +++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 2 deletions(-) create mode 100644 lib/ansible/plugins/callback/oneline.py diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index ce5bb0d720e960..4ea3bab78c481a 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -128,6 +128,11 @@ def run(self): play_ds = self._play_ds(pattern) play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) + if self.options.one_line: + cb = 'oneline' + else: + cb = 
'minimal' + # now create a task queue manager to execute the play self._tqm = None try: @@ -138,7 +143,7 @@ def run(self): display=self.display, options=self.options, passwords=passwords, - stdout_callback='minimal', + stdout_callback=cb, ) result = self._tqm.run(play) finally: diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index 90a200089dd756..dd61ee023a1317 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -53,7 +53,7 @@ def v2_runner_on_ok(self, result): self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result)), color='green') def v2_runner_on_skipped(self, result): - pass + self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan') def v2_runner_on_unreachable(self, result): self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow') diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py new file mode 100644 index 00000000000000..1fbc5bb0322e09 --- /dev/null +++ b/lib/ansible/plugins/callback/oneline.py @@ -0,0 +1,57 @@ +# (c) 2012-2014, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + + ''' + This is the default callback interface, which simply prints messages + to stdout when new callback events are received. + ''' + + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'oneline' + + def v2_runner_on_failed(self, result, ignore_errors=False): + if 'exception' in result._result: + if self._display.verbosity < 3: + # extract just the actual error message from the exception text + error = result._result['exception'].strip().split('\n')[-1] + msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error + else: + msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','') + + self._display.display(msg, color='red') + + # finally, remove the exception from the result so it's not shown every time + del result._result['exception'] + + self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red') + + def v2_runner_on_ok(self, result): + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green') + + + def v2_runner_on_unreachable(self, result): + self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow') From 373830b5df9924985d35e40ff0332024182b8ae4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 13 Jul 2015 15:45:20 -0400 Subject: [PATCH 1214/3617] Fix removal of .git from modules directories Also changed the setup.py maintainers email to our default support one. 
Fixes #11051 --- MANIFEST.in | 2 ++ setup.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index 8af0aa9bc171b3..b9bf5f42764f44 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -15,5 +15,7 @@ include VERSION include MANIFEST.in include contrib/README.md include contrib/inventory * +exclude lib/ansible/modules/core/.git* +exclude lib/ansible/modules/extras/.git* prune lib/ansible/modules/core/.git prune lib/ansible/modules/extras/.git diff --git a/setup.py b/setup.py index 1f73836cbd3c54..38f00ba9e3b3e0 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ version=__version__, description='Radically simple IT automation', author=__author__, - author_email='michael@ansible.com', + author_email='support@ansible.com', url='http://ansible.com/', license='GPLv3', install_requires=['paramiko', 'jinja2', "PyYAML", 'setuptools', 'pycrypto >= 2.6', 'six'], From c4b6d91275ac9564f2e64f768b1c893f82bcf3f7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 15:53:55 -0400 Subject: [PATCH 1215/3617] added skipped to oneline --- lib/ansible/plugins/callback/oneline.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py index 1fbc5bb0322e09..d7e76151b4c6b3 100644 --- a/lib/ansible/plugins/callback/oneline.py +++ b/lib/ansible/plugins/callback/oneline.py @@ -55,3 +55,6 @@ def v2_runner_on_ok(self, result): def v2_runner_on_unreachable(self, result): self._display.display("%s | UNREACHABLE!" 
% result._host.get_name(), color='yellow') + + def v2_runner_on_skipped(self, result): + self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan') From 24b830bbc8f228015841bc20ba423af6f04129a0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 16:23:14 -0400 Subject: [PATCH 1216/3617] fixed executable for raw module --- lib/ansible/plugins/action/__init__.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 83f0f4765ca652..02f30d4b5975ae 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -412,26 +412,22 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var debug("done with _execute_module (%s, %s)" % (module_name, module_args)) return data - def _low_level_execute_command(self, cmd, tmp, sudoable=True, in_data=None): + def _low_level_execute_command(self, cmd, tmp, sudoable=True, in_data=None, executable=None): ''' This is the function which executes the low level shell command, which may be commands to create/remove directories for temporary files, or to run the module code or python directly when pipelining. 
''' + if executable is not None: + cmd = executable + ' -c ' + cmd + debug("in _low_level_execute_command() (%s)" % (cmd,)) if not cmd: # this can happen with powershell modules when there is no analog to a Windows command (like chmod) debug("no command, exiting _low_level_execute_command()") return dict(stdout='', stderr='') - #FIXME: disabled as this should happen in the connection plugin, verify before removing - #prompt = None - #success_key = None - # - #if sudoable: - # cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd) - debug("executing the command %s through the connection" % cmd) rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, in_data=in_data, sudoable=sudoable) debug("command execution done") From 9c8f0da32754cc4377f3fb58b496241a38bf8344 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Tue, 14 Jul 2015 00:14:13 +0200 Subject: [PATCH 1217/3617] Do not combine group_vars with an empty file This addresses a specific case with multiple vars files in a group_vars/${groupname}/ directory where one of those files is empty, which returns None instead of an empty dict. 
--- lib/ansible/vars/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 13c9cc8f08baac..96313ef4f4326b 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -308,7 +308,8 @@ def _load_inventory_file(self, path, loader): paths = [os.path.join(path, name) for name in names if not name.startswith('.')] for p in paths: _found, results = self._load_inventory_file(path=p, loader=loader) - data = self._combine_vars(data, results) + if results is not None: + data = self._combine_vars(data, results) else: file_name, ext = os.path.splitext(path) From d5fb11d89c4094ef0eab0c19a431575a0af4d068 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Tue, 14 Jul 2015 00:20:04 +0200 Subject: [PATCH 1218/3617] Use YAML_FILENAME_EXTENSIONS for vars files. The v2 codebase didn't use this previously introduced constant yet. C.YAML_FILENAME_EXTENSIONS --- lib/ansible/vars/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 13c9cc8f08baac..3f9fb8fc5cbfe7 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -314,11 +314,11 @@ def _load_inventory_file(self, path, loader): file_name, ext = os.path.splitext(path) data = None if not ext: - for ext in ('', '.yml', '.yaml'): + for ext in C.YAML_FILENAME_EXTENSIONS: new_path = path + ext if loader.path_exists(new_path): - data = loader.load_from_file(new_path) - break + data = loader.load_from_file(new_path) + break else: if loader.path_exists(path): data = loader.load_from_file(path) From a09f44210e5c0e0658a553f375b74c7cb9922f6d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 19:22:31 -0400 Subject: [PATCH 1219/3617] now callback errors are not silent but warnings --- lib/ansible/executor/task_queue_manager.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 41e28c3baef781..bb9d19d12f2957 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -300,5 +300,8 @@ def send_callback(self, method_name, *args, **kwargs): ] for method in methods: if method is not None: - method(*args, **kwargs) + try: + method(*args, **kwargs) + except Exception as e: + self._display.warning('Error when using %s: %s' % (method, str(e))) From 73eca8239b172596f3eacea5a44aade426e475c9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 19:30:38 -0400 Subject: [PATCH 1220/3617] added sts_assume_role --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f4f3fdaa0f054e..a14c4589609a5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ New Modules: * amazon: iam * amazon: iam_policy * amazon: route53_zone + * amazon: sts_assume_role * bundler * circonus_annotation * consul From 3102469b94272954d02f99b64fe7d321679d3bf3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 20:40:40 -0400 Subject: [PATCH 1221/3617] fixing become success string --- lib/ansible/plugins/action/raw.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py index a0da97798acf60..2a0d368511c97c 100644 --- a/lib/ansible/plugins/action/raw.py +++ b/lib/ansible/plugins/action/raw.py @@ -34,7 +34,7 @@ def run(self, tmp=None, task_vars=dict()): # for some modules (script, raw), the sudo success key # may leak into the stdout due to the way the sudo/su # command is constructed, so we filter that out here - if result.get('stdout','').strip().startswith('SUDO-SUCCESS-'): - result['stdout'] = re.sub(r'^((\r)?\n)?SUDO-SUCCESS.*(\r)?\n', '', result['stdout']) + if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'): + result['stdout'] = 
re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout']) return result From 2b723c6130f7d7887ba13cf5623bd49c39150bbf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 13 Jul 2015 20:42:09 -0400 Subject: [PATCH 1222/3617] added missing re import --- lib/ansible/plugins/action/raw.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py index 2a0d368511c97c..d59be1c890e47a 100644 --- a/lib/ansible/plugins/action/raw.py +++ b/lib/ansible/plugins/action/raw.py @@ -19,6 +19,8 @@ from ansible.plugins.action import ActionBase +import re + class ActionModule(ActionBase): TRANSFERS_FILES = False From 9a586c35127769ef52f65bde78ce4c6cd97fcb55 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 13 Jul 2015 16:20:19 -0400 Subject: [PATCH 1223/3617] Properly catch AnsibleError and not all errors --- lib/ansible/vars/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index a08e9c55bd2b82..599499ca2ada82 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -209,7 +209,7 @@ def get_vars(self, loader, play=None, host=None, task=None, use_cache=True): break else: raise AnsibleError("vars file %s was not found" % vars_file_item) - except: + except AnsibleError, e: # FIXME: get_vars should probably be taking a flag to determine # whether or not vars files errors should be fatal at this # stage, or just base it on whether a host was specified? From 610223fbf4047f9288155406dad3729cb0dcc7de Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Wed, 13 May 2015 23:54:52 -0400 Subject: [PATCH 1224/3617] explain source of EC2 inventory error https://github.com/ansible/ansible/issues/10840 before RDS: `ERROR: Inventory script (ec2.py) had an execution error: Forbidden` EC2: `ERROR: Inventory script (ec2.py) had an execution error: Error connecting to AWS backend. 
You are not authorized to perform this operation.` after RDS: `ERROR: Inventory script (ec2.py) had an execution error: ERROR: "Forbidden", while: getting RDS instances` EC2: `ERROR: Inventory script (ec2.py) had an execution error: ERROR: "Error connecting to AWS backend. You are not authorized to perform this operation.", while: getting EC2 instances` --- contrib/inventory/ec2.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index f2d9b51c903624..e17e41cc689077 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -406,7 +406,9 @@ def get_instances_by_region(self, region): else: backend = 'Eucalyptus' if self.eucalyptus else 'AWS' error = "Error connecting to %s backend.\n%s" % (backend, e.message) - self.fail_with_error(error) + self.fail_with_error( + 'ERROR: "{error}", while: {err_operation}'.format( + error=error, err_operation='getting EC2 instances')) def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular @@ -425,7 +427,9 @@ def get_rds_instances_by_region(self, region): error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS RDS is down:\n%s" % e.message - self.fail_with_error(error) + self.fail_with_error( + 'ERROR: "{error}", while: {err_operation}'.format( + error=error, err_operation='getting RDS instances')) def get_elasticache_clusters_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache clusters (with From 17b94cf139ca1882e8c827010a3c4aa4fa624ba6 Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Mon, 13 Jul 2015 23:46:33 -0400 Subject: [PATCH 1225/3617] generalize error context reporting, add elasticache explanations --- contrib/inventory/ec2.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index e17e41cc689077..f0b01ef194c48d 100755 --- 
a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -406,9 +406,7 @@ def get_instances_by_region(self, region): else: backend = 'Eucalyptus' if self.eucalyptus else 'AWS' error = "Error connecting to %s backend.\n%s" % (backend, e.message) - self.fail_with_error( - 'ERROR: "{error}", while: {err_operation}'.format( - error=error, err_operation='getting EC2 instances')) + self.fail_with_error(error, 'getting EC2 instances') def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular @@ -427,9 +425,7 @@ def get_rds_instances_by_region(self, region): error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS RDS is down:\n%s" % e.message - self.fail_with_error( - 'ERROR: "{error}", while: {err_operation}'.format( - error=error, err_operation='getting RDS instances')) + self.fail_with_error(error, 'getting RDS instances') def get_elasticache_clusters_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache clusters (with @@ -452,7 +448,7 @@ def get_elasticache_clusters_by_region(self, region): error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache is down:\n%s" % e.message - self.fail_with_error(error) + self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to CacheClusters or @@ -462,7 +458,7 @@ def get_elasticache_clusters_by_region(self, region): except KeyError as e: error = "ElastiCache query to AWS failed (unexpected format)." 
- self.fail_with_error(error) + self.fail_with_error(error, 'getting ElastiCache clusters') for cluster in clusters: self.add_elasticache_cluster(cluster, region) @@ -486,7 +482,7 @@ def get_elasticache_replication_groups_by_region(self, region): error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message - self.fail_with_error(error) + self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to ReplicationGroups @@ -496,7 +492,7 @@ def get_elasticache_replication_groups_by_region(self, region): except KeyError as e: error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." - self.fail_with_error(error) + self.fail_with_error(error, 'getting ElastiCache clusters') for replication_group in replication_groups: self.add_elasticache_replication_group(replication_group, region) @@ -518,8 +514,11 @@ def get_auth_error_message(self): return '\n'.join(errors) - def fail_with_error(self, err_msg): + def fail_with_error(self, err_msg, err_operation_context=None): '''log an error to std err for ansible-playbook to consume and exit''' + if err_operation_context: + err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( + err_msg=err_msg, err_operation=err_operation_context) sys.stderr.write(err_msg) sys.exit(1) From 7092021d81c41626f51b765ea8fdc42e376ad905 Mon Sep 17 00:00:00 2001 From: Alex Lo Date: Mon, 13 Jul 2015 23:51:23 -0400 Subject: [PATCH 1226/3617] simplify variable names --- contrib/inventory/ec2.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py index f0b01ef194c48d..be25a5b6943499 100755 --- a/contrib/inventory/ec2.py +++ b/contrib/inventory/ec2.py @@ -514,11 +514,11 @@ def get_auth_error_message(self): return '\n'.join(errors) - def fail_with_error(self, err_msg, err_operation_context=None): + def 
fail_with_error(self, err_msg, err_operation=None): '''log an error to std err for ansible-playbook to consume and exit''' - if err_operation_context: + if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( - err_msg=err_msg, err_operation=err_operation_context) + err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1) From 6971e92f39f1579a7ae99f115d11600238755182 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Jul 2015 00:23:17 -0400 Subject: [PATCH 1227/3617] Fixing up some output stuff --- lib/ansible/constants.py | 2 +- lib/ansible/plugins/action/__init__.py | 11 ++++---- lib/ansible/plugins/callback/__init__.py | 2 +- .../roles/test_command_shell/tasks/main.yml | 25 +++---------------- 4 files changed, 11 insertions(+), 29 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 5b7c901415d1ef..c95cb34b454588 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -235,4 +235,4 @@ def shell_expand_path(path): DEFAULT_SU_PASS = None VAULT_VERSION_MIN = 1.0 VAULT_VERSION_MAX = 1.0 -RESULT_SANITIZE = frozenset(['invocation','warnings']) +RESULT_SANITIZE = frozenset(['warnings']) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 02f30d4b5975ae..80dd43099ce229 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -23,7 +23,7 @@ import json import os import random -import sys # FIXME: probably not needed +import sys import tempfile import time @@ -404,10 +404,11 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var data['stdout_lines'] = data.get('stdout', '').splitlines() # store the module invocation details back into the result - data['invocation'] = dict( - module_args = module_args, - module_name = module_name, - ) + if self._task.async is not None: + data['invocation'] = dict( + module_args = module_args, + module_name = 
module_name, + ) debug("done with _execute_module (%s, %s)" % (module_name, module_args)) return data diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index d39af7e092a5c8..a13811b95411e2 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -49,7 +49,7 @@ def _dump_results(self, result, sanitize=True, indent=4, sort_keys=True): if sanitize: res = self._sanitize_result(result) else: - res = results + res = result return json.dumps(res, indent=indent, ensure_ascii=False, sort_keys=sort_keys) def _sanitize_result(self, result): diff --git a/test/integration/roles/test_command_shell/tasks/main.yml b/test/integration/roles/test_command_shell/tasks/main.yml index 325e76cffea4e1..976843e369bade 100644 --- a/test/integration/roles/test_command_shell/tasks/main.yml +++ b/test/integration/roles/test_command_shell/tasks/main.yml @@ -127,7 +127,6 @@ - "shell_result0.rc == 0" - "shell_result0.stderr == ''" - "shell_result0.stdout == 'win'" - - "not shell_result0.warnings" # executable @@ -156,7 +155,6 @@ - "shell_result2.rc == 0" - "shell_result2.stderr == ''" - "shell_result2.stdout == 'win'" - - "not shell_result2.warnings" # creates @@ -169,28 +167,11 @@ - name: verify that afile.txt is present file: path={{output_dir_test}}/afile.txt state=file -# removes - -- name: remove afile.txt using rm - shell: rm {{output_dir_test | expanduser}}/afile.txt removes={{output_dir_test | expanduser}}/afile.txt - register: shell_result3 - -- name: assert that using rm under shell causes a warning - assert: - that: - - "shell_result3.warnings" - -- name: verify that afile.txt is absent - file: path={{output_dir_test}}/afile.txt state=absent - register: shell_result4 - -- name: assert that the file was removed by the shell - assert: - that: - - "shell_result4.changed == False" - # multiline +- name: remove test file previously created + file: path={{output_dir_test | expanduser}}/afile.txt 
state=absent + - name: execute a shell command using a literal multiline block args: executable: /bin/bash From 6376dda5c7ba259d28451d930de22bc15c431151 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 07:12:13 -0400 Subject: [PATCH 1228/3617] clarified v1/ purpose and relationships with tags and branches --- v1/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/v1/README.md b/v1/README.md index 396e8434c4dcf2..bbc03a45a1328c 100644 --- a/v1/README.md +++ b/v1/README.md @@ -1,4 +1,6 @@ This is dead code, it is here for convinience for those testing current devel so as to acertain if a bug was introduced in the v2 rewrite or was preexisitng in the 1.x codebase. +Using this code should be equivalent of checking out the v1_last tag, which was devel at a point between 1.9.1 and 1.9.2 releases. +The stable-1.9 is the maintenance branch for the 1.9.x code, which might continue to diverge from the v1/ tree as bugs get fixed. DO NOT: * use this code as reference From 8793308c39bf064106f08b74e5cb468c94bf1d83 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 07:28:32 -0400 Subject: [PATCH 1229/3617] made md5 into generic checksum function that uses sha now --- lib/ansible/module_utils/powershell.ps1 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1 index c2bc09ac885cfa..a11e316989c6f0 100644 --- a/lib/ansible/module_utils/powershell.ps1 +++ b/lib/ansible/module_utils/powershell.ps1 @@ -142,14 +142,14 @@ Function ConvertTo-Bool return } -# Helper function to calculate md5 of a file in a way which powershell 3 +# Helper function to calculate a hash of a file in a way which powershell 3 # and above can handle: -Function Get-FileMd5($path) +Function Get-FileChecksum($path) { $hash = "" If (Test-Path -PathType Leaf $path) { - $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $sp = new-object -TypeName 
System.Security.Cryptography.SHA1CryptoServiceProvider; $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); $fp.Dispose(); From 44aef347cbb1abae1a781ddec8b5eb13f1e4e792 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 08:05:57 -0400 Subject: [PATCH 1230/3617] enabled good parsing tests in parsing target fixed test_good_parsing role added raw duplicate parameters to test_good_parsing --- test/integration/Makefile | 2 +- test/integration/roles/test_good_parsing/tasks/main.yml | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index c197bd415302c6..e6a85acd6bc772 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -29,7 +29,7 @@ parsing: #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario3; [ $$? -eq 4 ] #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario4; [ $$? -eq 4 ] #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5; [ $$? -eq 4 ] - #ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) + ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) echo "skipping for now..." 
includes: diff --git a/test/integration/roles/test_good_parsing/tasks/main.yml b/test/integration/roles/test_good_parsing/tasks/main.yml index 482d0efac5d69a..03afb99295cf66 100644 --- a/test/integration/roles/test_good_parsing/tasks/main.yml +++ b/test/integration/roles/test_good_parsing/tasks/main.yml @@ -97,6 +97,9 @@ that: result.cmd == "echo foo=bar foo=bar" +- name: raw duplicates, noop + raw: /bin/true foo=bar foo=bar + - name: multi-line inline shell commands (should use script module but hey) are a thing shell: "{{ multi_line }}" register: result From 7dd56008399d8f0a801e0b1991ba2f83546415c3 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Jul 2015 08:25:48 -0400 Subject: [PATCH 1231/3617] Allow empty include files again Fixes #11582 --- lib/ansible/plugins/strategies/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index fe97c98b3796c0..46e1c7a13c7db5 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -369,6 +369,8 @@ def _load_included_file(self, included_file, iterator): try: data = self._loader.load_from_file(included_file._filename) + if data is None: + return [] except AnsibleError, e: for host in included_file._hosts: tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e))) From 4e94bb64d82eeb8756ff54f208f001c1056a12bd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Jul 2015 09:26:24 -0400 Subject: [PATCH 1232/3617] Fix group/host var loading relative to playbook basedir --- lib/ansible/executor/playbook_executor.py | 1 + lib/ansible/inventory/__init__.py | 21 +++++++++++++-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index 343ac4ed39f50f..e692b76b8f5eb7 100644 --- a/lib/ansible/executor/playbook_executor.py +++ 
b/lib/ansible/executor/playbook_executor.py @@ -73,6 +73,7 @@ def run(self): try: for playbook_path in self._playbooks: pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) + self._inventory.set_playbook_basedir(os.path.dirname(playbook_path)) if self._tqm is None: # we are doing a listing entry = {'playbook': playbook_path} diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 26e9e617875c52..77f4eabcf8e27a 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -595,22 +595,27 @@ def playbook_basedir(self): """ returns the directory of the current playbook """ return self._playbook_basedir - def set_playbook_basedir(self, dir): + def set_playbook_basedir(self, dir_name): """ sets the base directory of the playbook so inventory can use it as a basedir for host_ and group_vars, and other things. """ # Only update things if dir is a different playbook basedir - if dir != self._playbook_basedir: - self._playbook_basedir = dir + if dir_name != self._playbook_basedir: + self._playbook_basedir = dir_name # get group vars from group_vars/ files + # FIXME: excluding the new_pb_basedir directory may result in group_vars + # files loading more than they should, however with the file caching + # we do this shouldn't be too much of an issue. 
Still, this should + # be fixed at some point to allow a "first load" to touch all of the + # directories, then later runs only touch the new basedir specified for group in self.groups: - # FIXME: combine_vars - group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) + #group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True)) + group.vars = combine_vars(group.vars, self.get_group_vars(group)) # get host vars from host_vars/ files for host in self.get_hosts(): - # FIXME: combine_vars - host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) + #host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True)) + host.vars = combine_vars(host.vars, self.get_host_vars(host)) # invalidate cache self._vars_per_host = {} self._vars_per_group = {} @@ -646,7 +651,7 @@ def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False): # this can happen from particular API usages, particularly if not run # from /usr/bin/ansible-playbook if basedir is None: - continue + basedir = './' scan_pass = scan_pass + 1 From ea159ef9de3927c35b629cd7df9cb33eb83ad8bf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 10:07:30 -0400 Subject: [PATCH 1233/3617] fixed backup and validate fragments --- lib/ansible/utils/module_docs_fragments/backup.py | 1 + .../utils/module_docs_fragments/validate.py | 15 ++++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/lib/ansible/utils/module_docs_fragments/backup.py b/lib/ansible/utils/module_docs_fragments/backup.py index bee7182a91f195..f6b2902512ac66 100644 --- a/lib/ansible/utils/module_docs_fragments/backup.py +++ b/lib/ansible/utils/module_docs_fragments/backup.py @@ -20,6 +20,7 @@ class ModuleDocFragment(object): # Standard documentation fragment DOCUMENTATION = ''' +options: backup: description: - Create a backup file including the timestamp information so you can get diff --git 
a/lib/ansible/utils/module_docs_fragments/validate.py b/lib/ansible/utils/module_docs_fragments/validate.py index 6b4a14b7fa2fc6..98fb07ac4e5013 100644 --- a/lib/ansible/utils/module_docs_fragments/validate.py +++ b/lib/ansible/utils/module_docs_fragments/validate.py @@ -20,11 +20,12 @@ class ModuleDocFragment(object): # Standard documentation fragment DOCUMENTATION = ''' - validate: - required: false - description: - - The validation command to run before copying into place. The path to the file to - validate is passed in via '%s' which must be present as in the apache example below. - The command is passed securely so shell features like expansion and pipes won't work. - default: None +options: + validate: + required: false + description: + - The validation command to run before copying into place. The path to the file to + validate is passed in via '%s' which must be present as in the apache example below. + The command is passed securely so shell features like expansion and pipes won't work. 
+ default: None ''' From 42e355f9a3b20fb5a0b6e5e2413e0c2114a7fa00 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 10:07:46 -0400 Subject: [PATCH 1234/3617] fragments can now be a list --- lib/ansible/utils/module_docs.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index e296c0c6986238..57d6e1b7c829ef 100644 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -54,19 +54,21 @@ def get_docstring(filename, verbose=False): if isinstance(child, ast.Assign): if 'DOCUMENTATION' in (t.id for t in child.targets): doc = yaml.safe_load(child.value.s) - fragment_slug = doc.get('extends_documentation_fragment', - 'doesnotexist').lower() + fragments = doc.get('extends_documentation_fragment', []) + + if isinstance(fragments, basestring): + fragments = [ fragments ] # Allow the module to specify a var other than DOCUMENTATION # to pull the fragment from, using dot notation as a separator - if '.' in fragment_slug: - fragment_name, fragment_var = fragment_slug.split('.', 1) - fragment_var = fragment_var.upper() - else: - fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION' - + for fragment_slug in fragments: + fragment_slug = fragment_slug.lower() + if '.' 
in fragment_slug: + fragment_name, fragment_var = fragment_slug.split('.', 1) + fragment_var = fragment_var.upper() + else: + fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION' - if fragment_slug != 'doesnotexist': fragment_class = fragment_loader.get(fragment_name) assert fragment_class is not None From 3c7faa8378c2d0abfa0799a546b41d042b2ab6e3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 10:10:03 -0400 Subject: [PATCH 1235/3617] fixed missing self in self.action on rekey in vault fixes #11584 --- lib/ansible/cli/vault.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index 969ea2b6fa63ce..a56a2205a8ea9a 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -58,7 +58,7 @@ def parse(self): self.parser.set_usage("usage: %prog view [options] file_name") elif self.action == "encrypt": self.parser.set_usage("usage: %prog encrypt [options] file_name") - elif action == "rekey": + elif self.action == "rekey": self.parser.set_usage("usage: %prog rekey [options] file_name") self.options, self.args = self.parser.parse_args() From 3b913943b2f6668fb3efb3a0ac27707beb3dd55e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Jul 2015 11:08:55 -0400 Subject: [PATCH 1236/3617] Updating base strategy unit test regarding bad file loads based on earlier change --- test/units/plugins/strategies/test_strategy_base.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 28f1d254391cb6..6e3187bac97492 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -309,7 +309,8 @@ def test_strategy_base_load_included_file(self): res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator) mock_inc_file._filename = "bad.yml" - 
self.assertRaises(AnsibleParserError, strategy_base._load_included_file, included_file=mock_inc_file, iterator=mock_iterator) + res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator) + self.assertEqual(res, []) def test_strategy_base_run_handlers(self): workers = [] From 22165dd046c725929939145dfe38173681199409 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 11:44:45 -0400 Subject: [PATCH 1237/3617] fixed bad parsing tests --- test/integration/Makefile | 7 +------ test/integration/roles/test_bad_parsing/tasks/main.yml | 5 +++++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index e6a85acd6bc772..3d4555b54f19e3 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -24,13 +24,8 @@ CONSUL_RUNNING := $(shell python consul_running.py) all: parsing test_var_precedence unicode test_templating_settings non_destructive destructive includes check_mode test_hash test_handlers test_group_by test_vault test_tags parsing: - #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario1; [ $$? -eq 4 ] - #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario2; [ $$? -eq 4 ] - #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario3; [ $$? -eq 4 ] - #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario4; [ $$? -eq 4 ] - #ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5; [ $$? 
-eq 4 ] + ansible-playbook bad_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -vvv $(TEST_FLAGS) --tags prepare,common,scenario5 ansible-playbook good_parsing.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) -v $(TEST_FLAGS) - echo "skipping for now..." includes: ansible-playbook test_includes.yml -i $(INVENTORY) -e @$(VARS_FILE) $(CREDENTIALS_ARG) $(TEST_FLAGS) diff --git a/test/integration/roles/test_bad_parsing/tasks/main.yml b/test/integration/roles/test_bad_parsing/tasks/main.yml index 4636383d9eb204..c0cad8798a4d9d 100644 --- a/test/integration/roles/test_bad_parsing/tasks/main.yml +++ b/test/integration/roles/test_bad_parsing/tasks/main.yml @@ -48,4 +48,9 @@ - name: test that a missing/malformed jinja2 filter fails debug: msg="{{output_dir|badfiltername}}" tags: scenario5 + register: filter_fail + ignore_errors: yes +- assert: + that: + - filter_fail|failed From 5eb25a48ee801239c7f9462d32fb123328c7dc3d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 12:05:20 -0400 Subject: [PATCH 1238/3617] added empty include test --- test/integration/roles/test_includes/tasks/empty.yml | 0 test/integration/test_includes2.yml | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 test/integration/roles/test_includes/tasks/empty.yml diff --git a/test/integration/roles/test_includes/tasks/empty.yml b/test/integration/roles/test_includes/tasks/empty.yml new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/test/integration/test_includes2.yml b/test/integration/test_includes2.yml index 9e8331ee180f01..1b15682d70fb89 100644 --- a/test/integration/test_includes2.yml +++ b/test/integration/test_includes2.yml @@ -14,9 +14,9 @@ - { role: test_includes, tags: test_includes } tasks: - include: roles/test_includes/tasks/not_a_role_task.yml + - include: roles/test_includes/tasks/empty.yml - assert: that: - "ca == 33000" - "cb == 33001" - "cc == 33002" - From f6c64a8c007b2d51e7da5b17643fd3d347c59da7 Mon Sep 17 
00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 12:12:43 -0400 Subject: [PATCH 1239/3617] fixed var file loading --- test/integration/test_var_precedence.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/test_var_precedence.yml b/test/integration/test_var_precedence.yml index 8bddfff4473c8a..ae4b4cfea16791 100644 --- a/test/integration/test_var_precedence.yml +++ b/test/integration/test_var_precedence.yml @@ -36,7 +36,7 @@ - hosts: inven_overridehosts vars_files: - - "{{ var_dir }}/test_var_precedence.yml" + - "test_var_precedence.yml" roles: - role: test_var_precedence_inven_override foo: bar From 8d887d8dd3f7e1a17bbbb5719f182ffd0cd66709 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 14 Jul 2015 15:02:20 -0400 Subject: [PATCH 1240/3617] Adding back --start-at-task feature Also implemented framework for --step, though it's not used yet --- lib/ansible/cli/playbook.py | 6 +++--- lib/ansible/executor/connection_info.py | 6 ++++++ lib/ansible/executor/play_iterator.py | 11 +++++++++++ 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index 9e97f53c53f4ad..1eab61eb4d3f67 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -60,12 +60,12 @@ def parse(self): # ansible playbook specific opts parser.add_option('--list-tasks', dest='listtasks', action='store_true', help="list all tasks that would be executed") + parser.add_option('--list-tags', dest='listtags', action='store_true', + help="list all available tags") parser.add_option('--step', dest='step', action='store_true', help="one-step-at-a-time: confirm each task before running") - parser.add_option('--start-at-task', dest='start_at', + parser.add_option('--start-at-task', dest='start_at_task', help="start the playbook at the task matching this name") - parser.add_option('--list-tags', dest='listtags', action='store_true', - help="list all available tags") self.options, 
self.args = parser.parse_args() diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py index 46ce129e45b15f..a760cc9aabb279 100644 --- a/lib/ansible/executor/connection_info.py +++ b/lib/ansible/executor/connection_info.py @@ -177,6 +177,8 @@ def __init__(self, play=None, options=None, passwords=None): self.no_log = False self.check_mode = False self.force_handlers = False + self.start_at_task = None + self.step = False #TODO: just pull options setup to above? # set options before play to allow play to override them @@ -241,6 +243,10 @@ def set_options(self, options): self.check_mode = boolean(options.check) if hasattr(options, 'force_handlers') and options.force_handlers: self.force_handlers = boolean(options.force_handlers) + if hasattr(options, 'step') and options.step: + self.step = boolean(options.step) + if hasattr(options, 'start_at_task') and options.start_at_task: + self.start_at_task = options.start_at_task # get the tag info from options, converting a comma-separated list # of values into a proper list if need be. 
We check to see if the diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 8794e7e403418c..2ca3815e4194fb 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -99,6 +99,17 @@ def __init__(self, inventory, play, connection_info, all_vars): self._host_states = {} for host in inventory.get_hosts(self._play.hosts): self._host_states[host.name] = HostState(blocks=self._blocks) + # if we're looking to start at a specific task, iterate through + # the tasks for this host until we find the specified task + if connection_info.start_at_task is not None: + while True: + (s, task) = self.get_next_task_for_host(host, peek=True) + if s.run_state == self.ITERATING_COMPLETE: + break + if task.get_name() != connection_info.start_at_task: + self.get_next_task_for_host(host) + else: + break # Extend the play handlers list to include the handlers defined in roles self._play.handlers.extend(play.compile_roles_handlers()) From 327b1676a8ea43f3add465b230b86f6cde07aed1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 14 Jul 2015 11:48:41 -0700 Subject: [PATCH 1241/3617] Add support for SNI and TLS-1.1 and TLS-1.2 to the fetch_url() helper Fixes #1716 Fixes #1695 --- lib/ansible/module_utils/urls.py | 77 +++++++++++++++---- .../roles/test_get_url/tasks/main.yml | 32 ++++++++ .../integration/roles/test_uri/tasks/main.yml | 7 +- 3 files changed, 98 insertions(+), 18 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index cf9a652ed148a8..2ba19b629f7742 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -95,9 +95,16 @@ try: import ssl - HAS_SSL=True + HAS_SSL = True except: - HAS_SSL=False + HAS_SSL = False + +try: + # SNI Handling needs python2.7.9's SSLContext + from ssl import create_default_context, SSLContext + HAS_SSLCONTEXT = True +except ImportError: + HAS_SSLCONTEXT = False HAS_MATCH_HOSTNAME = True try: @@ 
-277,6 +284,13 @@ class NoSSLError(SSLValidationError): class CustomHTTPSConnection(httplib.HTTPSConnection): + def __init__(self, *args, **kwargs): + httplib.HTTPSConnection.__init__(self, *args, **kwargs) + if HAS_SSLCONTEXT: + self.context = create_default_context() + if self.cert_file: + self.context.load_cert_chain(self.cert_file, self.key_file) + def connect(self): "Connect to a host on a given (SSL) port." @@ -287,7 +301,10 @@ def connect(self): if self._tunnel_host: self.sock = sock self._tunnel() - self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) + if HAS_SSLCONTEXT: + self.sock = self.context.wrap_socket(sock, server_hostname=self.host) + else: + self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) class CustomHTTPSHandler(urllib2.HTTPSHandler): @@ -462,9 +479,17 @@ def detect_no_proxy(self, url): return False return True + def _make_context(self, tmp_ca_cert_path): + context = create_default_context() + context.load_verify_locations(tmp_ca_cert_path) + return context + def http_request(self, req): tmp_ca_cert_path, paths_checked = self.get_ca_certs() https_proxy = os.environ.get('https_proxy') + context = None + if HAS_SSLCONTEXT: + context = self._make_context(tmp_ca_cert_path) # Detect if 'no_proxy' environment variable is set and if our URL is included use_proxy = self.detect_no_proxy(req.get_full_url()) @@ -486,14 +511,20 @@ def http_request(self, req): s.sendall('\r\n') connect_result = s.recv(4096) self.validate_proxy_response(connect_result) - ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) - match_hostname(ssl_s.getpeercert(), self.hostname) + if context: + ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname')) + else: + ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=ssl.PROTOCOL_TLSv1) + 
match_hostname(ssl_s.getpeercert(), self.hostname) else: raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme')) else: s.connect((self.hostname, self.port)) - ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED) - match_hostname(ssl_s.getpeercert(), self.hostname) + if context: + ssl_s = context.wrap_socket(s, server_hostname=self.hostname) + else: + ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=ssl.PROTOCOL_TLSv1) + match_hostname(ssl_s.getpeercert(), self.hostname) # close the ssl connection #ssl_s.unwrap() s.close() @@ -502,9 +533,14 @@ def http_request(self, req): if 'connection refused' in str(e).lower(): raise ConnectionError('Failed to connect to %s:%s.' % (self.hostname, self.port)) else: - raise SSLValidationError('Failed to validate the SSL certificate for %s:%s. ' - 'Use validate_certs=False (insecure) or make sure your managed systems have a valid CA certificate installed. ' - 'Paths checked for this platform: %s' % (self.hostname, self.port, ", ".join(paths_checked)) + raise SSLValidationError('Failed to validate the SSL certificate for %s:%s.' + ' Make sure your managed systems have a valid CA' + ' certificate installed. If the website serving the url' + ' uses SNI you need python >= 2.7.9 on your managed' + ' machine. You can use validate_certs=False if you do' + ' not need to confirm the server\s identity but this is' + ' unsafe and not recommended' + ' Paths checked for this platform: %s' % (self.hostname, self.port, ", ".join(paths_checked)) ) except CertificateError: raise SSLValidationError("SSL Certificate does not belong to %s. 
Make sure the url has a certificate that belongs to it or use validate_certs=False (insecure)" % self.hostname) @@ -534,8 +570,6 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, if parsed[0] == 'https' and validate_certs: if not HAS_SSL: raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False, however this is unsafe and not recommended') - if not HAS_MATCH_HOSTNAME: - raise SSLValidationError('Available SSL validation does not check that the certificate matches the hostname. You can install backports.ssl_match_hostname or update your managed machine to python-2.7.9 or newer. You could also use validate_certs=False, however this is unsafe and not recommended') # do the cert validation netloc = parsed[1] @@ -630,13 +664,22 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, for header in headers: request.add_header(header, headers[header]) - if sys.version_info < (2,6,0): + urlopen_args = [request, None] + if sys.version_info >= (2,6,0): # urlopen in python prior to 2.6.0 did not # have a timeout parameter - r = urllib2.urlopen(request, None) - else: - r = urllib2.urlopen(request, None, timeout) - + urlopen_args.append(timeout) + + if HAS_SSLCONTEXT and not validate_certs: + # In 2.7.9, the default context validates certificates + context = SSLContext(ssl.PROTOCOL_SSLv23) + context.options |= ssl.OP_NO_SSLv2 + context.options |= ssl.OP_NO_SSLv3 + context.verify_mode = ssl.CERT_NONE + context.check_hostname = False + urlopen_args += (None, None, None, context) + + r = urllib2.urlopen(*urlopen_args) return r # diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 88ff3b2e21c648..6e3842f6abf373 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -60,3 +60,35 @@ that: - "result.changed == true" - "stat_result.stat.exists == true" + 
+# SNI Tests +# SNI is only built into the stdlib from python-2.7.9 onwards +- name: Test that SNI works + get_url: + # A test site that returns a page with information on what SNI information + # the client sent. A failure would have the string: did not send a TLS server name indication extension + url: 'https://foo.sni.velox.ch/' + dest: "{{ output_dir }}/sni.html" + register: get_url_result + ignore_errors: True + +- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" + register: data_result + when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}" + +# If distros start backporting SNI, can make a new conditional based on whether this works: +# python -c 'from ssl import SSLContext' +- debug: msg=get_url_result +- name: Assert that SNI works with this python version + assert: + that: + - 'data_result.rc == 0' + - '"failed" not in get_url_result' + when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}" + +# If the client doesn't support SNI then get_url should have failed with a certificate mismatch +- name: Assert that hostname verification failed because SNI is not supported on this version of python + assert: + that: + - 'get_url_result["failed"]' + when: "{{ ansible_python_version | version_compare('2.7.9', '<') }}" diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 99c6048a59e181..7300578982d448 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -110,6 +110,11 @@ - "'certificate does not match ' in result.msg" - "stat_result.stat.exists == false" +- name: Clean up any cruft from the results directory + file: + name: "{{ output_dir }}/kreitz.html" + state: absent + - name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no get_url: url: "https://kennethreitz.org/" @@ -124,5 +129,5 @@ - name: Assert that the file was 
downloaded assert: that: - - "result.changed == true" - "stat_result.stat.exists == true" + - "result.changed == true" From 323362e23a970e9b649fa40a402f322b9efdc497 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 15:59:00 -0400 Subject: [PATCH 1242/3617] added stdout to test result --- test/units/executor/test_task_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/executor/test_task_executor.py b/test/units/executor/test_task_executor.py index 64ce1d5faa2f2e..966be3c8c70ad0 100644 --- a/test/units/executor/test_task_executor.py +++ b/test/units/executor/test_task_executor.py @@ -299,7 +299,7 @@ def test_task_executor_poll_async_result(self): def _get(*args, **kwargs): mock_action = MagicMock() - mock_action.run.return_value = dict() + mock_action.run.return_value = dict(stdout='') return mock_action # testing with some bad values in the result passed to poll async, From 0e1d771a330eae40e121165b0f28cf143a0b6dee Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 16:47:47 -0400 Subject: [PATCH 1243/3617] updated submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9acf10face033d..c27c6d2c8c0ac2 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9acf10face033dda6d5b1f570fb35cbd3deabac5 +Subproject commit c27c6d2c8c0ac21e0a372515d5bccae64caefe91 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8a89f4afe45286..ff2386faf49dd4 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8a89f4afe452868eccdb8eab841cb501b7bf0548 +Subproject commit ff2386faf49dd44964fac084ed7199ab4ea5f741 From fbec8bfb90df1d2e8a0a4df7ac1d9879ca8f4dde Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 17:03:57 -0400 Subject: [PATCH 1244/3617] 
updated ref to add docfixes --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c27c6d2c8c0ac2..291fef3b34ea55 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c27c6d2c8c0ac21e0a372515d5bccae64caefe91 +Subproject commit 291fef3b34ea5510f031816d9c569f54098b8bec From ae6d9ebf28ad6f843687093824d431be7254b94d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 17:33:27 -0400 Subject: [PATCH 1245/3617] added maintainers (from author field) to ansible-doc --- lib/ansible/cli/doc.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index 72ce3c1a5e5315..7215eb9ee10740 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -285,4 +285,12 @@ def get_man_text(doc): text.append(doc['returndocs']) text.append('') + if isinstance(doc['author'], basestring): + maintainers = [doc['author']] + else: + maintainers = doc['author'] + + text.append('MAINTAINERS: ' + ', '.join(maintainers)) + text.append('') + return "\n".join(text) From 0b035a4e35510d8e9f710f15f513b59b4c64084c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Jul 2015 01:55:45 -0400 Subject: [PATCH 1246/3617] Unicode in result debug statements caused a traceback --- lib/ansible/executor/process/result.py | 2 +- lib/ansible/plugins/strategies/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 2750261e04d8b9..5e09bd7f84a438 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -59,7 +59,7 @@ def __init__(self, final_q, workers): super(ResultProcess, self).__init__() def _send_result(self, result): - debug("sending result: %s" % (result,)) + debug(u"sending result: %s" % ([unicode(x) for x in result],)) 
self._final_q.put(result, block=False) debug("done sending result") diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 46e1c7a13c7db5..1b4c1a2c1d6dfd 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -155,7 +155,7 @@ def _process_pending_results(self, iterator): while not self._final_q.empty() and not self._tqm._terminated: try: result = self._final_q.get(block=False) - debug("got result from result worker: %s" % (result,)) + debug("got result from result worker: %s" % ([unicode(x) for x in result],)) # all host status messages contain 2 entries: (msg, task_result) if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'): From 2d870b71125b7cc51ad9cce355df9e2d10e62a6e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Jul 2015 10:20:55 -0400 Subject: [PATCH 1247/3617] Fix logic where invocation details are added to results --- lib/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 80dd43099ce229..49038b29c9136a 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -404,7 +404,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var data['stdout_lines'] = data.get('stdout', '').splitlines() # store the module invocation details back into the result - if self._task.async is not None: + if self._task.async != 0: data['invocation'] = dict( module_args = module_args, module_name = module_name, From b76cb8f655fa1f7ef4402738a8fc28d9208eb541 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 15 Jul 2015 10:40:37 -0400 Subject: [PATCH 1248/3617] now that invocation is only async again, no need to sanitize --- lib/ansible/constants.py | 1 - lib/ansible/executor/process/result.py | 6 +----- 
lib/ansible/plugins/callback/__init__.py | 16 ++-------------- 3 files changed, 3 insertions(+), 20 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index c95cb34b454588..43ae782e195709 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -235,4 +235,3 @@ def shell_expand_path(path): DEFAULT_SU_PASS = None VAULT_VERSION_MIN = 1.0 VAULT_VERSION_MAX = 1.0 -RESULT_SANITIZE = frozenset(['warnings']) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 5e09bd7f84a438..baf7afcf5b4faf 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -33,7 +33,6 @@ except ImportError: HAS_ATFORK=False -from ansible import constants as C from ansible.playbook.handler import Handler from ansible.playbook.task import Task @@ -108,10 +107,7 @@ def run(self): # if this task is registering a result, do it now if result._task.register: - res = {} - for k in set(result._result.keys()).difference(C.RESULT_SANITIZE): - res[k] = result._result[k] - self._send_result(('register_host_var', result._host, result._task.register, res)) + self._send_result(('register_host_var', result._host, result._task.register, result._result)) # send callbacks, execute other options based on the result status # FIXME: this should all be cleaned up and probably moved to a sub-function. 
diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index a13811b95411e2..ea56d758a7ec7c 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -21,8 +21,6 @@ import json -from ansible import constants as C - __all__ = ["CallbackBase"] @@ -45,18 +43,8 @@ def __init__(self, display): version = getattr(self, 'CALLBACK_VERSION', 'unknwon') self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version)) - def _dump_results(self, result, sanitize=True, indent=4, sort_keys=True): - if sanitize: - res = self._sanitize_result(result) - else: - res = result - return json.dumps(res, indent=indent, ensure_ascii=False, sort_keys=sort_keys) - - def _sanitize_result(self, result): - res = {} - for k in set(result.keys()).difference(C.RESULT_SANITIZE): - res[k] = result[k] - return res + def _dump_results(self, result, indent=4, sort_keys=True): + return json.dumps(result, indent=indent, ensure_ascii=False, sort_keys=sort_keys) def set_connection_info(self, conn_info): pass From 780e428bd36438cadeeb236facaedce57ceb68e8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 15 Jul 2015 11:55:26 -0400 Subject: [PATCH 1249/3617] fixed typos --- v1/README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/v1/README.md b/v1/README.md index bbc03a45a1328c..98ae99854d801b 100644 --- a/v1/README.md +++ b/v1/README.md @@ -1,8 +1,10 @@ -This is dead code, it is here for convinience for those testing current devel so as to acertain if a bug was introduced in the v2 rewrite or was preexisitng in the 1.x codebase. +This is dead code, it is here for convenience for those testing current devel so as to ascertain if a bug was introduced in the v2 rewrite or was preexisting in the 1.x codebase. Using this code should be equivalent of checking out the v1_last tag, which was devel at a point between 1.9.1 and 1.9.2 releases. 
The stable-1.9 is the maintenance branch for the 1.9.x code, which might continue to diverge from the v1/ tree as bugs get fixed. DO NOT: - * use this code as reference + * use this code as reference * make PRs against this code * expect this code to be shipped with the 2.0 version of ansible + + From 165fff8a1e6e9f5ed6d1d10c136c8c9fbd2a88c1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Jul 2015 11:56:01 -0400 Subject: [PATCH 1250/3617] Fixing module arg parsing splitting when action is a variable Fixes #11122 --- lib/ansible/parsing/mod_args.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index d7cc83a90557d8..ae86471a2d8c61 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -23,7 +23,7 @@ from ansible.errors import AnsibleParserError from ansible.plugins import module_loader -from ansible.parsing.splitter import parse_kv +from ansible.parsing.splitter import parse_kv, split_args # For filtering out modules correctly below RAW_PARAM_MODULES = ([ @@ -91,7 +91,7 @@ def __init__(self, task_ds=dict()): self._task_ds = task_ds - def _split_module_string(self, str): + def _split_module_string(self, module_string): ''' when module names are expressed like: action: copy src=a dest=b @@ -99,7 +99,7 @@ def _split_module_string(self, str): and the rest are strings pertaining to the arguments. 
''' - tokens = str.split() + tokens = split_args(module_string) if len(tokens) > 1: return (tokens[0], " ".join(tokens[1:])) else: @@ -240,17 +240,13 @@ def parse(self): args = dict() - # - # We can have one of action, local_action, or module specified - # - - # this is the 'extra gross' scenario detailed above, so we grab # the args and pass them in as additional arguments, which can/will # be overwritten via dict updates from the other arg sources below # FIXME: add test cases for this additional_args = self._task_ds.get('args', dict()) + # We can have one of action, local_action, or module specified # action if 'action' in self._task_ds: # an old school 'action' statement From d6b058eaaed64a82dcaa1a695380badcedcc9f82 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Jul 2015 11:58:53 -0400 Subject: [PATCH 1251/3617] Removing invocation from async test, as it's pointless --- test/integration/roles/test_async/tasks/main.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/roles/test_async/tasks/main.yml b/test/integration/roles/test_async/tasks/main.yml index 0b9991ec0493b6..4432ad57271b74 100644 --- a/test/integration/roles/test_async/tasks/main.yml +++ b/test/integration/roles/test_async/tasks/main.yml @@ -34,7 +34,6 @@ - "'delta' in async_result" - "'end' in async_result" - "'finished' in async_result" - - "'invocation' in async_result" - "'rc' in async_result" - "'start' in async_result" - "'stderr' in async_result" From 9fe0f21f6a75080b9597ea87f85cbcb90fe41809 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Jul 2015 13:53:59 -0400 Subject: [PATCH 1252/3617] Allow omit to be used on Playbook-level fields Fixes #11173 --- lib/ansible/playbook/base.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index 4ff7f11c097682..fe593c2a1df1ca 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -250,6 +250,9 @@ def 
post_validate(self, templar): if self._loader is not None: basedir = self._loader.get_basedir() + # save the omit value for later checking + omit_value = templar._available_variables.get('omit') + for (name, attribute) in iteritems(self._get_base_attributes()): if getattr(self, name) is None: @@ -268,6 +271,12 @@ def post_validate(self, templar): # if the attribute contains a variable, template it now value = templar.template(getattr(self, name)) + # if this evaluated to the omit value, set the value back to + # the default specified in the FieldAttribute and move on + if omit_value is not None and value == omit_value: + value = attribute.default + continue + # and make sure the attribute is of the type it should be if value is not None: if attribute.isa == 'string': @@ -284,7 +293,7 @@ def post_validate(self, templar): if not isinstance(item, attribute.listof): raise AnsibleParserError("the field '%s' should be a list of %s, but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds()) elif attribute.isa == 'dict' and not isinstance(value, dict): - raise TypeError() + raise TypeError("%s is not a dictionary" % value) # and assign the massaged value back to the attribute field setattr(self, name, value) From 291f07242cb59457687eede689a7948c41c68d2c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Jul 2015 14:36:42 -0400 Subject: [PATCH 1253/3617] Properly return Jinja2 Undefined class for bad hostvars lookups Fixes #11176 --- lib/ansible/vars/hostvars.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py index 9d2c3864893c45..29d1e1aa806621 100644 --- a/lib/ansible/vars/hostvars.py +++ b/lib/ansible/vars/hostvars.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from jinja2 import Undefined as j2undefined + from ansible.template import Templar __all__ = ['HostVars'] @@ -37,6 +39,8 @@ def 
__getitem__(self, host_name): if host_name not in self._lookup: host = self._inventory.get_host(host_name) + if not host: + return j2undefined result = self._vars_manager.get_vars(loader=self._loader, play=self._play, host=host) templar = Templar(variables=result, loader=self._loader) self._lookup[host_name] = templar.template(result, fail_on_undefined=False) From ba7243c5f94b4fcd5ffcfe6edd17d3fb4e9c9eac Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 15 Jul 2015 15:11:46 -0400 Subject: [PATCH 1254/3617] Don't set changed for include tasks Fixes #11197 --- lib/ansible/executor/task_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 287c7431b429bd..06946346902fe2 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -243,7 +243,7 @@ def _execute(self, variables=None): include_variables = self._task.args.copy() include_file = include_variables.get('_raw_params') del include_variables['_raw_params'] - return dict(changed=True, include=include_file, include_variables=include_variables) + return dict(include=include_file, include_variables=include_variables) # get the connection and the handler for this execution self._connection = self._get_connection(variables) From 3d3e1c82a2377848f1a4a892517106c8255bc58d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 15 Jul 2015 13:17:00 -0700 Subject: [PATCH 1255/3617] Have openssl autonegotiate tls protocol on python < 2.7.9 This allows usage of tls-1.1 and tls-1.2 if the underlying openssl library supports it. Unfortunately it also allows sslv2 and sslv3 if the server is only configured to support those. In this day and age, that's probably something that the server administrator should fix anyhow. 
--- lib/ansible/module_utils/urls.py | 33 +++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 2ba19b629f7742..6530ba81e813dc 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -106,6 +106,33 @@ except ImportError: HAS_SSLCONTEXT = False +# Select a protocol that includes all secure tls protocols +# Exclude insecure ssl protocols if possible + +# If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient +PROTOCOL = ssl.PROTOCOL_TLSv1 +if not HAS_SSLCONTEXT and HAS_SSL: + try: + import ctypes, ctypes.util + except ImportError: + # python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl) + pass + else: + libssl_name = ctypes.util.find_library('ssl') + libssl = ctypes.CDLL(libssl_name) + for method in ('TLSv1_1_method', 'TLSv1_2_method'): + try: + libssl[method] + # Found something - we'll let openssl autonegotiate and hope + # the server has disabled sslv2 and 3. best we can do. 
+ PROTOCOL = ssl.PROTOCOL_SSLv23 + break + except AttributeError: + pass + del libssl + + + HAS_MATCH_HOSTNAME = True try: from ssl import match_hostname, CertificateError @@ -304,7 +331,7 @@ def connect(self): if HAS_SSLCONTEXT: self.sock = self.context.wrap_socket(sock, server_hostname=self.host) else: - self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1) + self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) class CustomHTTPSHandler(urllib2.HTTPSHandler): @@ -514,7 +541,7 @@ def http_request(self, req): if context: ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname')) else: - ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=ssl.PROTOCOL_TLSv1) + ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) else: raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' 
% proxy_parts.get('scheme')) @@ -523,7 +550,7 @@ def http_request(self, req): if context: ssl_s = context.wrap_socket(s, server_hostname=self.hostname) else: - ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=ssl.PROTOCOL_TLSv1) + ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) # close the ssl connection #ssl_s.unwrap() From 6ea772931fba2151fb2fb86caab8f7be10cf5769 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Tue, 14 Jul 2015 17:30:51 -0400 Subject: [PATCH 1256/3617] Connection function for boto3 Boto3 conn --- lib/ansible/module_utils/ec2.py | 49 +++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py index 417e1b9521b664..9d406d0890a050 100644 --- a/lib/ansible/module_utils/ec2.py +++ b/lib/ansible/module_utils/ec2.py @@ -46,6 +46,19 @@ 'us-gov-west-1', ] +def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params): + if conn_type not in ['both', 'resource', 'client']: + module.fail_json(msg='There is an issue in the code of the module. 
You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call') + + resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params) + client = resource.meta.client + + if conn_type == 'resource': + return resource + elif conn_type == 'client': + return client + else: + return client, resource def aws_common_argument_spec(): return dict( @@ -72,7 +85,7 @@ def boto_supports_profile_name(): return hasattr(boto.ec2.EC2Connection, 'profile_name') -def get_aws_connection_info(module): +def get_aws_connection_info(module, boto3=False): # Check module args for credentials, then check environment vars # access_key @@ -131,19 +144,31 @@ def get_aws_connection_info(module): # in case security_token came in as empty string security_token = None - boto_params = dict(aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - security_token=security_token) + if boto3: + boto_params = dict(aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + aws_session_token=security_token) + if validate_certs: + boto_params['verify'] = validate_certs - # profile_name only works as a key in boto >= 2.24 - # so only set profile_name if passed as an argument - if profile_name: - if not boto_supports_profile_name(): - module.fail_json("boto does not support profile_name before 2.24") - boto_params['profile_name'] = profile_name + if profile_name: + boto_params['profile_name'] = profile_name - if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"): - boto_params['validate_certs'] = validate_certs + + else: + boto_params = dict(aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + security_token=security_token) + + # profile_name only works as a key in boto >= 2.24 + # so only set profile_name if passed as an argument + if profile_name: + if not boto_supports_profile_name(): + module.fail_json("boto does not support profile_name before 
2.24") + boto_params['profile_name'] = profile_name + + if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"): + boto_params['validate_certs'] = validate_certs return region, ec2_url, boto_params From 5a5b7ff561ce097ede8fd8462cde63b9de2a8d00 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 15 Jul 2015 19:47:59 -0400 Subject: [PATCH 1257/3617] fixed first_available_found for template, refactored into common function added deprecation warning fixed display.deprecated to make version optional (code already assumed this) turned warning + 'deprecated' in plugin loader into actual call to deprecated() --- lib/ansible/plugins/__init__.py | 3 +-- lib/ansible/plugins/action/__init__.py | 24 ++++++++++++++++++++++++ lib/ansible/plugins/action/copy.py | 16 ++-------------- lib/ansible/plugins/action/template.py | 19 ++----------------- lib/ansible/utils/display.py | 2 +- 5 files changed, 30 insertions(+), 34 deletions(-) diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py index d40a4f5f810ac3..c71da6b7d66cb2 100644 --- a/lib/ansible/plugins/__init__.py +++ b/lib/ansible/plugins/__init__.py @@ -250,8 +250,7 @@ def find_plugin(self, name, suffixes=None): if alias_name in self._plugin_path_cache: if not os.path.islink(self._plugin_path_cache[alias_name]): d = Display() - d.warning('%s has been deprecated, which means ' - 'it is kept for backwards compatibility ' + d.deprecated('%s is kept for backwards compatibility ' 'but usage is discouraged. The module ' 'documentation details page may explain ' 'more about this rationale.' 
% diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 49038b29c9136a..5ef52a44f01618 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -448,3 +448,27 @@ def _low_level_execute_command(self, cmd, tmp, sudoable=True, in_data=None, exec rc = 0 return dict(rc=rc, stdout=out, stderr=err) + + def _get_first_available_file(self, faf, of=None, searchdir='files'): + + self._connection._display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead") + for fn in faf: + fn_orig = fn + fnt = self._templar.template(fn) + if self._task._role is not None: + lead = self._task._role._role_path + else: + lead = fnt + fnd = self._loader.path_dwim_relative(lead, searchdir, fnt) + + if not os.path.exists(fnd) and of is not None: + if self._task._role is not None: + lead = self._task._role._role_path + else: + lead = of + fnd = self._loader.path_dwim_relative(lead, searchdir, of) + + if os.path.exists(fnd): + return fnd + + return None diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py index 7f11dfda2f303e..b97981015043cb 100644 --- a/lib/ansible/plugins/action/copy.py +++ b/lib/ansible/plugins/action/copy.py @@ -74,20 +74,8 @@ def run(self, tmp=None, task_vars=dict()): # if we have first_available_file in our vars # look up the files and use the first one we find as src elif faf: - #FIXME: issue deprecation warning for first_available_file, use with_first_found or lookup('first_found',...) 
instead - found = False - for fn in faf: - fn_orig = fn - fnt = self._templar.template(fn) - fnd = self._loader.path_dwim_relative(self._task._role._role_path, 'files', fnt) - of = task_vars.get('_original_file', None) - if not os.path.exists(fnd) and of is not None: - fnd = self._loader.path_dwim_relative(of, 'files', of) - if os.path.exists(fnd): - source = fnd - found = True - break - if not found: + source = self._get_first_available_file(faf, task_vars.get('_original_file', None)) + if source is None: return dict(failed=True, msg="could not find src in first_available_file list") else: if self._task._role is not None: diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index c13dc32b8a7613..09523967504e8f 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -64,23 +64,8 @@ def run(self, tmp=None, task_vars=dict()): tmp = self._make_tmp_path() if faf: - #FIXME: issue deprecation warning for first_available_file, use with_first_found or lookup('first_found',...) 
instead - found = False - for fn in faf: - fn_orig = fn - fnt = self._templar.template(fn) - fnd = self._loader.path_dwim(self._task._role_._role_path, 'templates', fnt) - - if not os.path.exists(fnd): - of = task_vars.get('_original_file', None) - if of is not None: - fnd = self._loader.path_dwim(self._task._role_._role_path, 'templates', of) - - if os.path.exists(fnd): - source = fnd - found = True - break - if not found: + source = self._get_first_available_file(faf, task_vars.get('_original_file', None, 'templates')) + if source is None: return dict(failed=True, msg="could not find src in first_available_file list") else: if self._task._role is not None: diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index a9a4f8bb50a520..ede2b29b8051a5 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -111,7 +111,7 @@ def verbose(self, msg, host=None, caplevel=2): else: self.display("<%s> %s" % (host, msg), color='blue', screen_only=True) - def deprecated(self, msg, version, removed=False): + def deprecated(self, msg, version=None, removed=False): ''' used to print out a deprecation message.''' if not removed and not C.DEPRECATION_WARNINGS: From f2bdd9af29f2e7fb58651be2972541a0fbdd82bd Mon Sep 17 00:00:00 2001 From: Piyush Date: Thu, 16 Jul 2015 17:40:43 +0530 Subject: [PATCH 1258/3617] Fix #11369 A result is skipped when all it's children are skipped. 
This makes it fundamentally different from a result that was changed/failed/unreachable --- lib/ansible/executor/task_result.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py index ad209a036cd998..d633f20736be45 100644 --- a/lib/ansible/executor/task_result.py +++ b/lib/ansible/executor/task_result.py @@ -40,7 +40,14 @@ def is_changed(self): return self._check_key('changed') def is_skipped(self): - return self._check_key('skipped') + if 'results' in self._result: + flag = True + for res in self._result.get('results', []): + if isinstance(res, dict): + flag &= res.get('skipped', False) + return flag + else: + return self._result.get('skipped', False) def is_failed(self): if 'failed_when_result' in self._result or \ From 052f3c2ece45fe4ab10509f3040c71324c1d4fbe Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 16 Jul 2015 11:39:40 -0400 Subject: [PATCH 1259/3617] Fixing allow_duplicate and variable resolution bugs Fixes #11205 --- lib/ansible/playbook/block.py | 2 +- lib/ansible/playbook/role/__init__.py | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 57a22c8cc1d692..c20286c8d9f2d9 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -56,7 +56,7 @@ def get_vars(self): all_vars = dict() if self._role: - all_vars.update(self._role.get_vars()) + all_vars.update(self._role.get_vars(self._dep_chain)) if self._parent_block: all_vars.update(self._parent_block.get_vars()) if self._task_include: diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 71dd00381168d5..d2f03e32b588da 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -160,6 +160,8 @@ def _load_role_data(self, role_include, parent_role=None): if metadata: self._metadata = 
RoleMetadata.load(metadata, owner=self, loader=self._loader) self._dependencies = self._load_dependencies() + else: + self._metadata = RoleMetadata() task_data = self._load_role_yaml('tasks') if task_data: @@ -242,16 +244,16 @@ def get_default_vars(self): default_vars = combine_vars(default_vars, self._default_vars) return default_vars - def get_inherited_vars(self): + def get_inherited_vars(self, dep_chain=[]): inherited_vars = dict() - for parent in self._parents: - inherited_vars = combine_vars(inherited_vars, parent.get_inherited_vars()) + + for parent in dep_chain: inherited_vars = combine_vars(inherited_vars, parent._role_vars) inherited_vars = combine_vars(inherited_vars, parent._role_params) return inherited_vars - def get_vars(self): - all_vars = self.get_inherited_vars() + def get_vars(self, dep_chain=[]): + all_vars = self.get_inherited_vars(dep_chain) for dep in self.get_all_dependencies(): all_vars = combine_vars(all_vars, dep.get_vars()) @@ -296,7 +298,7 @@ def has_run(self): at least one task was run ''' - return self._had_task_run and self._completed + return self._had_task_run and self._completed and not self._metadata.allow_duplicates def compile(self, play, dep_chain=[]): ''' From 86a83c16b871f2a1b9c47854d3de39d6b1dc245b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 16 Jul 2015 15:09:22 -0400 Subject: [PATCH 1260/3617] Remove some dead code from the base load_data method Was causing an odd error which threw off the error detection code when the datastructure was a string corresponding to a variable. 
--- lib/ansible/playbook/base.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py index fe593c2a1df1ca..d4da3dc0044aab 100644 --- a/lib/ansible/playbook/base.py +++ b/lib/ansible/playbook/base.py @@ -154,8 +154,11 @@ def load_data(self, ds, variable_manager=None, loader=None): else: self._loader = DataLoader() - if isinstance(ds, string_types) or isinstance(ds, FileIO): - ds = self._loader.load(ds) + # FIXME: is this required anymore? This doesn't seem to do anything + # helpful, and was added in very early stages of the base class + # development. + #if isinstance(ds, string_types) or isinstance(ds, FileIO): + # ds = self._loader.load(ds) # call the preprocess_data() function to massage the data into # something we can more easily parse, and then call the validation From c603caca27bec4697ee053902f46ae1e0a05930c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 09:57:45 -0400 Subject: [PATCH 1261/3617] removed extra print now that items are getting passed to callback in result --- lib/ansible/executor/task_executor.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 06946346902fe2..a1930e5e14d7e7 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -169,9 +169,6 @@ def _run_loop(self, items): res['item'] = item results.append(res) - # FIXME: we should be sending back a callback result for each item in the loop here - print(res) - return results def _squash_items(self, items, variables): From 5ba9fe47484424f19a6a15646005f8e46011965b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 15:18:33 -0400 Subject: [PATCH 1262/3617] now supports maintainers and author field for display as MAINTAINERS --- lib/ansible/cli/doc.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/doc.py 
b/lib/ansible/cli/doc.py index 7215eb9ee10740..8638bf389720fc 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -285,10 +285,18 @@ def get_man_text(doc): text.append(doc['returndocs']) text.append('') - if isinstance(doc['author'], basestring): - maintainers = [doc['author']] - else: - maintainers = doc['author'] + maintainers = set() + if 'author' in doc: + if isinstance(doc['author'], basestring): + maintainers.add(doc['author']) + else: + maintainers.update(doc['author']) + + if 'maintainers' in doc: + if isinstance(doc['maintainers'], basestring): + maintainers.add(doc['author']) + else: + maintainers.update(doc['author']) text.append('MAINTAINERS: ' + ', '.join(maintainers)) text.append('') From 94fa741f960e6986963ba6ab8fa159425106b62f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 16 Jul 2015 15:23:18 -0400 Subject: [PATCH 1263/3617] Make sure files loaded by template action are decoded properly Fixes #11247 --- lib/ansible/plugins/action/template.py | 28 +++++++++++++------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index c13dc32b8a7613..a188410f651abc 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -25,7 +25,7 @@ from ansible import constants as C from ansible.plugins.action import ActionBase from ansible.utils.hashing import checksum_s -from ansible.utils.unicode import to_bytes +from ansible.utils.unicode import to_bytes, to_unicode class ActionModule(ActionBase): @@ -100,34 +100,34 @@ def run(self, tmp=None, task_vars=dict()): # template the source data locally & get ready to transfer try: with open(source, 'r') as f: - template_data = f.read() + template_data = to_unicode(f.read()) try: template_uid = pwd.getpwuid(os.stat(source).st_uid).pw_name except: template_uid = os.stat(source).st_uid - vars = task_vars.copy() - vars['template_host'] = os.uname()[1] - 
vars['template_path'] = source - vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(source)) - vars['template_uid'] = template_uid - vars['template_fullpath'] = os.path.abspath(source) - vars['template_run_date'] = datetime.datetime.now() + temp_vars = task_vars.copy() + temp_vars['template_host'] = os.uname()[1] + temp_vars['template_path'] = source + temp_vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(source)) + temp_vars['template_uid'] = template_uid + temp_vars['template_fullpath'] = os.path.abspath(source) + temp_vars['template_run_date'] = datetime.datetime.now() managed_default = C.DEFAULT_MANAGED_STR managed_str = managed_default.format( - host = vars['template_host'], - uid = vars['template_uid'], - file = to_bytes(vars['template_path']) + host = temp_vars['template_host'], + uid = temp_vars['template_uid'], + file = to_bytes(temp_vars['template_path']) ) - vars['ansible_managed'] = time.strftime( + temp_vars['ansible_managed'] = time.strftime( managed_str, time.localtime(os.path.getmtime(source)) ) old_vars = self._templar._available_variables - self._templar.set_available_variables(vars) + self._templar.set_available_variables(temp_vars) resultant = self._templar.template(template_data, preserve_trailing_newlines=True) self._templar.set_available_variables(old_vars) except Exception as e: From db4f6b88788fce28e2b42e1dbbc09b58a79cff04 Mon Sep 17 00:00:00 2001 From: Jens Carl Date: Thu, 16 Jul 2015 19:56:21 +0000 Subject: [PATCH 1264/3617] Fix to handle user directory correctly (e.g. ~/.ansible/tmp). --- contrib/inventory/vmware.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/inventory/vmware.py b/contrib/inventory/vmware.py index 1d533a5e157645..b708d5999467a9 100755 --- a/contrib/inventory/vmware.py +++ b/contrib/inventory/vmware.py @@ -95,7 +95,7 @@ def _put_cache(self, name, value): Saves the value to cache with the name given. 
''' if self.config.has_option('defaults', 'cache_dir'): - cache_dir = self.config.get('defaults', 'cache_dir') + cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir')) if not os.path.exists(cache_dir): os.makedirs(cache_dir) cache_file = os.path.join(cache_dir, name) From 978390693b1180934dde6f85d5ba04b4202b1162 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 16:44:33 -0400 Subject: [PATCH 1265/3617] changed to default 'auto' as it better describes the use= option --- lib/ansible/plugins/action/package.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/action/package.py b/lib/ansible/plugins/action/package.py index 89ac1b026c02b1..6dfabf3949273b 100644 --- a/lib/ansible/plugins/action/package.py +++ b/lib/ansible/plugins/action/package.py @@ -29,20 +29,21 @@ def run(self, tmp=None, task_vars=dict()): name = self._task.args.get('name', None) state = self._task.args.get('state', None) - module = self._task.args.get('use', None) + module = self._task.args.get('use', 'auto') - if module is None: + if module == 'auto': try: module = self._templar.template('{{ansible_pkg_mgr}}') except: pass # could not get it from template! 
- if module is None: - #TODO: autodetect the package manager, by invoking that specific fact snippet remotely + if module == 'auto': + #FIXME: autodetect the package manager run facts module remotely to get ansible_pkg_mgr + #module = self._execute_module(module_name=setup, module_args={filter: 'ansible_pkg_mgr'}, task_vars=task_vars) pass - if module is not None: + if module != 'auto': # run the 'package' module new_module_args = self._task.args.copy() if 'use' in new_module_args: From 888bda93c19bfc03db896c3b8e87b1c056798d26 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 16:51:26 -0400 Subject: [PATCH 1266/3617] added elasticsearch_plugin to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a14c4589609a5b..7bdaa6fb54d50c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 @@ New Modules: * cloudstack: cs_vmsnapshot * datadog_monitor * dpkg_selections + * elasticsearch_plugin * expect * find * hall From d23ab261e181cdfef8bfa71597d40c6e9cb01972 Mon Sep 17 00:00:00 2001 From: Gerard Lynch Date: Thu, 16 Jul 2015 23:00:17 +0100 Subject: [PATCH 1267/3617] fixes 11607, allows ansible_ssh_port to be overridden from group or host_vars --- lib/ansible/inventory/host.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index c14a6f4a25e52c..c8083edb928656 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -78,8 +78,6 @@ def __init__(self, name=None, port=None): if port and port != C.DEFAULT_REMOTE_PORT: self.set_variable('ansible_ssh_port', int(port)) - else: - self.set_variable('ansible_ssh_port', C.DEFAULT_REMOTE_PORT) self._gathered_facts = False @@ -124,6 +122,10 @@ def get_vars(self): results['inventory_hostname'] = self.name results['inventory_hostname_short'] = self.name.split('.')[0] results['ansible_ssh_host'] = self.ipv4_address + + if 'ansible_ssh_port' not in 
results: + results['ansible_ssh_port'] = C.DEFAULT_REMOTE_PORT + results['group_names'] = sorted([ g.name for g in groups if g.name != 'all']) return results From 3c7a502c503c9d2171cbd90ed1ad44da1ec18f5c Mon Sep 17 00:00:00 2001 From: Gerard Lynch Date: Thu, 16 Jul 2015 23:56:18 +0100 Subject: [PATCH 1268/3617] updated to new location and non-classness of module_common --- hacking/test-module | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hacking/test-module b/hacking/test-module index 953f834aad0653..681e52a9c80a84 100755 --- a/hacking/test-module +++ b/hacking/test-module @@ -37,7 +37,7 @@ import optparse import ansible.utils as utils from ansible.parsing.utils.jsonify import jsonify from ansible.parsing.splitter import parse_kv -import ansible.module_common as module_common +import ansible.executor.module_common as module_common import ansible.constants as C try: @@ -89,7 +89,7 @@ def boilerplate_module(modfile, args, interpreter, check): #module_data = module_fh.read() #module_fh.close() - replacer = module_common.ModuleReplacer() + #replacer = module_common.ModuleReplacer() #included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1 @@ -118,7 +118,7 @@ def boilerplate_module(modfile, args, interpreter, check): if check: complex_args['CHECKMODE'] = True - (module_data, module_style, shebang) = replacer.modify_module( + (module_data, module_style, shebang) = module_common.modify_module( modfile, complex_args, args, From d70c88bf8c79de0c6e85fccda18bec5015cfebb8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 19:08:13 -0400 Subject: [PATCH 1269/3617] added /os_nova_flavor to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7bdaa6fb54d50c..8c0b452c62f0db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,7 @@ New Modules: * openstack: os_floating_ip * openstack: os_image * openstack: 
os_network + * openstack: os_nova_flavor * openstack: os_object * openstack: os_security_group * openstack: os_security_group_rule From 28e2eae902d3cd623e5739a4edd979de3d6e0c2b Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Fri, 17 Jul 2015 12:56:27 +0530 Subject: [PATCH 1270/3617] Make gathering=explicit work again There was a confusion between the valid values for defaults.gathering (explicit/implicit/smart) and a play's gather_facts setting (boolean), which resulted in gathering=explicit being ignored. --- lib/ansible/executor/play_iterator.py | 14 +++++++++++++- lib/ansible/playbook/play.py | 2 +- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 2ca3815e4194fb..8deeac8b4dda7a 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible import constants as C + from ansible.errors import * from ansible.playbook.block import Block from ansible.playbook.task import Task @@ -130,7 +132,17 @@ def get_next_task_for_host(self, host, peek=False): elif s.run_state == self.ITERATING_SETUP: s.run_state = self.ITERATING_TASKS s.pending_setup = True - if self._play.gather_facts == 'smart' and not host._gathered_facts or boolean(self._play.gather_facts): + + # Gather facts if the default is 'smart' and we have not yet + # done it for this host; or if 'explicit' and the play sets + # gather_facts to True; or if 'implicit' and the play does + # NOT explicitly set gather_facts to False. 
+ + gathering = C.DEFAULT_GATHERING + if ((gathering == 'smart' and not host._gathered_facts) or + (gathering == 'explicit' and boolean(self._play.gather_facts)) or + (gathering == 'implicit' and + (self._play.gather_facts is None or boolean(self._play.gather_facts)))): if not peek: # mark the host as having gathered facts host.set_gathered_facts(True) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index 2d31adec64c8b1..ecaeac23622241 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -58,7 +58,7 @@ class Play(Base, Taggable, Become): _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port # Connection - _gather_facts = FieldAttribute(isa='string', default='smart') + _gather_facts = FieldAttribute(isa='bool', default=None) _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types) _name = FieldAttribute(isa='string', default='') From 2f51f3bbc577495822f7d81af4a6cdbd7c499dda Mon Sep 17 00:00:00 2001 From: Gerard Lynch Date: Fri, 17 Jul 2015 11:44:00 +0100 Subject: [PATCH 1271/3617] updated to use new loader --- hacking/test-module | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/hacking/test-module b/hacking/test-module index 0cbddf60735d06..daa6edf6e2e8c0 100755 --- a/hacking/test-module +++ b/hacking/test-module @@ -34,7 +34,8 @@ import os import subprocess import traceback import optparse -import ansible.utils as utils +import ansible.utils.vars as utils_vars +from ansible.parsing import DataLoader from ansible.parsing.utils.jsonify import jsonify from ansible.parsing.splitter import parse_kv import ansible.executor.module_common as module_common @@ -91,17 +92,18 @@ def boilerplate_module(modfile, args, interpreter, check): #module_fh.close() #replacer = module_common.ModuleReplacer() + loader = DataLoader() #included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != 
-1 complex_args = {} if args.startswith("@"): # Argument is a YAML file (JSON is a subset of YAML) - complex_args = utils.combine_vars(complex_args, utils.parse_yaml_from_file(args[1:])) + complex_args = utils_vars.combine_vars(complex_args, loader.load_from_file(args[1:])) args='' elif args.startswith("{"): # Argument is a YAML document (not a file) - complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args)) + complex_args = utils_vars.combine_vars(complex_args, loader.load(args)) args='' inject = {} From 097ed1f17bbe76e0edde3071e00fbca068312fcb Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 17 Jul 2015 13:04:31 +0100 Subject: [PATCH 1272/3617] Add plugin that profiles playbook tasks Resubmission of https://github.com/ansible/ansible/pull/11270 to correct v2 file location. [Description and console output demonstration](https://github.com/aioue/ansible-plugin-profile/blob/mast er/README.md#features). Provides per-task timing, ongoing playbook elapsed time and ordered list of top 20 longest running tasks at end. --- lib/ansible/plugins/callback/profile_tasks.py | 106 ++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 lib/ansible/plugins/callback/profile_tasks.py diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py new file mode 100644 index 00000000000000..58dbdb16ecfae0 --- /dev/null +++ b/lib/ansible/plugins/callback/profile_tasks.py @@ -0,0 +1,106 @@ +# (C) 2015, Tom Paine, +# (C) 2014, Jharrod LaFon, @JharrodLaFon +# (C) 2012-2013, Michael DeHaan, +# +# This file is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# File is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# See for a copy of the +# GNU General Public License + +# Provides per-task timing, ongoing playbook elapsed time and +# ordered list of top 20 longest running tasks at end + +import time + +from ansible.callbacks import display + + +# define start time +t0 = tn = time.time() + + +def secondsToStr(t): + # http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds + rediv = lambda ll, b: list(divmod(ll[0], b)) + ll[1:] + return "%d:%02d:%02d.%03d" % tuple(reduce(rediv, [[t * 1000, ], 1000, 60, 60])) + + +def filled(msg, fchar="*"): + if len(msg) == 0: + width = 79 + else: + msg = "%s " % msg + width = 79 - len(msg) + if width < 3: + width = 3 + filler = fchar * width + return "%s%s " % (msg, filler) + + +def timestamp(self): + if self.current is not None: + self.stats[self.current] = time.time() - self.stats[self.current] + + +def tasktime(): + global tn + time_current = time.strftime('%A %d %B %Y %H:%M:%S %z') + time_elapsed = secondsToStr(time.time() - tn) + time_total_elapsed = secondsToStr(time.time() - t0) + display(filled('%s (%s)%s%s' % (time_current, time_elapsed, ' ' * 7, time_total_elapsed))) + tn = time.time() + + +class CallbackModule(object): + + def __init__(self): + self.stats = {} + self.current = None + + def playbook_on_task_start(self, name, is_conditional): + """ + Logs the start of each task + """ + tasktime() + timestamp(self) + + # Record the start time of the current task + self.current = name + self.stats[self.current] = time.time() + + def playbook_on_setup(self): + tasktime() + + def playbook_on_stats(self, stats): + tasktime() + display(filled("", fchar="=")) + + timestamp(self) + + # Sort the tasks by their running time + results = sorted( + 
self.stats.items(), + key=lambda value: value[1], + reverse=True, + ) + + # Just keep the top 20 + results = results[:20] + + # Print the timings + for name, elapsed in results: + print( + "{0:-<70}{1:->9}".format( + '{0} '.format(name), + ' {0:.02f}s'.format(elapsed), + ) + ) + print '' From 10e5c2b46d42b20d58c445b55788e1bc8117cf52 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 08:54:28 -0400 Subject: [PATCH 1273/3617] fixed var scope --- lib/ansible/plugins/callback/timer.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/ansible/plugins/callback/timer.py b/lib/ansible/plugins/callback/timer.py index 058cb4f4a4d787..f75b55e4be6d19 100644 --- a/lib/ansible/plugins/callback/timer.py +++ b/lib/ansible/plugins/callback/timer.py @@ -12,13 +12,11 @@ class CallbackModule(CallbackBase): CALLBACK_TYPE = 'aggregate' CALLBACK_NAME = 'timer' - start_time = datetime.now() - def __init__(self, display): super(CallbackModule, self).__init__(display) - start_time = datetime.now() + self.start_time = datetime.now() def days_hours_minutes_seconds(self, timedelta): minutes = (timedelta.seconds//60)%60 From a09f6236a5f9ace208e7b17893e67c386abaa802 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 08:55:22 -0400 Subject: [PATCH 1274/3617] adapated to v2 --- lib/ansible/plugins/callback/profile_tasks.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py index 58dbdb16ecfae0..90ee25d3a2982b 100644 --- a/lib/ansible/plugins/callback/profile_tasks.py +++ b/lib/ansible/plugins/callback/profile_tasks.py @@ -20,13 +20,11 @@ import time -from ansible.callbacks import display - +from ansible.plugins.callback import CallbackBase # define start time t0 = tn = time.time() - def secondsToStr(t): # http://bytes.com/topic/python/answers/635958-handy-short-cut-formatting-elapsed-time-floating-point-seconds rediv = lambda 
ll, b: list(divmod(ll[0], b)) + ll[1:] @@ -59,12 +57,15 @@ def tasktime(): tn = time.time() -class CallbackModule(object): +class CallbackModule(CallbackBase): - def __init__(self): + def __init__(self, display): self.stats = {} self.current = None + super(CallbackModule, self).__init__(display) + + def playbook_on_task_start(self, name, is_conditional): """ Logs the start of each task @@ -97,10 +98,9 @@ def playbook_on_stats(self, stats): # Print the timings for name, elapsed in results: - print( + self.display.display( "{0:-<70}{1:->9}".format( '{0} '.format(name), ' {0:.02f}s'.format(elapsed), ) ) - print '' From 1aeb66148bcb97eae716bbe86430abb157157bbd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 19:45:44 -0400 Subject: [PATCH 1275/3617] actually now does what it says as it was just sorting by name --- hacking/authors.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/authors.sh b/hacking/authors.sh index 7c97840b2fbc83..528c3d82749e65 100755 --- a/hacking/authors.sh +++ b/hacking/authors.sh @@ -4,7 +4,7 @@ set -e # Get a list of authors ordered by number of commits # and remove the commit count column -AUTHORS=$(git --no-pager shortlog -nse | cut -f 2- | sort -f) +AUTHORS=$(git --no-pager shortlog -nse | cut -f 2- ) if [ -z "$AUTHORS" ] ; then echo "Authors list was empty" exit 1 From 811b10d13274ee017984d3470361443749ccc224 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 23:08:54 -0400 Subject: [PATCH 1276/3617] docs will not mention versions older than 1.5 --- hacking/module_formatter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index acddd700930098..72a4613adb1fb7 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -41,7 +41,7 @@ # if a module is added in a version of Ansible older than this, don't print the version added information # in the module documentation because everyone is 
assumed to be running something newer than this already. -TO_OLD_TO_BE_NOTABLE = 1.0 +TO_OLD_TO_BE_NOTABLE = 1.5 # Get parent directory of the directory this script lives in MODULEDIR=os.path.abspath(os.path.join( From a91eee358cc992ecfa68d482e8a8e65c4ed7c57f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 00:45:33 -0400 Subject: [PATCH 1277/3617] fixed title underline length --- docsite/rst/playbooks_best_practices.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst index 4347c4841f6ede..343d4bcc22d377 100644 --- a/docsite/rst/playbooks_best_practices.rst +++ b/docsite/rst/playbooks_best_practices.rst @@ -288,7 +288,7 @@ keep the OS configuration in separate playbooks from the app deployment. .. _staging_vs_production: Staging vs Production -+++++++++++++++++++ ++++++++++++++++++++++ As also mentioned above, a good way to keep your staging (or testing) and production environments separate is to use a separate inventory file for staging and production. This way you pick with -i what you are targeting. Keeping them all in one file can lead to surprises! 
From 8df71febb7cbc6d27d26d1c70ae5d6392bc1059a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 01:12:54 -0400 Subject: [PATCH 1278/3617] added missing win_unzip to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c0b452c62f0db..a1ff156a2a08cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -120,6 +120,7 @@ New Modules: * win_iis_webbinding * win_iis_website * win_regedit + * win_unzip * zabbix_host * zabbix_hostmacro * zabbix_screen From 6ba706f7536971f9c5f7ce874e570a6c5c0353e0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 10:00:02 -0400 Subject: [PATCH 1279/3617] minor doc reformatting now version_added < 1.3 does not get shown, up from 1.0 option's version_added is also now filterd against this threshold module version_added is more prominent exaples now uses pure rst instead of intermingled with html formatting aliases now shown in description for options bad version fields now throw warnings instead of exceptions ansible-doc errors now show traceback in very very verbose mode, for easier debugging --- hacking/module_formatter.py | 29 ++++++++++++++++++--------- hacking/templates/rst.j2 | 39 +++++++++++++------------------------ lib/ansible/cli/doc.py | 1 + 3 files changed, 35 insertions(+), 34 deletions(-) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index 72a4613adb1fb7..443e660958801c 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -31,6 +31,7 @@ import datetime import subprocess import cgi +import warnings from jinja2 import Environment, FileSystemLoader from ansible.utils import module_docs @@ -41,7 +42,7 @@ # if a module is added in a version of Ansible older than this, don't print the version added information # in the module documentation because everyone is assumed to be running something newer than this already. 
-TO_OLD_TO_BE_NOTABLE = 1.5 +TO_OLD_TO_BE_NOTABLE = 1.3 # Get parent directory of the directory this script lives in MODULEDIR=os.path.abspath(os.path.join( @@ -214,6 +215,17 @@ def jinja2_environment(template_dir, typ): return env, template, outputname ##################################################################################### +def too_old(added): + if not added: + return False + try: + added_tokens = str(added).split(".") + readded = added_tokens[0] + "." + added_tokens[1] + added_float = float(readded) + except ValueError as e: + warnings.warn("Could not parse %s: %s" % (added, str(e))) + return False + return (added_float < TO_OLD_TO_BE_NOTABLE) def process_module(module, options, env, template, outputname, module_map, aliases): @@ -271,15 +283,15 @@ def process_module(module, options, env, template, outputname, module_map, alias added = doc['version_added'] # don't show version added information if it's too old to be called out - if added: - added_tokens = str(added).split(".") - added = added_tokens[0] + "." 
+ added_tokens[1] - added_float = float(added) - if added and added_float < TO_OLD_TO_BE_NOTABLE: - del doc['version_added'] + if too_old(added): + del doc['version_added'] if 'options' in doc: for (k,v) in doc['options'].iteritems(): + # don't show version added information if it's too old to be called out + if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']): + del doc['options'][k]['version_added'] + continue all_keys.append(k) all_keys = sorted(all_keys) @@ -329,7 +341,7 @@ def process_category(category, categories, options, env, template, outputname): category_file = open(category_file_path, "w") print "*** recording category %s in %s ***" % (category, category_file_path) - # TODO: start a new category file + # start a new category file category = category.replace("_"," ") category = category.title() @@ -352,7 +364,6 @@ def process_category(category, categories, options, env, template, outputname): deprecated.append(module) elif '/core/' in module_map[module]: core.append(module) - modules.append(module) modules.sort() diff --git a/hacking/templates/rst.j2 b/hacking/templates/rst.j2 index a30e16e41f1527..fbf50f4922b8d6 100644 --- a/hacking/templates/rst.j2 +++ b/hacking/templates/rst.j2 @@ -10,6 +10,11 @@ @{ title }@ @{ '+' * title_len }@ +{% if version_added is defined -%} +.. versionadded:: @{ version_added }@ +{% endif %} + + .. contents:: :local: :depth: 1 @@ -21,10 +26,6 @@ # --------------------------------------------#} -{% if aliases is defined -%} -Aliases: @{ ','.join(aliases) }@ -{% endif %} - {% if deprecated is defined -%} DEPRECATED ---------- @@ -35,14 +36,13 @@ DEPRECATED Synopsis -------- -{% if version_added is defined -%} -.. versionadded:: @{ version_added }@ -{% endif %} - {% for desc in description -%} @{ desc | convert_symbols_to_format }@ {% endfor %} +{% if aliases is defined -%} +Aliases: @{ ','.join(aliases) }@ +{% endif %} {% if requirements %} Requirements @@ -79,37 +79,26 @@ Options {% else %}
    {% for choice in v.get('choices',[]) -%}
  • @{ choice }@
  • {% endfor -%}
{% endif %} - {% for desc in v.description -%}@{ desc | html_ify }@{% endfor -%} - + {% for desc in v.description -%}
@{ desc | html_ify }@
{% endfor -%} {% if 'aliases' in v and v.aliases -%}
+
aliases: @{ v.aliases|join(', ') }@
{%- endif %} {% endfor %} +
{% endif %} - {% if examples or plainexamples -%} Examples -------- -.. raw:: html + :: {% for example in examples %} - {% if example['description'] %}

@{ example['description'] | html_ify }@

{% endif %} -

-

+{% if example['description'] %}@{ example['description'] | indent(4, True) }@{% endif %}
 @{ example['code'] | escape | indent(4, True) }@
-    
-

{% endfor %} -
- -{% if plainexamples %} - -:: - -@{ plainexamples | indent(4, True) }@ -{% endif %} +{% if plainexamples %}@{ plainexamples | indent(4, True) }@{% endif %} {% endif %} diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index 8638bf389720fc..910255cda778e2 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -122,6 +122,7 @@ def run(self): # probably a quoting issue. raise AnsibleError("Parsing produced an empty object.") except Exception, e: + self.display.vvv(traceback.print_exc()) raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e))) CLI.pager(text) From a6c8d30f3e3e9fd99e9b23463d52031ffa45c699 Mon Sep 17 00:00:00 2001 From: Gerard Lynch Date: Fri, 17 Jul 2015 15:26:46 +0100 Subject: [PATCH 1280/3617] callbacks require a version constant or the v2 code doesn't pass the display param and it gives an error --- lib/ansible/plugins/callback/profile_tasks.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py index 90ee25d3a2982b..f873b75ead0cf3 100644 --- a/lib/ansible/plugins/callback/profile_tasks.py +++ b/lib/ansible/plugins/callback/profile_tasks.py @@ -58,7 +58,14 @@ def tasktime(): class CallbackModule(CallbackBase): - + """ + This callback module provides per-task timing, ongoing playbook elapsed time + and ordered list of top 20 longest running tasks at end. 
+ """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'profile_tasks' + def __init__(self, display): self.stats = {} self.current = None From 8d1549900c65d622dbb129e9f957de7aa4ff84a5 Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Fri, 17 Jul 2015 17:36:37 +0200 Subject: [PATCH 1281/3617] fix AnsibleError object name in subelements plugin fixes #11624 --- lib/ansible/plugins/lookup/subelements.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/lookup/subelements.py b/lib/ansible/plugins/lookup/subelements.py index b934a053ebf36d..d8c2b1086e389c 100644 --- a/lib/ansible/plugins/lookup/subelements.py +++ b/lib/ansible/plugins/lookup/subelements.py @@ -30,7 +30,7 @@ class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): def _raise_terms_error(msg=""): - raise errors.AnsibleError( + raise AnsibleError( "subelements lookup expects a list of two or three items, " + msg) terms = listify_lookup_plugin_terms(terms, variables, loader=self._loader) @@ -66,7 +66,7 @@ def _raise_terms_error(msg=""): ret = [] for item0 in elementlist: if not isinstance(item0, dict): - raise errors.AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0) + raise AnsibleError("subelements lookup expects a dictionary, got '%s'" % item0) if item0.get('skipped', False) is not False: # this particular item is to be skipped continue @@ -82,18 +82,18 @@ def _raise_terms_error(msg=""): if skip_missing: continue else: - raise errors.AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue)) + raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subkey, subvalue)) if not lastsubkey: if not isinstance(subvalue[subkey], dict): if skip_missing: continue else: - raise errors.AnsibleError("the key %s should point to a dictionary, got '%s'" % (subkey, subvalue[subkey])) + raise AnsibleError("the key %s should point to a dictionary, got '%s'" % 
(subkey, subvalue[subkey])) else: subvalue = subvalue[subkey] else: # lastsubkey if not isinstance(subvalue[subkey], list): - raise errors.AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey])) + raise AnsibleError("the key %s should point to a list, got '%s'" % (subkey, subvalue[subkey])) else: sublist = subvalue.pop(subkey, []) for item1 in sublist: From 5abdd3b821e3ae012aa4f57dc7ce663de1e8f319 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 17 Jul 2015 12:02:26 -0400 Subject: [PATCH 1282/3617] Handle notifications when coupled with a loop Fixes #11606 --- lib/ansible/executor/process/result.py | 26 ++++++++++++++------------ lib/ansible/executor/task_executor.py | 6 ++++++ lib/ansible/plugins/action/normal.py | 10 ++++++++-- 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index baf7afcf5b4faf..68a458bd869eff 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -122,18 +122,6 @@ def run(self): elif result.is_skipped(): self._send_result(('host_task_skipped', result)) else: - # if this task is notifying a handler, do it now - if result._task.notify and result._result.get('changed', False): - # The shared dictionary for notified handlers is a proxy, which - # does not detect when sub-objects within the proxy are modified. 
- # So, per the docs, we reassign the list so the proxy picks up and - # notifies all other threads - for notify in result._task.notify: - if result._task._role: - role_name = result._task._role.get_name() - notify = "%s : %s" %(role_name, notify) - self._send_result(('notify_handler', result._host, notify)) - if result._task.loop: # this task had a loop, and has more than one result, so # loop over all of them instead of a single result @@ -142,6 +130,20 @@ def run(self): result_items = [ result._result ] for result_item in result_items: + # if this task is notifying a handler, do it now + if 'ansible_notify' in result_item and result.is_changed(): + # The shared dictionary for notified handlers is a proxy, which + # does not detect when sub-objects within the proxy are modified. + # So, per the docs, we reassign the list so the proxy picks up and + # notifies all other threads + for notify in result_item['ansible_notify']: + if result._task._role: + role_name = result._task._role.get_name() + notify = "%s : %s" % (role_name, notify) + self._send_result(('notify_handler', result._host, notify)) + # now remove the notify field from the results, as its no longer needed + result_item.pop('ansible_notify') + if 'add_host' in result_item: # this task added a new host (add_host module) self._send_result(('add_host', result_item)) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index a1930e5e14d7e7..4322310603f293 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -330,6 +330,12 @@ def _execute(self, variables=None): if 'ansible_facts' in result: variables.update(result['ansible_facts']) + # save the notification target in the result, if it was specified, as + # this task may be running in a loop in which case the notification + # may be item-specific, ie. 
"notify: service {{item}}" + if self._task.notify: + result['ansible_notify'] = self._task.notify + # and return debug("attempt loop complete, returning result") return result diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py index 445d8a7ae77f02..8e2f5c84cdf11b 100644 --- a/lib/ansible/plugins/action/normal.py +++ b/lib/ansible/plugins/action/normal.py @@ -23,7 +23,13 @@ class ActionModule(ActionBase): def run(self, tmp=None, task_vars=dict()): - #vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host) - return self._execute_module(tmp, task_vars=task_vars) + results = self._execute_module(tmp, task_vars=task_vars) + # Remove special fields from the result, which can only be set + # internally by the executor engine. We do this only here in + # the 'normal' action, as other action plugins may set this. + for field in ('ansible_facts', 'ansible_notify'): + if field in results: + results.pop(field) + return results From d4ac73a1bc3c09b7a5d7036d138f73584fadeb94 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 17 Jul 2015 13:44:22 -0400 Subject: [PATCH 1283/3617] Adding back capability to display warnings contained in results Fixes #11255 --- lib/ansible/plugins/callback/default.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index cff5fa1ad75e72..b3ac6ca8ddcf40 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible import constants as C from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): @@ -71,6 +72,11 @@ def v2_runner_on_ok(self, result): msg += " => %s" % self._dump_results(result._result, indent=indent) self._display.display(msg, color=color) + # display warnings, if enabled and any exist in the result + if 
C.COMMAND_WARNINGS and 'warnings' in result._result and result._result['warnings']: + for warning in result._result['warnings']: + self._display.display("warning: %s" % warning, color='purple') + def v2_runner_on_skipped(self, result): msg = "skipping: [%s]" % result._host.get_name() if self._display.verbosity > 0 or 'verbose_always' in result._result: From 1aa415526663bd2b11a1098c34200bee055671e1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 14:14:15 -0400 Subject: [PATCH 1284/3617] generalized warning handling, added it to adhoc also --- lib/ansible/plugins/callback/__init__.py | 8 ++++++++ lib/ansible/plugins/callback/default.py | 6 +----- lib/ansible/plugins/callback/minimal.py | 1 + 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index ea56d758a7ec7c..de5a92837fe90f 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -21,6 +21,8 @@ import json +from ansible import constants as C + __all__ = ["CallbackBase"] @@ -46,6 +48,12 @@ def __init__(self, display): def _dump_results(self, result, indent=4, sort_keys=True): return json.dumps(result, indent=indent, ensure_ascii=False, sort_keys=sort_keys) + def _handle_warnings(self, res): + ''' display warnings, if enabled and any exist in the result ''' + if C.COMMAND_WARNINGS and 'warnings' in res and res['warnings']: + for warning in res['warnings']: + self._display.warning(warning) + def set_connection_info(self, conn_info): pass diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index b3ac6ca8ddcf40..8fbb0654bef868 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -19,7 +19,6 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible import constants as C from ansible.plugins.callback import CallbackBase class 
CallbackModule(CallbackBase): @@ -72,10 +71,7 @@ def v2_runner_on_ok(self, result): msg += " => %s" % self._dump_results(result._result, indent=indent) self._display.display(msg, color=color) - # display warnings, if enabled and any exist in the result - if C.COMMAND_WARNINGS and 'warnings' in result._result and result._result['warnings']: - for warning in result._result['warnings']: - self._display.display("warning: %s" % warning, color='purple') + self._handle_warnings(result._result) def v2_runner_on_skipped(self, result): msg = "skipping: [%s]" % result._host.get_name() diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index dd61ee023a1317..8b3ac325eb3825 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -51,6 +51,7 @@ def v2_runner_on_failed(self, result, ignore_errors=False): def v2_runner_on_ok(self, result): self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result)), color='green') + self._handle_warnings(result._result) def v2_runner_on_skipped(self, result): self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan') From 271a7f3281121087f7d66f01971a0a54c5b6cc6e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 17 Jul 2015 14:44:05 -0400 Subject: [PATCH 1285/3617] Cleaning up some of the notify/facts logic added earlier to fix problems --- lib/ansible/executor/process/result.py | 21 +++++++++++---------- lib/ansible/executor/task_executor.py | 2 +- lib/ansible/plugins/action/normal.py | 2 +- lib/ansible/plugins/strategies/__init__.py | 3 +-- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py index 68a458bd869eff..8961b43ce4411a 100644 --- a/lib/ansible/executor/process/result.py +++ b/lib/ansible/executor/process/result.py @@ -131,16 +131,17 @@ def run(self): for result_item in result_items: # 
if this task is notifying a handler, do it now - if 'ansible_notify' in result_item and result.is_changed(): - # The shared dictionary for notified handlers is a proxy, which - # does not detect when sub-objects within the proxy are modified. - # So, per the docs, we reassign the list so the proxy picks up and - # notifies all other threads - for notify in result_item['ansible_notify']: - if result._task._role: - role_name = result._task._role.get_name() - notify = "%s : %s" % (role_name, notify) - self._send_result(('notify_handler', result._host, notify)) + if 'ansible_notify' in result_item: + if result.is_changed(): + # The shared dictionary for notified handlers is a proxy, which + # does not detect when sub-objects within the proxy are modified. + # So, per the docs, we reassign the list so the proxy picks up and + # notifies all other threads + for notify in result_item['ansible_notify']: + if result._task._role: + role_name = result._task._role.get_name() + notify = "%s : %s" % (role_name, notify) + self._send_result(('notify_handler', result._host, notify)) # now remove the notify field from the results, as its no longer needed result_item.pop('ansible_notify') diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 4322310603f293..8393b6145971da 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -333,7 +333,7 @@ def _execute(self, variables=None): # save the notification target in the result, if it was specified, as # this task may be running in a loop in which case the notification # may be item-specific, ie. 
"notify: service {{item}}" - if self._task.notify: + if self._task.notify is not None: result['ansible_notify'] = self._task.notify # and return diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py index 8e2f5c84cdf11b..763b1d5ea77570 100644 --- a/lib/ansible/plugins/action/normal.py +++ b/lib/ansible/plugins/action/normal.py @@ -28,7 +28,7 @@ def run(self, tmp=None, task_vars=dict()): # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. - for field in ('ansible_facts', 'ansible_notify'): + for field in ('ansible_notify',): if field in results: results.pop(field) diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py index 1b4c1a2c1d6dfd..c9154556bfa48b 100644 --- a/lib/ansible/plugins/strategies/__init__.py +++ b/lib/ansible/plugins/strategies/__init__.py @@ -213,7 +213,6 @@ def _process_pending_results(self, iterator): elif result[0] == 'notify_handler': host = result[1] handler_name = result[2] - if handler_name not in self._notified_handlers: self._notified_handlers[handler_name] = [] @@ -425,7 +424,7 @@ def run_handlers(self, iterator, connection_info): task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler) task_vars = self.add_tqm_variables(task_vars, play=iterator._play) self._queue_task(host, handler, task_vars, connection_info) - handler.flag_for_host(host) + #handler.flag_for_host(host) self._process_pending_results(iterator) self._wait_on_pending_results(iterator) # wipe the notification list From 1873e8ed081f9d0a6dd5f9b1e743fc0520c2d1bb Mon Sep 17 00:00:00 2001 From: Mathieu Lecarme Date: Fri, 17 Jul 2015 22:28:30 +0200 Subject: [PATCH 1286/3617] GCE tag prefix for creating ansible group. 
--- contrib/inventory/gce.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/contrib/inventory/gce.py b/contrib/inventory/gce.py index 59947fb1665cc3..740e112332cc5e 100755 --- a/contrib/inventory/gce.py +++ b/contrib/inventory/gce.py @@ -257,7 +257,10 @@ def group_instances(self): tags = node.extra['tags'] for t in tags: - tag = 'tag_%s' % t + if t.startswith('group-'): + tag = t[6:] + else: + tag = 'tag_%s' % t if groups.has_key(tag): groups[tag].append(name) else: groups[tag] = [name] From 36c9eeced502868138ba7cb1055690530f7f28cf Mon Sep 17 00:00:00 2001 From: John Mitchell Date: Fri, 17 Jul 2015 17:41:57 -0400 Subject: [PATCH 1287/3617] comment out docs remarketing code because it adds a weird black bar --- docsite/_themes/srtd/layout.html | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 158f45008e9e74..93d4cd30165352 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -113,7 +113,7 @@ } - + + End of Google Code for Remarketing Tag --> @@ -147,7 +147,7 @@

-
+
From 384b2e023476b7bee242a5c7c70ebad0b0dfb33f Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 2 Dec 2015 11:29:51 -0600 Subject: [PATCH 2994/3617] Get v2_playbook_on_start working * Move self._tqm.load_callbacks() earlier to ensure that v2_on_playbook_start can fire * Pass the playbook instance to v2_on_playbook_start * Add a _file_name instance attribute to the playbook --- lib/ansible/executor/playbook_executor.py | 6 ++++-- lib/ansible/playbook/__init__.py | 3 +++ lib/ansible/plugins/callback/__init__.py | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py index d647c8246a3d50..60a416af73dcb6 100644 --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -82,6 +82,10 @@ def run(self): if self._tqm is None: # we are doing a listing entry = {'playbook': playbook_path} entry['plays'] = [] + else: + # make sure the tqm has callbacks loaded + self._tqm.load_callbacks() + self._tqm.send_callback('v2_playbook_on_start', pb) i = 1 plays = pb.get_plays() @@ -130,8 +134,6 @@ def run(self): entry['plays'].append(new_play) else: - # make sure the tqm has callbacks loaded - self._tqm.load_callbacks() self._tqm._unreachable_hosts.update(self._unreachable_hosts) # we are actually running plays diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 888299e1d9e6bf..0ae443f84360a2 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -44,6 +44,7 @@ def __init__(self, loader): self._entries = [] self._basedir = os.getcwd() self._loader = loader + self._file_name = None @staticmethod def load(file_name, variable_manager=None, loader=None): @@ -61,6 +62,8 @@ def _load_playbook_data(self, file_name, variable_manager): # set the loaders basedir self._loader.set_basedir(self._basedir) + self._file_name = file_name + # dynamically load any plugins from the playbook directory 
for name, obj in get_all_plugin_loaders(): if obj.subdir: diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index ccc8a7f8e53adc..03eb58d99db65d 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -246,7 +246,7 @@ def v2_runner_on_async_failed(self, result): def v2_runner_on_file_diff(self, result, diff): pass #no v1 correspondance - def v2_playbook_on_start(self): + def v2_playbook_on_start(self, playbook): self.playbook_on_start() def v2_playbook_on_notify(self, result, handler): From eb7db067f9cb41837602de995543367d322bbaff Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 10:32:10 -0800 Subject: [PATCH 2995/3617] Fix template test results on python2.6 --- test/integration/roles/test_template/files/foo-py26.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/roles/test_template/files/foo-py26.txt b/test/integration/roles/test_template/files/foo-py26.txt index 84279bc7b3bf7c..76b0bb56f73ac0 100644 --- a/test/integration/roles/test_template/files/foo-py26.txt +++ b/test/integration/roles/test_template/files/foo-py26.txt @@ -3,6 +3,7 @@ templated_var_loaded { "bool": true, "multi_part": "1Foo", + "null_type": null, "number": 5, "string_num": "5" } From 8ff67e049451e48e5f79032da88435f570bb5311 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 2 Dec 2015 12:40:46 -0500 Subject: [PATCH 2996/3617] Default msg param to AnsibleError to avoid serialization problems --- lib/ansible/errors/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py index a2411b7bef7695..017272af7cade0 100644 --- a/lib/ansible/errors/__init__.py +++ b/lib/ansible/errors/__init__.py @@ -44,7 +44,7 @@ class AnsibleError(Exception): which should be returned by the DataLoader() class. 
''' - def __init__(self, message, obj=None, show_content=True): + def __init__(self, message="", obj=None, show_content=True): # we import this here to prevent an import loop problem, # since the objects code also imports ansible.errors from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject From a183972477de03c8f924525135908d4db258d44f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 2 Dec 2015 14:16:08 -0500 Subject: [PATCH 2997/3617] Don't use play vars in HostVars Fixes #13398 --- lib/ansible/executor/task_queue_manager.py | 1 - lib/ansible/vars/__init__.py | 15 ++------------- lib/ansible/vars/hostvars.py | 5 ++--- 3 files changed, 4 insertions(+), 17 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 0f8f16ee6ceb04..d665000046ca8c 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -188,7 +188,6 @@ class HostVarsManager(SyncManager): pass hostvars = HostVars( - play=new_play, inventory=self._inventory, variable_manager=self._variable_manager, loader=self._loader, diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 2c9d8aca334ec1..d636e8d4b9e6eb 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -43,7 +43,6 @@ from ansible.utils.debug import debug from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.vars import combine_vars -from ansible.vars.hostvars import HostVars from ansible.vars.unsafe_proxy import wrap_var try: @@ -171,7 +170,8 @@ def _preprocess_vars(self, a): return data - + # FIXME: include_hostvars is no longer used, and should be removed, but + # all other areas of code calling get_vars need to be fixed too def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True): ''' Returns the variables, with optional "context" given via the parameters @@ -367,17 +367,6 @@ 
def _get_magic_variables(self, loader, play, host, task, include_hostvars, inclu variables['groups'] = dict() for (group_name, group) in iteritems(self._inventory.groups): variables['groups'][group_name] = [h.name for h in group.get_hosts()] - - #if include_hostvars: - # hostvars_cache_entry = self._get_cache_entry(play=play) - # if hostvars_cache_entry in HOSTVARS_CACHE: - # hostvars = HOSTVARS_CACHE[hostvars_cache_entry] - # else: - # hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self) - # HOSTVARS_CACHE[hostvars_cache_entry] = hostvars - # variables['hostvars'] = hostvars - # variables['vars'] = hostvars[host.get_name()] - if play: variables['role_names'] = [r._role_name for r in play.roles] diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py index a82e683d74aa92..afa00ec8a4bae4 100644 --- a/lib/ansible/vars/hostvars.py +++ b/lib/ansible/vars/hostvars.py @@ -46,11 +46,10 @@ class HostVars(collections.Mapping): ''' A special view of vars_cache that adds values from the inventory when needed. 
''' - def __init__(self, play, inventory, variable_manager, loader): + def __init__(self, inventory, variable_manager, loader): self._lookup = dict() self._inventory = inventory self._loader = loader - self._play = play self._variable_manager = variable_manager self._cached_result = dict() @@ -68,7 +67,7 @@ def __getitem__(self, host_name): if host is None: raise j2undefined - data = self._variable_manager.get_vars(loader=self._loader, host=host, play=self._play, include_hostvars=False) + data = self._variable_manager.get_vars(loader=self._loader, host=host, include_hostvars=False) sha1_hash = sha1(str(data).encode('utf-8')).hexdigest() if sha1_hash in self._cached_result: From 6559616a04a1171289933f90a189e92492a1c406 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 1 Dec 2015 22:41:23 -0800 Subject: [PATCH 2998/3617] updated docs for 2.0 api --- docsite/rst/developing_api.rst | 72 ++++++++++++++++++++++++++++++---- 1 file changed, 64 insertions(+), 8 deletions(-) diff --git a/docsite/rst/developing_api.rst b/docsite/rst/developing_api.rst index 76cebb64f12442..319417672e039c 100644 --- a/docsite/rst/developing_api.rst +++ b/docsite/rst/developing_api.rst @@ -17,11 +17,67 @@ This chapter discusses the Python API. .. _python_api: -Python API ----------- - The Python API is very powerful, and is how the ansible CLI and ansible-playbook -are implemented. +are implemented. In version 2.0 the core ansible got rewritten and the API was mostly rewritten. + +.. 
_python_api_20: + +Python API 2.0 +-------------- + +In 2.0 things get a bit more complicated to start, but you end up with much more discrete and readable classes:: + + + #!/usr/bin/python2 + + from collections import namedtuple + from ansible.parsing.dataloader import DataLoader + from ansible.vars import VariableManager + from ansible.inventory import Inventory + from ansible.playbook.play import Play + from ansible.executor.task_queue_manager import TaskQueueManager + + Options = namedtuple('Options', ['connection','module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check']) + # initialize needed objects + variable_manager = VariableManager() + loader = DataLoader() + options = Options(connection='local', module_path='/path/to/mymodules', forks=100, remote_user=None, private_key_file=None, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None, become_user=None, verbosity=None, check=False) + passwords = dict(vault_pass='secret') + + # create inventory and pass to var manager + inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list='localhost') + variable_manager.set_inventory(inventory) + + # create play with tasks + play_source = dict( + name = "Ansible Play", + hosts = 'localhost', + gather_facts = 'no', + tasks = [ dict(action=dict(module='debug', args=(msg='Hello Galaxy!'))) ] + ) + play = Play().load(play_source, variable_manager=variable_manager, loader=loader) + + # actually run it + tqm = None + try: + tqm = TaskQueueManager( + inventory=inventory, + variable_manager=variable_manager, + loader=loader, + options=options, + passwords=passwords, + stdout_callback='default', + ) + result = tqm.run(play) + finally: + if tqm is not None: + tqm.cleanup() + + +.. 
_python_api_old: + +Python API pre 2.0 +------------------ It's pretty simple:: @@ -51,7 +107,7 @@ expressed in the :doc:`modules` documentation.:: A module can return any type of JSON data it wants, so Ansible can be used as a framework to rapidly build powerful applications and scripts. -.. _detailed_api_example: +.. _detailed_api_old_example: Detailed API Example ```````````````````` @@ -87,9 +143,9 @@ The following script prints out the uptime information for all hosts:: for (hostname, result) in results['dark'].items(): print "%s >>> %s" % (hostname, result) -Advanced programmers may also wish to read the source to ansible itself, for -it uses the Runner() API (with all available options) to implement the -command line tools ``ansible`` and ``ansible-playbook``. +Advanced programmers may also wish to read the source to ansible itself, +for it uses the API (with all available options) to implement the ``ansible`` +command line tools (``lib/ansible/cli/``). .. seealso:: From ac54ac618cf7a44f504a222142b749f18f4e2cef Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 14:48:27 -0800 Subject: [PATCH 2999/3617] Something's strange... let's see if python2.6 is really the same now... --- test/integration/roles/test_template/tasks/main.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index a35b93d9d924a8..28477d44e5ba90 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -48,11 +48,13 @@ - name: copy known good into place copy: src=foo.txt dest={{output_dir}}/foo.txt - when: pyver.stdout != '2.6' -- name: copy known good into place - copy: src=foo-py26.txt dest={{output_dir}}/foo.txt - when: pyver.stdout == '2.6' +# Seems that python-2.6 now outputs the same format as everywhere else? 
+# when: pyver.stdout != '2.6' + +#- name: copy known good into place +# copy: src=foo-py26.txt dest={{output_dir}}/foo.txt +# when: pyver.stdout == '2.6' - name: compare templated file to known good shell: diff {{output_dir}}/foo.templated {{output_dir}}/foo.txt From 2a33a13a20b622e14d9b9a81da461890b816ad16 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 2 Dec 2015 16:44:43 -0800 Subject: [PATCH 3000/3617] updated port version --- packaging/port/sysutils/ansible/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/port/sysutils/ansible/Makefile b/packaging/port/sysutils/ansible/Makefile index 10016f99080b98..ef71c95c6c92a8 100644 --- a/packaging/port/sysutils/ansible/Makefile +++ b/packaging/port/sysutils/ansible/Makefile @@ -1,7 +1,7 @@ # $FreeBSD$ PORTNAME= ansible -PORTVERSION= 2.0 +PORTVERSION= 2.1 PORTREVISION= 1 CATEGORIES= python net-mgmt sysutils MASTER_SITES= http://releases.ansible.com/ansible/ From b85e6e008ddf0e5a3308afa8111bb058611a6f0b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 2 Dec 2015 16:46:12 -0800 Subject: [PATCH 3001/3617] updated version that makefile uses --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d05cb3d4480f67..879b416e609a82 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.0 0.5.beta3 +2.1 From 9b81c35d06a598aef05e546a1476a4aa18d115f3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 20:52:58 -0800 Subject: [PATCH 3002/3617] Don't compare or merge str with unicode Fixes #13387 --- lib/ansible/module_utils/basic.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index d2cf09458ea6bc..527a4c0a6c1e01 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -369,7 +369,12 @@ def return_values(obj): sensitive values pre-jsonification.""" if isinstance(obj, basestring): if 
obj: - yield obj + if isinstance(obj, bytes): + yield obj + else: + # Unicode objects should all convert to utf-8 + # (still must deal with surrogateescape on python3) + yield obj.encode('utf-8') return elif isinstance(obj, Sequence): for element in obj: @@ -391,10 +396,22 @@ def remove_values(value, no_log_strings): """ Remove strings in no_log_strings from value. If value is a container type, then remove a lot more""" if isinstance(value, basestring): - if value in no_log_strings: + if isinstance(value, unicode): + # This should work everywhere on python2. Need to check + # surrogateescape on python3 + bytes_value = value.encode('utf-8') + value_is_unicode = True + else: + bytes_value = value + value_is_unicode = False + if bytes_value in no_log_strings: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' for omit_me in no_log_strings: - value = value.replace(omit_me, '*' * 8) + bytes_value = bytes_value.replace(omit_me, '*' * 8) + if value_is_unicode: + value = unicode(bytes_value, 'utf-8', errors='replace') + else: + value = bytes_value elif isinstance(value, Sequence): return [remove_values(elem, no_log_strings) for elem in value] elif isinstance(value, Mapping): From 5fdfe6a0f262e24be6ad8ec906f983220169f5b8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 21:07:41 -0800 Subject: [PATCH 3003/3617] Add some test data that has unicode values --- test/units/module_utils/basic/test_no_log.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/units/module_utils/basic/test_no_log.py b/test/units/module_utils/basic/test_no_log.py index 24d38ddcfa0ae8..7b8c976c96f45b 100644 --- a/test/units/module_utils/basic/test_no_log.py +++ b/test/units/module_utils/basic/test_no_log.py @@ -69,6 +69,8 @@ class TestRemoveValues(unittest.TestCase): 'three': ['amigos', 'musketeers', None, {'ping': 'pong', 'base': ['balls', 'raquets']}]}, frozenset(['nope'])), + ('Toshio くら', frozenset(['とみ'])), + (u'Toshio くら', frozenset(['とみ'])), ) dataset_remove = ( ('string', 
frozenset(['string']), OMIT), @@ -94,6 +96,8 @@ class TestRemoveValues(unittest.TestCase): ('This sentence has an enigma wrapped in a mystery inside of a secret. - mr mystery', frozenset(['enigma', 'mystery', 'secret']), 'This sentence has an ******** wrapped in a ******** inside of a ********. - mr ********'), + ('Toshio くらとみ', frozenset(['くらとみ']), 'Toshio ********'), + (u'Toshio くらとみ', frozenset(['くらとみ']), u'Toshio ********'), ) def test_no_removal(self): From e00012994e9d26156380863f83266ac26a536e5d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 2 Dec 2015 21:09:53 -0800 Subject: [PATCH 3004/3617] Also some unicode tests for return_values() --- test/units/module_utils/basic/test_no_log.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/units/module_utils/basic/test_no_log.py b/test/units/module_utils/basic/test_no_log.py index 7b8c976c96f45b..3cb5d7b64b5773 100644 --- a/test/units/module_utils/basic/test_no_log.py +++ b/test/units/module_utils/basic/test_no_log.py @@ -46,6 +46,8 @@ class TestReturnValues(unittest.TestCase): 'three': ['amigos', 'musketeers', None, {'ping': 'pong', 'base': ('balls', 'raquets')}]}, frozenset(['1', 'dos', 'amigos', 'musketeers', 'pong', 'balls', 'raquets'])), + (u'Toshio くらとみ', frozenset(['Toshio くらとみ'])), + ('Toshio くらとみ', frozenset(['Toshio くらとみ'])), ) def test_return_values(self): From 9caa2b0452fa6e70bb7ae3f6b5b979d812b36642 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Dec 2015 07:59:23 -0800 Subject: [PATCH 3005/3617] Revert "Update docs and example config for requiretty + pipelining change" This reverts commit f873cc0fb54f309aa9ece4e4127bdf1071d1bfd7. 
Reverting pipelining change for now due to hard to pin down bugs: #13410 #13411 --- docsite/rst/intro_configuration.rst | 25 +++++++++---------------- examples/ansible.cfg | 12 ++++++------ 2 files changed, 15 insertions(+), 22 deletions(-) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index ca3fd006545099..dda07fc4506502 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -799,22 +799,15 @@ pipelining ========== Enabling pipelining reduces the number of SSH operations required to -execute a module on the remote server, by executing many ansible modules without actual file transfer. -This can result in a very significant performance improvement when enabled. -As of Ansible 2.1.0 this option is enabled by default. - -In previous versions, this option was disabled because of a bad interaction -with some sudo configurations. If sudo was configured to 'requiretty' for -operation then pipelining would not work and ansible would fail to connect -properly. This could be remedied by removing 'requiretty' in /etc/sudoers on -all managed hosts. - -It is recommended to leave this option enabled. If you are stuck with an old -version of ansible your first choice option should be to remove requiretty -from the sudoers configuration and only disable pipelining if you cannot do -that. Enabling this eliminates the need for :doc:`playbooks_acceleration`:: - - pipelining=True +execute a module on the remote server, by executing many ansible modules without actual file transfer. +This can result in a very significant performance improvement when enabled, however when using "sudo:" operations you must +first disable 'requiretty' in /etc/sudoers on all managed hosts. 
+ +By default, this option is disabled to preserve compatibility with +sudoers configurations that have requiretty (the default on many distros), but is highly +recommended if you can enable it, eliminating the need for :doc:`playbooks_acceleration`:: + + pipelining=False .. _accelerate_settings: diff --git a/examples/ansible.cfg b/examples/ansible.cfg index d77dfba0c07dbd..74aef7a0246785 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -226,13 +226,13 @@ # Enabling pipelining reduces the number of SSH operations required to # execute a module on the remote server. This can result in a significant -# performance improvement when enabled. It is enabled by default. +# performance improvement when enabled, however when using "sudo:" you must +# first disable 'requiretty' in /etc/sudoers # -# In previous versions of ansible this was defaulted to off as it was -# incompatible with sudo's requiretty option. Ansible 2.1 and above contain -# a fix for that problem. -# -#pipelining = True +# By default, this option is disabled to preserve compatibility with +# sudoers configurations that have requiretty (the default on many distros). +# +#pipelining = False # if True, make ansible use scp if the connection type is ssh # (default is sftp) From fbb63d66e79a772642aea4db050401cc89332fac Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Dec 2015 08:00:28 -0800 Subject: [PATCH 3006/3617] Revert "Note crab and mgedmin's work to make pipelining compatible with sudo+requiretty" This reverts commit 1d8e178732dd7303ac2651377f75aaed96b5d037. 
Reverting for now due to hard to pin down bugs: #13410 #13411 --- CHANGELOG.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index efcbb2bdd3ffbb..f6c10c589b26df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,11 +3,6 @@ Ansible Changes By Release ## 2.1 TBD - ACTIVE DEVELOPMENT -###Major Changes: - -* A fix was applied to make ansible's pipelining mode work with sudo when sudo - is configured to use requiretty. Thanks to amenonsen and mgedmin! - ####New Modules: * cloudstack: cs_volume From e201a255d17a72b338be92b8db881effb79b5ece Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Dec 2015 08:01:05 -0800 Subject: [PATCH 3007/3617] Revert "Make sudo+requiretty and ANSIBLE_PIPELINING work together" This reverts commit f488de85997079f480d504f73537e3e33ff2495b. Reverting for now due to hard to pin down bugs: #13410 #13411 --- lib/ansible/constants.py | 2 +- lib/ansible/plugins/action/__init__.py | 6 ++-- lib/ansible/plugins/connection/ssh.py | 43 ++++++++++++++++++------- lib/ansible/plugins/shell/powershell.py | 2 +- lib/ansible/plugins/shell/sh.py | 11 ++----- 5 files changed, 39 insertions(+), 25 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 5837ecae8091a1..08d522fcb606b8 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -237,7 +237,7 @@ def load_config_file(): # CONNECTION RELATED ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-o ControlMaster=auto -o ControlPersist=60s') ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r") -ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', True, boolean=True) +ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True) ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 
'ANSIBLE_SSH_RETRIES', 0, integer=True) PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 73eb5e4346f097..64a3b51e5d3cf4 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -177,7 +177,7 @@ def _late_needs_tmp_path(self, tmp, module_style): if tmp and "tmp" in tmp: # tmp has already been created return False - if not self._connection.has_pipelining or not self._play_context.pipelining or C.DEFAULT_KEEP_REMOTE_FILES: + if not self._connection.has_pipelining or not self._play_context.pipelining or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become_method == 'su': # tmp is necessary to store the module source code # or we want to keep the files on the target system return True @@ -439,9 +439,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var # not sudoing or sudoing to root, so can cleanup files in the same step rm_tmp = tmp - python_interp = task_vars.get('ansible_python_interpreter', 'python') - - cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp, python_interpreter=python_interp) + cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp) cmd = cmd.strip() sudoable = True diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 607dcd667fef5a..debe36bd320509 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -241,7 +241,7 @@ def _build_command(self, binary, *other_args): return self._command - def _send_initial_data(self, fh, in_data, tty=False): + def _send_initial_data(self, fh, in_data): ''' Writes initial data to the stdin filehandle of the subprocess and closes 
it. (The handle must be closed; otherwise, for example, "sftp -b -" will @@ -252,8 +252,6 @@ def _send_initial_data(self, fh, in_data, tty=False): try: fh.write(in_data) - if tty: - fh.write("__EOF__942d747a0772c3284ffb5920e234bd57__\n") fh.close() except (OSError, IOError): raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh') @@ -316,7 +314,7 @@ def _examine_output(self, source, state, chunk, sudoable): return ''.join(output), remainder - def _run(self, cmd, in_data, sudoable=True, tty=False): + def _run(self, cmd, in_data, sudoable=True): ''' Starts the command and communicates with it until it ends. ''' @@ -324,10 +322,25 @@ def _run(self, cmd, in_data, sudoable=True, tty=False): display_cmd = map(pipes.quote, cmd[:-1]) + [cmd[-1]] display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host) - # Start the given command. + # Start the given command. If we don't need to pipeline data, we can try + # to use a pseudo-tty (ssh will have been invoked with -tt). If we are + # pipelining data, or can't create a pty, we fall back to using plain + # old pipes. - p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdin = p.stdin + p = None + if not in_data: + try: + # Make sure stdin is a proper pty to avoid tcgetattr errors + master, slave = pty.openpty() + p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdin = os.fdopen(master, 'w', 0) + os.close(slave) + except (OSError, IOError): + p = None + + if not p: + p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdin = p.stdin # If we are using SSH password authentication, write the password into # the pipe we opened in _build_command. @@ -390,7 +403,7 @@ def _run(self, cmd, in_data, sudoable=True, tty=False): # before we call select. 
if states[state] == 'ready_to_send' and in_data: - self._send_initial_data(stdin, in_data, tty) + self._send_initial_data(stdin, in_data) state += 1 while True: @@ -488,7 +501,7 @@ def _run(self, cmd, in_data, sudoable=True, tty=False): if states[state] == 'ready_to_send': if in_data: - self._send_initial_data(stdin, in_data, tty) + self._send_initial_data(stdin, in_data) state += 1 # Now we're awaiting_exit: has the child process exited? If it has, @@ -544,9 +557,17 @@ def _exec_command(self, cmd, in_data=None, sudoable=True): display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr) - cmd = self._build_command('ssh', '-tt', self.host, cmd) + # we can only use tty when we are not pipelining the modules. piping + # data into /usr/bin/python inside a tty automatically invokes the + # python interactive-mode but the modules are not compatible with the + # interactive-mode ("unexpected indent" mainly because of empty lines) + + if in_data: + cmd = self._build_command('ssh', self.host, cmd) + else: + cmd = self._build_command('ssh', '-tt', self.host, cmd) - (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable, tty=True) + (returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable) return (returncode, stdout, stderr) diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py index 9fd1541b63ea8c..096a0cf95d613f 100644 --- a/lib/ansible/plugins/shell/powershell.py +++ b/lib/ansible/plugins/shell/powershell.py @@ -110,7 +110,7 @@ def checksum(self, path, *args, **kwargs): ''' % dict(path=path) return self._encode_script(script) - def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None, python_interpreter=None): + def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): cmd_parts = shlex.split(to_bytes(cmd), posix=False) cmd_parts = map(to_unicode, cmd_parts) if shebang and 
shebang.lower() == '#!powershell': diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index 719ac83ffb083b..f1fa3565b761b6 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -138,17 +138,12 @@ def checksum(self, path, python_interp): cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path) return cmd - def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None, python_interpreter='python'): + def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): # don't quote the cmd if it's an empty string, because this will # break pipelining mode - env = env_string.strip() - exe = shebang.replace("#!", "").strip() - if cmd.strip() == '': - reader = "%s -uc 'import sys; [sys.stdout.write(s) for s in iter(sys.stdin.readline, \"__EOF__942d747a0772c3284ffb5920e234bd57__\\n\")]'|" % python_interpreter - cmd_parts = [env, reader, env, exe] - else: + if cmd.strip() != '': cmd = pipes.quote(cmd) - cmd_parts = [env, exe, cmd] + cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd] if arg_path is not None: cmd_parts.append(arg_path) new_cmd = " ".join(cmd_parts) From 5f83a6aeda131f519a47d929eabe1666a7dff21b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 3 Dec 2015 11:29:09 -0500 Subject: [PATCH 3008/3617] Properly default remote_user for delegated-to hosts Fixes #13323 --- lib/ansible/playbook/play_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index b2b7e44497505b..5c020939808d87 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -350,7 +350,7 @@ def set_task_and_variable_override(self, task, variables, templar): if user_var in delegated_vars: break else: - delegated_vars['ansible_user'] = None + delegated_vars['ansible_user'] = task.remote_user or self.remote_user else: 
delegated_vars = dict() From 29f5c5db7178b3bb26f4dd8410269a44d17e5315 Mon Sep 17 00:00:00 2001 From: Peter Sprygada Date: Thu, 3 Dec 2015 12:50:23 -0500 Subject: [PATCH 3009/3617] bugfix for ios.py shared module argument creation This patch fixes a bug in module_utils/ios.py where the the wrong shared module arguments are being generated. This bug prevented the shared module from operating correctly. This patch should be generally applied. --- lib/ansible/module_utils/ios.py | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py index dc46a860c6a207..085b68dcd28086 100644 --- a/lib/ansible/module_utils/ios.py +++ b/lib/ansible/module_utils/ios.py @@ -80,7 +80,7 @@ def ios_module(**kwargs): """ spec = kwargs.get('argument_spec') or dict() - argument_spec = url_argument_spec() + argument_spec = shell_argument_spec() argument_spec.update(IOS_COMMON_ARGS) if kwargs.get('argument_spec'): argument_spec.update(kwargs['argument_spec']) @@ -150,21 +150,6 @@ def send(self, commands): responses.append(response) return responses -def ios_from_args(module): - """Extracts the set of argumetns to build a valid IOS connection - """ - params = dict() - for arg, attrs in IOS_COMMON_ARGS.iteritems(): - if module.params['device']: - params[arg] = module.params['device'].get(arg) - if arg not in params or module.params[arg]: - params[arg] = module.params[arg] - if params[arg] is None: - if attrs.get('required'): - module.fail_json(msg='argument %s is required' % arg) - params[arg] = attrs.get('default') - return params - def ios_connection(module): """Creates a connection to an IOS device based on the module arguments """ @@ -180,16 +165,16 @@ def ios_connection(module): shell = IosShell() shell.connect(host, port=port, username=username, password=password, timeout=timeout) + shell.send('terminal length 0') except paramiko.ssh_exception.AuthenticationException, exc: 
module.fail_json(msg=exc.message) except socket.error, exc: module.fail_json(msg=exc.strerror, errno=exc.errno) - shell.send('terminal length 0') - if module.params['enable_mode']: shell.authorize(module.params['enable_password']) return shell + From 4a4e7a6ebb9026bcb8118ca342380302014fbacf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 11:20:00 -0800 Subject: [PATCH 3010/3617] added extract filter to changelog --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f6c10c589b26df..f9f8b4b76a9800 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,9 @@ Ansible Changes By Release ####New Modules: * cloudstack: cs_volume +####New Filters: +* extract + ## 2.0 "Over the Hills and Far Away" ###Major Changes: From 4426b7f6e03553cabd9d698c4912f48c523ca2d9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 3 Dec 2015 14:22:27 -0500 Subject: [PATCH 3011/3617] fix sorting of groups for host vars Fixes #13371 --- lib/ansible/vars/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index d636e8d4b9e6eb..1184ec5049243d 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -234,7 +234,7 @@ def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=Tru for item in data: all_vars = combine_vars(all_vars, item) - for group in host.get_groups(): + for group in sorted(host.get_groups(), key=lambda g: g.depth): if group.name in self._group_vars_files and group.name != 'all': for data in self._group_vars_files[group.name]: data = preprocess_vars(data) @@ -404,7 +404,7 @@ def _get_delegated_vars(self, loader, play, task, existing_variables): items = [] if task.loop is not None: if task.loop in lookup_loader: - #TODO: remove convert_bare true and deprecate this in with_ + #TODO: remove convert_bare true and deprecate this in with_ try: loop_terms = 
listify_lookup_plugin_terms(terms=task.loop_args, templar=templar, loader=loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: @@ -604,4 +604,3 @@ def set_host_variable(self, host, varname, value): if host_name not in self._vars_cache: self._vars_cache[host_name] = dict() self._vars_cache[host_name][varname] = value - From f467f1770f8885b657fa01270b6f2909249d1f93 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 3 Dec 2015 15:25:54 -0500 Subject: [PATCH 3012/3617] Properly compare object references for Hosts when adding new ones Fixes #13397 --- lib/ansible/inventory/dir.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py index 7ae9611ddf1712..e4f7ee80f92b4b 100644 --- a/lib/ansible/inventory/dir.py +++ b/lib/ansible/inventory/dir.py @@ -192,6 +192,8 @@ def _add_group(self, group): if group.name not in self.groups: # it's brand new, add him! self.groups[group.name] = group + # the Group class does not (yet) implement __eq__/__ne__, + # so unlike Host we do a regular comparison here if self.groups[group.name] != group: # different object, merge self._merge_groups(self.groups[group.name], group) @@ -200,7 +202,10 @@ def _add_host(self, host): if host.name not in self.hosts: # Papa's got a brand new host self.hosts[host.name] = host - if self.hosts[host.name] != host: + # because the __eq__/__ne__ methods in Host() compare the + # name fields rather than references, we use id() here to + # do the object comparison for merges + if id(self.hosts[host.name]) != id(host): # different object, merge self._merge_hosts(self.hosts[host.name], host) From cfeef81303b6c9e197b48783c49376f989f67e18 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Dec 2015 14:15:37 -0800 Subject: [PATCH 3013/3617] For now, skip tests of module_utils/basic functions that are failing on py3 (these are only run on the target hosts, not on the controller). 
--- test/units/module_utils/basic/test_heuristic_log_sanitize.py | 1 + test/units/module_utils/basic/test_no_log.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/test/units/module_utils/basic/test_heuristic_log_sanitize.py b/test/units/module_utils/basic/test_heuristic_log_sanitize.py index 51a5c11adf74ec..14ffff0d7469c9 100644 --- a/test/units/module_utils/basic/test_heuristic_log_sanitize.py +++ b/test/units/module_utils/basic/test_heuristic_log_sanitize.py @@ -85,6 +85,7 @@ def test_hides_ssh_secrets(self): self.assertTrue(ssh_output.endswith("}")) self.assertIn(":********@foo.com/data'", ssh_output) + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_hides_parameter_secrets(self): output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret'])) self.assertNotIn('secret', output) diff --git a/test/units/module_utils/basic/test_no_log.py b/test/units/module_utils/basic/test_no_log.py index 3cb5d7b64b5773..102b7a3ab20834 100644 --- a/test/units/module_utils/basic/test_no_log.py +++ b/test/units/module_utils/basic/test_no_log.py @@ -50,6 +50,7 @@ class TestReturnValues(unittest.TestCase): ('Toshio くらとみ', frozenset(['Toshio くらとみ'])), ) + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_return_values(self): for data, expected in self.dataset: self.assertEquals(frozenset(return_values(data)), expected) @@ -102,10 +103,12 @@ class TestRemoveValues(unittest.TestCase): (u'Toshio くらとみ', frozenset(['くらとみ']), u'Toshio ********'), ) + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_no_removal(self): for value, no_log_strings in self.dataset_no_remove: self.assertEquals(remove_values(value, no_log_strings), value) + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") def test_strings_to_remove(self): for value, no_log_strings, expected in 
self.dataset_remove: self.assertEquals(remove_values(value, no_log_strings), expected) From 26520442bd0fe231abc0a4432c6f2943b61f8fb8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 28 Nov 2015 20:37:17 -0800 Subject: [PATCH 3014/3617] Now and/or shell expressions depend on shell plugin This should fix issues with fish shell users as && and || are not valid syntax, fish uses actual 'and' and 'or' programs. Also updated to allow for fish backticks pushed quotes to subshell, fish seems to handle spaces w/o them. Lastly, removed encompassing subshell () for fish compatibility. fixes #13199 --- lib/ansible/plugins/shell/fish.py | 7 +++++++ lib/ansible/plugins/shell/sh.py | 18 ++++++++++++------ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/shell/fish.py b/lib/ansible/plugins/shell/fish.py index ff78941e19c028..342de99e5f8389 100644 --- a/lib/ansible/plugins/shell/fish.py +++ b/lib/ansible/plugins/shell/fish.py @@ -21,5 +21,12 @@ class ShellModule(ShModule): + _SHELL_AND = '; and' + _SHELL_OR = '; or' + _SHELL_SUB_LEFT = '(' + _SHELL_SUB_RIGHT = ')' + _SHELL_GROUP_LEFT = '' + _SHELL_GROUP_RIGHT = '' + def env_prefix(self, **kwargs): return 'env %s' % super(ShellModule, self).env_prefix(**kwargs) diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py index f1fa3565b761b6..7fbfa819ef156e 100644 --- a/lib/ansible/plugins/shell/sh.py +++ b/lib/ansible/plugins/shell/sh.py @@ -33,6 +33,12 @@ class ShellModule(object): # How to end lines in a python script one-liner _SHELL_EMBEDDED_PY_EOL = '\n' _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1' + _SHELL_AND = '&&' + _SHELL_OR = '||' + _SHELL_SUB_LEFT = '"$(' + _SHELL_SUB_RIGHT = ')"' + _SHELL_GROUP_LEFT = '(' + _SHELL_GROUP_RIGHT = ')' def env_prefix(self, **kwargs): '''Build command prefix with environment variables.''' @@ -71,14 +77,14 @@ def mkdtemp(self, basefile=None, system=False, mode=None): basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile) if system and 
(basetmp.startswith('$HOME') or basetmp.startswith('~/')): basetmp = self.join_path('/tmp', basefile) - cmd = 'mkdir -p "`echo %s`"' % basetmp - cmd += ' && echo "`echo %s`"' % basetmp + cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT) + cmd += ' %s echo %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT) # change the umask in a subshell to achieve the desired mode # also for directories created with `mkdir -p` if mode: tmp_umask = 0o777 & ~mode - cmd = '(umask %o && %s)' % (tmp_umask, cmd) + cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT) return cmd @@ -128,14 +134,14 @@ def checksum(self, path, python_interp): # used by a variety of shells on the remote host to invoke a python # "one-liner". shell_escaped_path = pipes.quote(path) - test = "rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; [ -d %(p)s ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"%(p)s && exit 0" % dict(p=shell_escaped_path, i=python_interp) + test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) csums = [ "({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3) "({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 
0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4 ] - cmd = " || ".join(csums) - cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path) + cmd = (" %s " % self._SHELL_OR).join(csums) + cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path) return cmd def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None): From a1f516824ee2160121437edf6939ab2145972739 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 18:23:08 -0800 Subject: [PATCH 3015/3617] corrected playbook path, reformated options help the last just to make the help consistent and readable --- lib/ansible/cli/pull.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 04586c1d0c53da..9cc6c25e9f2fbd 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -74,8 +74,10 @@ def parse(self): help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests') self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could not be updated') - self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', help='directory to checkout repository to') - self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') + self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', + help='directory to checkout repository to') + self.parser.add_option('-U', '--url', dest='url', default=None, + help='URL of the playbook repository') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. 
' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', @@ -174,8 +176,7 @@ def run(self): display.display("Repository has not changed, quitting.") return 0 - playbook = self.select_playbook(path) - + playbook = self.select_playbook(self.options.dest) if playbook is None: raise AnsibleOptionsError("Could not find a playbook to run.") From 8d5f36a6c23ad17116ee0bb24c07f83745efb8e0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 19:39:57 -0800 Subject: [PATCH 3016/3617] return unique list of hosts --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index fdcbd37e78e211..59a3c37bf9326a 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -196,7 +196,7 @@ def get_hosts(self, pattern="all", ignore_limits_and_restrictions=False): hosts = [ h for h in hosts if h in self._restriction ] HOSTS_PATTERNS_CACHE[pattern_hash] = hosts[:] - return hosts + return list(set(hosts)) @classmethod def split_host_pattern(cls, pattern): From e1c62fb5afd5344dc1f3ff1606803263218b79ea Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 19:42:05 -0800 Subject: [PATCH 3017/3617] reverted to previous pull checkout dir behaviour This fixes bugs with not finding plays when not specifying checkout dir Also makes it backwards compatible --- lib/ansible/cli/pull.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 9cc6c25e9f2fbd..b2e402126dadf2 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -74,7 +74,7 @@ def parse(self): help='sleep for random interval (between 0 and n number of seconds) before starting. 
This is a useful way to disperse git requests') self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true', help='run the playbook even if the repository could not be updated') - self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', + self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') @@ -90,6 +90,11 @@ def parse(self): self.options, self.args = self.parser.parse_args() + if not self.options.dest: + hostname = socket.getfqdn() + # use a hostname dependent directory, in case of $HOME on nfs + self.options.dest = os.path.join('~/.ansible/pull', hostname) + if self.options.sleep: try: secs = random.randint(0,int(self.options.sleep)) From d5446f98046d379ec950b849317472982dcba757 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 3 Dec 2015 20:47:02 -0800 Subject: [PATCH 3018/3617] fixed ansible-pull broken options * sudo was not working, now it supports full become * now default checkout dir works, not only when specifying * paths for checkout dir get expanded * fixed limit options for playbook * added verbose and debug info --- lib/ansible/cli/__init__.py | 12 +++++++----- lib/ansible/cli/pull.py | 25 ++++++++++++++++--------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index da4d1b92d3d7d8..da1aabcc69891f 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -210,7 +210,7 @@ def expand_tilde(option, opt, value, parser): @staticmethod def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False, - async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False): + async_opts=False, connect_opts=False, 
subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False): ''' create an options parser for most ansible scripts ''' # TODO: implement epilog parsing @@ -267,10 +267,6 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, if runas_opts: # priv user defaults to root later on to enable detecting when this option was given here - parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', - help='ask for sudo password (deprecated, use become)') - parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', - help='ask for su password (deprecated, use become)') parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo', help="run operations with sudo (nopasswd) (deprecated, use become)") parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None, @@ -287,6 +283,12 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS))) parser.add_option('--become-user', default=None, dest='become_user', type='string', help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER) + + if runas_opts or runas_prompt_opts: + parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true', + help='ask for sudo password (deprecated, use become)') + parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true', + help='ask for su password (deprecated, use become)') parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true', help='ask for privilege escalation password') diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 
b2e402126dadf2..1543c704d57697 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -64,10 +64,12 @@ def parse(self): subset_opts=True, inventory_opts=True, module_opts=True, + runas_prompt_opts=True, ) # options unique to pull - self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run') + self.parser.add_option('--purge', default=False, action='store_true', + help='purge checkout after playbook run') self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', help='only run the playbook if the repository has been updated') self.parser.add_option('-s', '--sleep', dest='sleep', default=None, @@ -94,6 +96,7 @@ def parse(self): hostname = socket.getfqdn() # use a hostname dependent directory, in case of $HOME on nfs self.options.dest = os.path.join('~/.ansible/pull', hostname) + self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest)) if self.options.sleep: try: @@ -126,7 +129,7 @@ def run(self): node = platform.node() host = socket.getfqdn() limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]])) - base_opts = '-c local "%s"' % limit_opts + base_opts = '-c local ' if self.options.verbosity > 0: base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ]) @@ -137,7 +140,7 @@ def run(self): else: inv_opts = self.options.inventory - #TODO: enable more repo modules hg/svn? + #FIXME: enable more repo modules hg/svn? 
if self.options.module_name == 'git': repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest) if self.options.checkout: @@ -157,8 +160,8 @@ def run(self): raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name)) bin_path = os.path.dirname(os.path.abspath(sys.argv[0])) - cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % ( - bin_path, inv_opts, base_opts, self.options.module_name, repo_opts + cmd = '%s/ansible -i "%s" %s -m %s -a "%s" "%s"' % ( + bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts ) for ev in self.options.extra_vars: @@ -170,6 +173,8 @@ def run(self): time.sleep(self.options.sleep) # RUN the Checkout command + display.debug("running ansible with VCS module to checkout repo") + display.vvvv('EXEC: %s' % cmd) rc, out, err = run_cmd(cmd, live=True) if rc != 0: @@ -193,16 +198,18 @@ def run(self): cmd += ' -i "%s"' % self.options.inventory for ev in self.options.extra_vars: cmd += ' -e "%s"' % ev - if self.options.ask_sudo_pass: - cmd += ' -K' + if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass: + cmd += ' --ask-become-pass' if self.options.tags: cmd += ' -t "%s"' % self.options.tags - if self.options.limit: - cmd += ' -l "%s"' % self.options.limit + if self.options.subset: + cmd += ' -l "%s"' % self.options.subset os.chdir(self.options.dest) # RUN THE PLAYBOOK COMMAND + display.debug("running ansible-playbook to do actual work") + display.debug('EXEC: %s' % cmd) rc, out, err = run_cmd(cmd, live=True) if self.options.purge: From e385c91fa528cb5e835077331512307b231ba393 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Dec 2015 09:57:06 -0800 Subject: [PATCH 3019/3617] Update submodule refs# --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index cd9a7667aa39bb..191347676eea08 160000 --- 
a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit cd9a7667aa39bbc1ccd606ebebaf3c62f228d601 +Subproject commit 191347676eea08817da3fb237f24cdbf2d16e307 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 3c4f954f0fece5..a10bdd6be948d3 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 3c4f954f0fece5dcb3241d6d5391273334206241 +Subproject commit a10bdd6be948d3aa5fad7ff4959908d6e78e0528 From 750adbaa270bca5a63f443808a7b8ddc2a026d9a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 4 Dec 2015 12:48:56 -0500 Subject: [PATCH 3020/3617] Changing up how host (in)equality is checked Fixes #13397 --- lib/ansible/inventory/dir.py | 2 +- lib/ansible/inventory/host.py | 2 +- test/units/inventory/test_host.py | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py index e4f7ee80f92b4b..e716987fd5fb5b 100644 --- a/lib/ansible/inventory/dir.py +++ b/lib/ansible/inventory/dir.py @@ -205,7 +205,7 @@ def _add_host(self, host): # because the __eq__/__ne__ methods in Host() compare the # name fields rather than references, we use id() here to # do the object comparison for merges - if id(self.hosts[host.name]) != id(host): + if self.hosts[host.name] != host: # different object, merge self._merge_hosts(self.hosts[host.name], host) diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index a561b951b45ca7..a433463fa1b014 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -38,7 +38,7 @@ def __setstate__(self, data): def __eq__(self, other): if not isinstance(other, Host): return False - return self.name == other.name + return id(self) == id(other) def __ne__(self, other): return not self.__eq__(other) diff --git a/test/units/inventory/test_host.py b/test/units/inventory/test_host.py index 078d4321b573df..5c0945f7b4e0ce 100644 --- 
a/test/units/inventory/test_host.py +++ b/test/units/inventory/test_host.py @@ -29,9 +29,7 @@ def setUp(self): def test_equality(self): self.assertEqual(self.hostA, self.hostA) self.assertNotEqual(self.hostA, self.hostB) - self.assertEqual(self.hostA, Host('a')) - # __ne__ is a separate method - self.assertFalse(self.hostA != Host('a')) + self.assertNotEqual(self.hostA, Host('a')) def test_hashability(self): # equality implies the hash values are the same From 84507aedd4b4a4be48acf9657b90bb341c3bd1e2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 4 Dec 2015 13:33:27 -0500 Subject: [PATCH 3021/3617] Adding a uuid field so we can track host equality across serialization too --- lib/ansible/inventory/host.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py index a433463fa1b014..6263dcbc80dbcc 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -19,6 +19,8 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import uuid + from ansible.inventory.group import Group from ansible.utils.vars import combine_vars @@ -38,7 +40,7 @@ def __setstate__(self, data): def __eq__(self, other): if not isinstance(other, Host): return False - return id(self) == id(other) + return self._uuid == other._uuid def __ne__(self, other): return not self.__eq__(other) @@ -55,6 +57,7 @@ def serialize(self): name=self.name, vars=self.vars.copy(), address=self.address, + uuid=self._uuid, gathered_facts=self._gathered_facts, groups=groups, ) @@ -65,6 +68,7 @@ def deserialize(self, data): self.name = data.get('name') self.vars = data.get('vars', dict()) self.address = data.get('address', '') + self._uuid = data.get('uuid', uuid.uuid4()) groups = data.get('groups', []) for group_data in groups: @@ -84,6 +88,7 @@ def __init__(self, name=None, port=None): self.set_variable('ansible_port', int(port)) self._gathered_facts = False + self._uuid = 
uuid.uuid4() def __repr__(self): return self.get_name() From 0434644d12c64918d5182a7c0b0057687b1cdbc2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Dec 2015 11:50:39 -0800 Subject: [PATCH 3022/3617] Transform exceptions into ansible messages via to_unicode instead of str to avoid tracebacks. Fixes #13385 --- lib/ansible/executor/task_executor.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 4a7d7464ef862b..5d7430fad25062 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -146,7 +146,7 @@ def _clean_res(res): except AttributeError: pass except Exception as e: - display.debug("error closing connection: %s" % to_unicode(e)) + display.debug(u"error closing connection: %s" % to_unicode(e)) def _get_loop_items(self): ''' @@ -183,7 +183,7 @@ def _get_loop_items(self): loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=True) except AnsibleUndefinedVariable as e: - if 'has no attribute' in str(e): + if u'has no attribute' in to_unicode(e): loop_terms = [] display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.") else: @@ -231,7 +231,7 @@ def _run_loop(self, items): tmp_task = self._task.copy() tmp_play_context = self._play_context.copy() except AnsibleParserError as e: - results.append(dict(failed=True, msg=str(e))) + results.append(dict(failed=True, msg=to_unicode(e))) continue # now we swap the internal task and play context with their copies, @@ -401,7 +401,7 @@ def _execute(self, variables=None): try: result = self._handler.run(task_vars=variables) except AnsibleConnectionFailure as e: - return dict(unreachable=True, msg=str(e)) + return dict(unreachable=True, msg=to_unicode(e)) display.debug("handler run complete") if self._task.async > 0: @@ -412,7 +412,7 
@@ def _execute(self, variables=None): return result result = json.loads(result.get('stdout')) except (TypeError, ValueError) as e: - return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e)) + return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e)) if self._task.poll > 0: result = self._poll_async_result(result=result, templar=templar) From e8954e556a6f36e0eaeb8160bc04171ed655c43f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=9CBrice?= Date: Fri, 4 Dec 2015 16:24:19 -0500 Subject: [PATCH 3023/3617] comment examples in default hosts file --- examples/hosts | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/examples/hosts b/examples/hosts index ce4cbb7caa474b..841f4bc6500208 100644 --- a/examples/hosts +++ b/examples/hosts @@ -10,35 +10,35 @@ # Ex 1: Ungrouped hosts, specify before any group headers. -green.example.com -blue.example.com -192.168.100.1 -192.168.100.10 +## green.example.com +## blue.example.com +## 192.168.100.1 +## 192.168.100.10 # Ex 2: A collection of hosts belonging to the 'webservers' group -[webservers] -alpha.example.org -beta.example.org -192.168.1.100 -192.168.1.110 +## [webservers] +## alpha.example.org +## beta.example.org +## 192.168.1.100 +## 192.168.1.110 # If you have multiple hosts following a pattern you can specify # them like this: -www[001:006].example.com +## www[001:006].example.com # Ex 3: A collection of database servers in the 'dbservers' group -[dbservers] - -db01.intranet.mydomain.net -db02.intranet.mydomain.net -10.25.1.56 -10.25.1.57 +## [dbservers] +## +## db01.intranet.mydomain.net +## db02.intranet.mydomain.net +## 10.25.1.56 +## 10.25.1.57 # Here's another example of host ranges, this time there are no # leading 0s: -db-[99:101]-node.example.com +## db-[99:101]-node.example.com From 1eb0a1ddf7cf2f9501ea48915307652e8ab55049 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Dec 2015 15:16:02 
-0800 Subject: [PATCH 3024/3617] Correct VERSION in the devel branch --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 879b416e609a82..7ec1d6db408777 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.1 +2.1.0 From a96a879fcf8c80ee37ff3898f729d7baeac1cd6f Mon Sep 17 00:00:00 2001 From: sam-at-github Date: Sat, 5 Dec 2015 13:06:58 +1100 Subject: [PATCH 3025/3617] Add fullstop to make sentence make sense. Touch parargraph while at it. --- docsite/rst/playbooks_variables.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 18f1e57f7284f3..307387a72e58bf 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -793,8 +793,8 @@ Basically, anything that goes into "role defaults" (the defaults folder inside t .. rubric:: Footnotes -.. [1] Tasks in each role will see their own role's defaults tasks outside of roles will the last role's defaults -.. [2] Variables defined in inventory file or provided by dynamic inventory +.. [1] Tasks in each role will see their own role's defaults. Tasks defined outside of a role will see the last role's defaults. +.. [2] Variables defined in inventory file or provided by dynamic inventory. .. note:: Within a any section, redefining a var will overwrite the previous instance. If multiple groups have the same variable, the last one loaded wins. 
From fa71c38c2a7332ed450464e9239aac6e6698b095 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 5 Dec 2015 01:47:35 -0500 Subject: [PATCH 3026/3617] updated pull location in changelog it was in between of backslash description and example --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9f8b4b76a9800..d246be109338b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,9 +37,9 @@ Ansible Changes By Release * New ssh configuration variables(`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a per-group or per-host ssh ProxyCommand or set any other ssh options. `ansible_ssh_extra_args` is used to set options that are accepted only by ssh (not sftp or scp, which have their own analogous settings). +* ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features. * Backslashes used when specifying parameters in jinja2 expressions in YAML dicts sometimes needed to be escaped twice. This has been fixed so that escaping once works. Here's an example of how playbooks need to be modified: -* ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features. ``` # Syntax in 1.9.x From 0129fb0a44080d324d110c3d5c5223ab2aa138b2 Mon Sep 17 00:00:00 2001 From: Nils Steinger Date: Sat, 5 Dec 2015 15:28:37 +0100 Subject: [PATCH 3027/3617] Remove duplicates from host list *before* caching it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ansible previously added hosts to the host list multiple times for commands like `ansible -i 'localhost,' -c local -m ping 'localhost,localhost' --list-hosts`. 8d5f36a fixed the obvious error, but still added the un-deduplicated list to a cache, so all future invocations of get_hosts() would retrieve a non-deduplicated list. 
This caused problems down the line: For some reason, Ansible only ever schedules "flush_handlers" tasks (instead of scheduling any actual tasks from the playbook) for hosts that are contained in the host lists multiple times. This probably happens because the host states are stored in a dictionary indexed by the hostnames, so duplicate hostname would cause the state to be overwritten by subsequent invocations of … something. --- lib/ansible/inventory/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 59a3c37bf9326a..14cd169265b571 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -195,8 +195,8 @@ def get_hosts(self, pattern="all", ignore_limits_and_restrictions=False): if self._restriction is not None: hosts = [ h for h in hosts if h in self._restriction ] - HOSTS_PATTERNS_CACHE[pattern_hash] = hosts[:] - return list(set(hosts)) + HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) + return HOSTS_PATTERNS_CACHE[pattern_hash][:] @classmethod def split_host_pattern(cls, pattern): From a1f6d17e37b059aa9d34a004b0aed05a6b8fa3b3 Mon Sep 17 00:00:00 2001 From: Nils Steinger Date: Sat, 5 Dec 2015 15:40:49 +0100 Subject: [PATCH 3028/3617] More meaningful string representation for meta tasks (like 'noop' and 'flush_handlers') --- lib/ansible/playbook/task.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 4f326b628bc4b5..21dbc87becfabc 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -133,7 +133,10 @@ def load(data, block=None, role=None, task_include=None, variable_manager=None, def __repr__(self): ''' returns a human readable representation of the task ''' - return "TASK: %s" % self.get_name() + if self.get_name() == 'meta ': + return "TASK: meta (%s)" % self.args['_raw_params'] + else: + return "TASK: %s" % 
self.get_name() def _preprocess_loop(self, ds, new_ds, k, v): ''' take a lookup plugin name and store it correctly ''' From f89f906f87c2c4d850702404f70cfabaa63be351 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 5 Dec 2015 10:10:25 -0500 Subject: [PATCH 3029/3617] simplified get_hosts code to have 1 retrun point --- lib/ansible/inventory/__init__.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 14cd169265b571..d7d0f03fb1fdd2 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -178,24 +178,24 @@ def get_hosts(self, pattern="all", ignore_limits_and_restrictions=False): if self._restriction: pattern_hash += u":%s" % to_unicode(self._restriction) - if pattern_hash in HOSTS_PATTERNS_CACHE: - return HOSTS_PATTERNS_CACHE[pattern_hash][:] + if pattern_hash not in HOSTS_PATTERNS_CACHE: - patterns = Inventory.split_host_pattern(pattern) - hosts = self._evaluate_patterns(patterns) + patterns = Inventory.split_host_pattern(pattern) + hosts = self._evaluate_patterns(patterns) - # mainly useful for hostvars[host] access - if not ignore_limits_and_restrictions: - # exclude hosts not in a subset, if defined - if self._subset: - subset = self._evaluate_patterns(self._subset) - hosts = [ h for h in hosts if h in subset ] + # mainly useful for hostvars[host] access + if not ignore_limits_and_restrictions: + # exclude hosts not in a subset, if defined + if self._subset: + subset = self._evaluate_patterns(self._subset) + hosts = [ h for h in hosts if h in subset ] + + # exclude hosts mentioned in any restriction (ex: failed hosts) + if self._restriction is not None: + hosts = [ h for h in hosts if h in self._restriction ] - # exclude hosts mentioned in any restriction (ex: failed hosts) - if self._restriction is not None: - hosts = [ h for h in hosts if h in self._restriction ] + HOSTS_PATTERNS_CACHE[pattern_hash] = 
list(set(hosts)) - HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) return HOSTS_PATTERNS_CACHE[pattern_hash][:] @classmethod From 8ea45e8608fc15e07493b11ce28fe3d3f38865b8 Mon Sep 17 00:00:00 2001 From: Luca Berruti Date: Sat, 5 Dec 2015 19:43:02 +0100 Subject: [PATCH 3030/3617] Make no_target_syslog consistent. no_target_syslog = False --> do log on target --- examples/ansible.cfg | 2 +- lib/ansible/constants.py | 2 +- lib/ansible/plugins/action/__init__.py | 2 +- lib/ansible/plugins/action/async.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 74aef7a0246785..87c089f45ae420 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -182,7 +182,7 @@ #no_log = False # prevents logging of tasks, but only on the targets, data is still logged on the master/controller -#no_target_syslog = True +#no_target_syslog = False # controls the compression level of variables sent to # worker processes. At the default of 0, no compression diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 08d522fcb606b8..6faae928dbe82c 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -159,7 +159,7 @@ def load_config_file(): # disclosure DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True) -DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', True, boolean=True) +DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True) # selinux DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 64a3b51e5d3cf4..497143224a72d6 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -382,7 +382,7 @@ def _execute_module(self, 
module_name=None, module_args=None, tmp=None, task_var module_args['_ansible_check_mode'] = True # set no log in the module arguments, if required - if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG: + if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG: module_args['_ansible_no_log'] = True # set debug in the module arguments, if required diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py index 51e2413af27f47..8a7175aeb86b95 100644 --- a/lib/ansible/plugins/action/async.py +++ b/lib/ansible/plugins/action/async.py @@ -48,7 +48,7 @@ def run(self, tmp=None, task_vars=None): env_string = self._compute_environment_string() module_args = self._task.args.copy() - if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG: + if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG: module_args['_ansible_no_log'] = True # configure, upload, and chmod the target module From 955710267c1992c5e3b5b9eb77f4c76e289e3313 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 5 Dec 2015 15:59:51 -0500 Subject: [PATCH 3031/3617] only set become defaults at last possible moment tasks were overriding commandline with their defaults, not with the explicit setting, removed the setting of defaults from task init and pushed down to play context at last possible moment. 
fixes #13362 --- lib/ansible/playbook/become.py | 16 +++++++++------- lib/ansible/playbook/play_context.py | 3 +++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py index 643f2b555d54af..1e579751d46e13 100644 --- a/lib/ansible/playbook/become.py +++ b/lib/ansible/playbook/become.py @@ -90,16 +90,18 @@ def _preprocess_data_become(self, ds): display.deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)") - # if we are becoming someone else, but some fields are unset, - # make sure they're initialized to the default config values - if ds.get('become', False): - if ds.get('become_method', None) is None: - ds['become_method'] = C.DEFAULT_BECOME_METHOD - if ds.get('become_user', None) is None: - ds['become_user'] = C.DEFAULT_BECOME_USER return ds + def set_become_defaults(self, become, become_method, become_user): + ''' if we are becoming someone else, but some fields are unset, + make sure they're initialized to the default config values ''' + if become: + if become_method is None: + become_method = C.DEFAULT_BECOME_METHOD + if become_user is None: + become_user = C.DEFAULT_BECOME_USER + def _get_attr_become(self): ''' Override for the 'become' getattr fetcher, used from Base. 
diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 5c020939808d87..9320a23ed9b457 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -392,6 +392,9 @@ def set_task_and_variable_override(self, task, variables, templar): if new_info.no_log is None: new_info.no_log = C.DEFAULT_NO_LOG + # set become defaults if not previouslly set + task.set_become_defaults(new_info.become, new_info.become_method, new_info.become_user) + return new_info def make_become_cmd(self, cmd, executable=None): From 41773630edcf8ab138a36290c4904c6ba537390b Mon Sep 17 00:00:00 2001 From: Peter Sprygada Date: Mon, 23 Nov 2015 22:01:27 -0500 Subject: [PATCH 3032/3617] adds new device argument to nxapi command arguments The device argument allows a dict of nxapi parameters to be passed to the module to simplify passing the nxapi parameters --- lib/ansible/module_utils/nxapi.py | 75 ++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 25 deletions(-) diff --git a/lib/ansible/module_utils/nxapi.py b/lib/ansible/module_utils/nxapi.py index 0589b9a50c32b5..35bcc442fbd199 100644 --- a/lib/ansible/module_utils/nxapi.py +++ b/lib/ansible/module_utils/nxapi.py @@ -32,16 +32,16 @@ The nxapi module provides the following common argument spec: - * host (str) - [Required] The IPv4 address or FQDN of the network device + * host (str) - The IPv4 address or FQDN of the network device * port (str) - Overrides the default port to use for the HTTP/S connection. The default values are 80 for HTTP and 443 for HTTPS - * url_username (str) - [Required] The username to use to authenticate + * username (str) - The username to use to authenticate the HTTP/S connection. Aliases: username - * url_password (str) - [Required] The password to use to authenticate + * password (str) - The password to use to authenticate the HTTP/S connection. 
Aliases: password * use_ssl (bool) - Specifies whether or not to use an encrypted (HTTPS) @@ -51,6 +51,10 @@ device. Valid values in `cli_show`, `cli_show_ascii`, 'cli_conf` and `bash`. The default value is `cli_show_ascii` + * device (dict) - Used to send the entire set of connection parameters + as a dict object. This argument is mutually exclusive with the + host argument + In order to communicate with Cisco NXOS devices, the NXAPI feature must be enabled and configured on the device. @@ -58,34 +62,52 @@ NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash'] -def nxapi_argument_spec(spec=None): - """Creates an argument spec for working with NXAPI +NXAPI_COMMON_ARGS = dict( + host=dict(), + port=dict(), + username=dict(), + password=dict(), + use_ssl=dict(default=False, type='bool'), + device=dict(), + command_type=dict(default='cli_show_ascii', choices=NXAPI_COMMAND_TYPES) +) + +def nxapi_module(**kwargs): + """Append the common args to the argument_spec """ - arg_spec = url_argument_spec() - arg_spec.update(dict( - host=dict(required=True), - port=dict(), - url_username=dict(required=True, aliases=['username']), - url_password=dict(required=True, aliases=['password']), - use_ssl=dict(default=False, type='bool'), - command_type=dict(default='cli_show_ascii', choices=NXAPI_COMMAND_TYPES) - )) - if spec: - arg_spec.update(spec) - return arg_spec - -def nxapi_url(module): + spec = kwargs.get('argument_spec') or dict() + + argument_spec = url_argument_spec() + argument_spec.update(NXAPI_COMMON_ARGS) + if kwargs.get('argument_spec'): + argument_spec.update(kwargs['argument_spec']) + kwargs['argument_spec'] = argument_spec + + module = AnsibleModule(**kwargs) + + device = module.params.get('device') or dict() + for key, value in device.iteritems(): + if key in NXAPI_COMMON_ARGS: + module.params[key] = value + + params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS)) + for key, value in params.iteritems(): + if key != 'device': + 
module.params[key] = value + + return module + +def nxapi_url(params): """Constructs a valid NXAPI url """ - if module.params['use_ssl']: + if params['use_ssl']: proto = 'https' else: proto = 'http' - host = module.params['host'] + host = params['host'] url = '{}://{}'.format(proto, host) - port = module.params['port'] - if module.params['port']: - url = '{}:{}'.format(url, module.params['port']) + if params['port']: + url = '{}:{}'.format(url, params['port']) url = '{}/ins'.format(url) return url @@ -109,7 +131,7 @@ def nxapi_body(commands, command_type, **kwargs): def nxapi_command(module, commands, command_type=None, **kwargs): """Sends the list of commands to the device over NXAPI """ - url = nxapi_url(module) + url = nxapi_url(module.params) command_type = command_type or module.params['command_type'] @@ -118,6 +140,9 @@ def nxapi_command(module, commands, command_type=None, **kwargs): headers = {'Content-Type': 'text/json'} + module.params['url_username'] = module.params['username'] + module.params['url_password'] = module.params['password'] + response, headers = fetch_url(module, url, data=data, headers=headers, method='POST') From a8e015cc22d248e965157605e30b810de280b0a4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 6 Dec 2015 22:12:48 -0800 Subject: [PATCH 3033/3617] Add representers so we can output yaml for all the types we read in from yaml --- lib/ansible/parsing/yaml/dumper.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py index a51289b09b9b6c..a8a5015b8eaf39 100644 --- a/lib/ansible/parsing/yaml/dumper.py +++ b/lib/ansible/parsing/yaml/dumper.py @@ -22,7 +22,7 @@ import yaml from ansible.compat.six import PY3 -from ansible.parsing.yaml.objects import AnsibleUnicode +from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping from ansible.vars.hostvars import HostVars class AnsibleDumper(yaml.SafeDumper): @@ 
-50,3 +50,13 @@ def represent_hostvars(self, data): represent_hostvars, ) +AnsibleDumper.add_representer( + AnsibleSequence, + yaml.representer.SafeRepresenter.represent_list, +) + +AnsibleDumper.add_representer( + AnsibleMapping, + yaml.representer.SafeRepresenter.represent_dict, +) + From 4d637e5780503448840a3e4ef824b8f72aa5112a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 6 Dec 2015 22:16:31 -0800 Subject: [PATCH 3034/3617] Use self.args when we parse arguments that way the arguments can be constructed manually --- lib/ansible/cli/adhoc.py | 2 +- lib/ansible/cli/doc.py | 2 +- lib/ansible/cli/galaxy.py | 2 +- lib/ansible/cli/playbook.py | 2 +- lib/ansible/cli/pull.py | 2 +- lib/ansible/cli/vault.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 25f29fc2976b5c..120b2302112b3c 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -70,7 +70,7 @@ def parse(self): help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, default=C.DEFAULT_MODULE_NAME) - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) if len(self.args) != 1: raise AnsibleOptionsError("Missing target hosts") diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index 4eef1dd5dd690c..a17164eb50ed8d 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -62,7 +62,7 @@ def parse(self): self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet', help='Show playbook snippet for specified module(s)') - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity def run(self): diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 31c21146fc1239..94c04614ace98e 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -113,7 +113,7 @@ 
def parse(self): help='Force overwriting an existing role') # get options, args and galaxy object - self.options, self.args =self.parser.parse_args() + self.options, self.args =self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity self.galaxy = Galaxy(self.options) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index fc81f964563134..a9c0ed018dc0bc 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -72,7 +72,7 @@ def parse(self): parser.add_option('--start-at-task', dest='start_at_task', help="start the playbook at the task matching this name") - self.options, self.args = parser.parse_args() + self.options, self.args = parser.parse_args(self.args[1:]) self.parser = parser diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 1543c704d57697..593d601e8d44a5 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -90,7 +90,7 @@ def parse(self): help='verify GPG signature of checked out commit, if it fails abort running the playbook.' 
' This needs the corresponding VCS module to support such an operation') - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) if not self.options.dest: hostname = socket.getfqdn() diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index ac148d4770c766..9908f17e578ac9 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -69,7 +69,7 @@ def parse(self): elif self.action == "rekey": self.parser.set_usage("usage: %prog rekey [options] file_name") - self.options, self.args = self.parser.parse_args() + self.options, self.args = self.parser.parse_args(self.args[1:]) display.verbosity = self.options.verbosity can_output = ['encrypt', 'decrypt'] From 2c8eee956fb574ab0ef2ae362a2936f95a2d80cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Mon, 7 Dec 2015 09:25:37 +0100 Subject: [PATCH 3035/3617] Fix issue when var name is the same as content. See https://github.com/ansible/ansible/issues/13453 for more details. --- lib/ansible/plugins/action/debug.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index a024e28b01dfe2..1d8e28c7a4a45a 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -45,8 +45,12 @@ def run(self, tmp=None, task_vars=None): # If var is a list or dict, use the type as key to display result[to_unicode(type(self._task.args['var']))] = results else: + # If var name is same as result, try to template it if results == self._task.args['var']: - results = "VARIABLE IS NOT DEFINED!" + try: + results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True) + except: + results = "VARIABLE IS NOT DEFINED!" 
result[self._task.args['var']] = results else: result['msg'] = 'here we are' From dcedfbe26c2aacc901fe5ef84b51103feb92990f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 7 Dec 2015 09:54:55 -0800 Subject: [PATCH 3036/3617] corrected usage of ec2.py's profile option this was never introduced into ansible-playbook though the docs stated otherwise. We still explain how to use the env var to get the same result. --- docsite/rst/intro_dynamic_inventory.rst | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 1a2bd6f72c3a8c..5f491ebc2eff3e 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -111,9 +111,8 @@ If you use boto profiles to manage multiple AWS accounts, you can pass ``--profi aws_access_key_id = aws_secret_access_key = -You can then run ``ec2.py --profile prod`` to get the inventory for the prod account, or run playbooks with: ``ansible-playbook -i 'ec2.py --profile prod' myplaybook.yml``. - -Alternatively, use the ``AWS_PROFILE`` variable - e.g. ``AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml`` +You can then run ``ec2.py --profile prod`` to get the inventory for the prod account, this option is not supported by ``anisble-playbook`` though. +But you can use the ``AWS_PROFILE`` variable - e.g. ``AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml`` Since each region requires its own API call, if you are only using a small set of regions, feel free to edit ``ec2.ini`` and list only the regions you are interested in. There are other config options in ``ec2.ini`` including cache control, and destination variables. 
From 97626475db9fab72c27a7904d8e745638a6dde1f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 7 Dec 2015 10:04:48 -0800 Subject: [PATCH 3037/3617] added new ec2_vpc_net_facts to 2.1 changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d246be109338b9..36886531bb53ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ Ansible Changes By Release ## 2.1 TBD - ACTIVE DEVELOPMENT ####New Modules: +* aws: ec2_vpc_net_facts * cloudstack: cs_volume ####New Filters: From 9ae1dede0387c02b0f3772f168e94c99ce9f23a8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 06:36:04 -0800 Subject: [PATCH 3038/3617] adhoc does not load plugins by default reimplemented feature from 1.x which kept additional callbacks from poluting adhoc unless specifically asked for through configuration. --- lib/ansible/cli/adhoc.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 120b2302112b3c..912b07a5c72d04 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -163,6 +163,9 @@ def run(self): else: cb = 'minimal' + if not C.DEFAULT_LOAD_CALLBACK_PLUGINS: + C.DEFAULT_CALLBACK_WHITELIST = [] + if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree From 8d500215b68aafe49c0416867af3fc701addf602 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 12 Nov 2015 16:15:42 -0500 Subject: [PATCH 3039/3617] trigger jenkins integration tests --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index cec8ccca971af4..2e1f15559d31a2 100644 --- a/README.md +++ b/README.md @@ -55,3 +55,4 @@ Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.de Ansible is sponsored by [Ansible, Inc](http://ansible.com) + From 970d7cadb7f50e5f55b3aa1c12af130957f67204 Mon Sep 17 00:00:00 2001 From: David L Ballenger Date: Tue, 8 Dec 2015 07:11:02 -0800 Subject: [PATCH 
3040/3617] Add ssh_host support for MacOSX El Capitan. OS X El Capitan moved the /etc/ssh_* files into /etc/ssh/. This fix adds a distribution version check for Darwin to set the keydir appropriately on El Capitan and later. --- lib/ansible/module_utils/facts.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 4120a51fb5b1ca..94a5a11f726cce 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -524,7 +524,10 @@ def get_public_ssh_host_keys(self): keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519') if self.facts['system'] == 'Darwin': - keydir = '/etc' + if self.facts['distribution'] == 'MacOSX' and LooseVersion(self.facts['distribution_version']) >= LooseVersion('10.11') : + keydir = '/etc/ssh' + else: + keydir = '/etc' else: keydir = '/etc/ssh' From 9c4eae525306bf201304a15d36f531b0308cd25e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Dec 2015 11:55:35 -0500 Subject: [PATCH 3041/3617] Fix always_run support in the action plugin for template when copying Fixes #13418 --- lib/ansible/plugins/action/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 109f3e80c0bba7..5edc4e8a2c448d 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -157,7 +157,7 @@ def run(self, tmp=None, task_vars=None): if self._play_context.diff: diff = self._get_diff_data(dest, resultant, task_vars, source_file=False) - if not self._play_context.check_mode: # do actual work thorugh copy + if not self._play_context.check_mode or self._task.always_run: # do actual work thorugh copy xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user From 5cac8efd73ff39268d2bebc1f501e3ae662add9d Mon Sep 17 00:00:00 2001 
From: Jeremy Audet Date: Tue, 8 Dec 2015 09:39:45 -0500 Subject: [PATCH 3042/3617] Make "make webdocs" compatible with Python 3 The `webdocs` make target fails under Python 3. It fails due to a variety of syntax errors, such as the use of `except Foo, e` and `print 'foo'`. Fix #13463 by making code compatible with both Python 2 and 3. --- docsite/build-site.py | 23 ++++++++++++----------- hacking/module_formatter.py | 4 ++-- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/docsite/build-site.py b/docsite/build-site.py index 587a189f0774c0..24f9fc9a647f30 100755 --- a/docsite/build-site.py +++ b/docsite/build-site.py @@ -15,6 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import print_function __docformat__ = 'restructuredtext' @@ -24,9 +25,9 @@ try: from sphinx.application import Sphinx except ImportError: - print "#################################" - print "Dependency missing: Python Sphinx" - print "#################################" + print("#################################") + print("Dependency missing: Python Sphinx") + print("#################################") sys.exit(1) import os @@ -40,7 +41,7 @@ def __init__(self): """ Run the DocCommand. """ - print "Creating html documentation ..." + print("Creating html documentation ...") try: buildername = 'html' @@ -69,10 +70,10 @@ def __init__(self): app.builder.build_all() - except ImportError, ie: + except ImportError: traceback.print_exc() - except Exception, ex: - print >> sys.stderr, "FAIL! exiting ... (%s)" % ex + except Exception as ex: + print("FAIL! exiting ... (%s)" % ex, file=sys.stderr) def build_docs(self): self.app.builder.build_all() @@ -83,9 +84,9 @@ def build_rst_docs(): if __name__ == '__main__': if '-h' in sys.argv or '--help' in sys.argv: - print "This script builds the html documentation from rst/asciidoc sources.\n" - print " Run 'make docs' to build everything." 
- print " Run 'make viewdocs' to build and then preview in a web browser." + print("This script builds the html documentation from rst/asciidoc sources.\n") + print(" Run 'make docs' to build everything.") + print(" Run 'make viewdocs' to build and then preview in a web browser.") sys.exit(0) build_rst_docs() @@ -93,4 +94,4 @@ def build_rst_docs(): if "view" in sys.argv: import webbrowser if not webbrowser.open('htmlout/index.html'): - print >> sys.stderr, "Could not open on your webbrowser." + print("Could not open on your webbrowser.", file=sys.stderr) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f4ab5d7d9ab846..4c94ca3f2c4233 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -140,7 +140,7 @@ def list_modules(module_dir, depth=0): if os.path.isdir(d): res = list_modules(d, depth + 1) - for key in res.keys(): + for key in list(res.keys()): if key in categories: categories[key] = merge_hash(categories[key], res[key]) res.pop(key, None) @@ -451,7 +451,7 @@ def main(): categories = list_modules(options.module_dir) last_category = None - category_names = categories.keys() + category_names = list(categories.keys()) category_names.sort() category_list_path = os.path.join(options.output_dir, "modules_by_category.rst") From d4ccb0be59c86d8518ba4becaed5c7442d8758fc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 09:20:49 -0800 Subject: [PATCH 3043/3617] have always_run override check mode for a task Fixes #13418 --- lib/ansible/playbook/play_context.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 9320a23ed9b457..81223500adf879 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -395,6 +395,10 @@ def set_task_and_variable_override(self, task, variables, templar): # set become defaults if not previouslly set task.set_become_defaults(new_info.become, 
new_info.become_method, new_info.become_user) + # have always_run override check mode + if task.always_run: + new_info.check_mode = False + return new_info def make_become_cmd(self, cmd, executable=None): From 7ffd578a9d38b80e71ef6df2219f7e887e2909b7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 09:24:20 -0800 Subject: [PATCH 3044/3617] Revert "Fix always_run support in the action plugin for template when copying" This reverts commit 9c4eae525306bf201304a15d36f531b0308cd25e. --- lib/ansible/plugins/action/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 5edc4e8a2c448d..109f3e80c0bba7 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -157,7 +157,7 @@ def run(self, tmp=None, task_vars=None): if self._play_context.diff: diff = self._get_diff_data(dest, resultant, task_vars, source_file=False) - if not self._play_context.check_mode or self._task.always_run: # do actual work thorugh copy + if not self._play_context.check_mode: # do actual work thorugh copy xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant) # fix file permissions when the copy is done as a different user From 05c8bb79f8158ca8a93d50bc798dd1bed02aaa89 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 8 Dec 2015 12:24:42 -0500 Subject: [PATCH 3045/3617] playbook that Ansible jenkins runs moved into core The playbook is already running in jenkins and works. This moves the assets into core for ease of maintenance going forward. 
--- .../ansible.cfg | 2 + .../ec2.yml | 41 ++++++++++ .../inventory | 1 + .../inventory.dynamic | 3 + .../main.yml | 62 ++++++++++++++ .../roles/ansible_deps/.gitignore | 1 + .../roles/ansible_deps/.travis.yml | 37 +++++++++ .../roles/ansible_deps/README.md | 8 ++ .../roles/ansible_deps/defaults/main.yml | 2 + .../roles/ansible_deps/handlers/main.yml | 2 + .../ansible_deps/meta/.galaxy_install_info | 1 + .../roles/ansible_deps/meta/main.yml | 23 ++++++ .../roles/ansible_deps/tasks/main.yml | 81 +++++++++++++++++++ .../roles/ansible_deps/test/inventory | 1 + .../roles/ansible_deps/test/main.yml | 29 +++++++ .../roles/ansible_deps/test/requirements.yml | 2 + .../roles/ansible_deps/vars/main.yml | 2 + .../roles/run_integration/tasks/main.yml | 20 +++++ 18 files changed, 318 insertions(+) create mode 100644 test/utils/ansible-playbook_integration_runner/ansible.cfg create mode 100644 test/utils/ansible-playbook_integration_runner/ec2.yml create mode 100644 test/utils/ansible-playbook_integration_runner/inventory create mode 100644 test/utils/ansible-playbook_integration_runner/inventory.dynamic create mode 100644 test/utils/ansible-playbook_integration_runner/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml create mode 
100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml create mode 100644 test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/ansible.cfg b/test/utils/ansible-playbook_integration_runner/ansible.cfg new file mode 100644 index 00000000000000..14c80651521d3c --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/ansible.cfg @@ -0,0 +1,2 @@ +[defaults] +host_key_checking = False diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml new file mode 100644 index 00000000000000..59e15f0da1a877 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -0,0 +1,41 @@ +- name: Launch Instance + ec2: + group_id: 'sg-07bb906d' # jenkins-slave_new + count: 1 + instance_type: 'm3.medium' + image: '{{ item.image }}' + wait: true + region: 'us-east-1' + keypair: '{{ keypair }}' + aws_access_key: "{{ aws_access_key|default(lookup('env', 'AWS_ACCESS_KEY')) }}" + aws_secret_key: "{{ aws_secret_key|default(lookup('env', 'AWS_SECRET_KEY')) }}" + instance_tags: + jenkins: jenkins_ansible_pr_test + register: ec2 + with_items: slaves +# We could do an async here, that would speed things up + + +- name: Wait for SSH + wait_for: + host: "{{ item['instances'][0]['public_ip'] }}" + port: 22 + delay: 10 + timeout: 320 + state: started + with_items: ec2.results + +- name: Wait a little longer for centos + pause: seconds=20 + +- name: Add hosts group temporary inventory group with pem path + add_host: + name: "{{ item.1.platform }} {{ 
ec2.results[item.0]['instances'][0]['public_ip'] }}" + groups: dynamic_hosts + ansible_ssh_host: "{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" + ansible_ssh_private_key_file: '{{ pem_path }}' + ansible_ssh_user: "{{ item.1.ssh_user }}" + ec2_vars: "{{ ec2.results[item.0]['instances'][0] }}" + ec2_instance_ids: "{{ ec2.results[item.0]['instance_ids'] }}" + with_indexed_items: slaves + diff --git a/test/utils/ansible-playbook_integration_runner/inventory b/test/utils/ansible-playbook_integration_runner/inventory new file mode 100644 index 00000000000000..42de3a1b5d7fa2 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/inventory @@ -0,0 +1 @@ +localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" diff --git a/test/utils/ansible-playbook_integration_runner/inventory.dynamic b/test/utils/ansible-playbook_integration_runner/inventory.dynamic new file mode 100644 index 00000000000000..1aa03b4ed8d697 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/inventory.dynamic @@ -0,0 +1,3 @@ +localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" +[dynamic_hosts] +54.157.26.110 ansible_ssh_user=root ansible_ssh_private_key_file=/Users/meyers/Dropbox/.ssh/Ansible_chris_meyers.pem diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml new file mode 100644 index 00000000000000..8661a6dba9e924 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -0,0 +1,62 @@ +- hosts: all + connection: local + vars: + slaves: + - distribution: "Ubuntu" + version: "12.04" + image: "ami-2ccc7a44" + ssh_user: "ubuntu" + platform: "ubuntu-12.04-x86_64" + - distribution: "Ubuntu" + version: "14.04" + image: "ami-9a562df2" + ssh_user: "ubuntu" + platform: "ubuntu-14.04-x86_64" + - distribution: "CentOS" + version: "6.5" + image: "ami-8997afe0" + ssh_user: "root" + platform: "centos-6.5-x86_64" + - 
distribution: "CentOS" + version: "7" + image: "ami-96a818fe" + ssh_user: "centos" + platform: "centos-7-x86_64" + + tasks: + - debug: var=ansible_version + - include: ec2.yml + when: groups['dynamic_hosts'] is not defined + +- hosts: dynamic_hosts + sudo: true + vars: + credentials_file: '' + test_flags: "" + make_target: "non_destructive" + #pre_tasks: + roles: + - { role: ansible_deps, tags: ansible_deps } + - { role: run_integration, + tags: run_integration, + run_integration_test_flags: "{{ test_flags }}", + run_integration_credentials_file: "{{ credentials_file }}", + run_integration_make_target: "{{ make_target }}", } + tasks: + + - name: Kill ec2 instances + sudo: false + local_action: + module: ec2 + state: absent + region: 'us-east-1' + instance_ids: "{{ hostvars[item]['ec2_instance_ids'] }}" + when: hostvars[item]['ec2_instance_ids'] is defined and item == inventory_hostname + with_items: groups['dynamic_hosts'] + + - set_fact: + ansible_connection: local + + - name: Fail + shell: 'echo "{{ inventory_hostname }}, Failed" && exit 1' + when: "test_results.rc != 0" diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore new file mode 100644 index 00000000000000..1377554ebea6f9 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore @@ -0,0 +1 @@ +*.swp diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml new file mode 100644 index 00000000000000..2264f0b20a7528 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml @@ -0,0 +1,37 @@ +sudo: required +dist: trusty +language: python +python: + - "2.7" +services: + - docker +env: + global: + - PATH="/usr/bin:$PATH" + +before_install: + # Ansible doesn't play well with virtualenv + - deactivate + - sudo 
apt-get update -qq + - sudo apt-get install docker-engine + +install: + - sudo pip install docker-py + # software-properties-common for ubuntu 14.04 + # python-software-properties for ubuntu 12.04 + - sudo apt-get install -y sshpass software-properties-common python-software-properties + - sudo apt-add-repository -y ppa:ansible/ansible + - sudo apt-get update -qq + - sudo apt-get install -y ansible + - sudo rm /usr/bin/python && sudo ln -s /usr/bin/python2.7 /usr/bin/python + - ansible-galaxy install -r test/requirements.yml -p test/roles/ + +script: + # Ensure any invocation of ansible-playbook (i.e. sudo) results in host_key_checking disabled + - sudo ansible all -i "127.0.0.1," -m lineinfile -a "regexp=^#host_key_checking dest=/etc/ansible/ansible.cfg line='host_key_checking = False'" -c local + - ansible-playbook -i test/inventory test/main.yml --syntax-check + - sudo ansible-playbook -i test/inventory test/main.yml + +notifications: + # notify ansible galaxy of results + webhooks: http://goo.gl/nSuq9h diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md new file mode 100644 index 00000000000000..f0fc755863cb40 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md @@ -0,0 +1,8 @@ +[![Build Status](https://travis-ci.org/chrismeyersfsu/role-ansible_deps.svg)](https://travis-ci.org/chrismeyersfsu/role-ansible_deps) + +ansible_deps +========= + +Install needed packages to run ansible integration tests. + +This role is periodically synced from ansible core repo to chrismeyersfsu/role-ansible_deps so that automated tests may run and so this role is accessible from galaxy. 
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml new file mode 100644 index 00000000000000..c7837fc56b1988 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for . diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml new file mode 100644 index 00000000000000..050cdd123423e6 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for . diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info new file mode 100644 index 00000000000000..ffc298fff6f8b4 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info @@ -0,0 +1 @@ +{install_date: 'Tue Dec 8 15:06:28 2015', version: master} diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml new file mode 100644 index 00000000000000..07c15d619ee5b2 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml @@ -0,0 +1,23 @@ +--- +galaxy_info: + author: Chris Meyers + description: install ansible integration test dependencies + company: Ansible + license: license (GPLv2, CC-BY, etc) + min_ansible_version: 1.2 + platforms: + - name: EL + versions: + - 6 + - 7 + - name: Ubuntu + versions: + - precise + - trusty + galaxy_tags: + - testing + - integration + - ansible + - dependencies +dependencies: [] + diff --git 
a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml new file mode 100644 index 00000000000000..f71128921d999f --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml @@ -0,0 +1,81 @@ +--- + +- name: Install sudo + yum: name=sudo state=installed + ignore_errors: true + when: ansible_os_family == 'RedHat' + +- name: Install sudo + apt: name=sudo state=installed + ignore_errors: true + when: ansible_os_family == 'Debian' + +- name: Install RH epel + yum: name="epel-release" state=installed + sudo: true + when: ansible_os_family == 'RedHat' + +- name: Install RH ansible dependencies + yum: name="{{ item }}" state=installed + sudo: true + with_items: + - python-pip + - python-httplib2 + - rsync + - subversion + - mercurial + - git + - rubygems + - unzip + - openssl + - make + - gcc + - python-devel + - libselinux-python + when: ansible_os_family == 'RedHat' + +- apt: update_cache=yes + when: ansible_os_family == 'Debian' + +- name: Install Debian ansible dependencies + apt: name="{{ item }}" state=installed update_cache=yes + sudo: true + with_items: + - python-pip + - python-httplib2 + - rsync + - subversion + - mercurial + - git + - unzip + - python-dev + when: ansible_os_family == 'Debian' + +- name: Install ubuntu 12.04 ansible dependencies + apt: name="{{ item }}" state=installed update_cache=yes + sudo: true + with_items: + - rubygems + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04" + +- name: Install ubuntu 14.04 ansible dependencies + apt: name="{{ item }}" state=installed update_cache=yes + sudo: true + with_items: + - rubygems-integration + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "14.04" + +- name: Install ansible pip deps + sudo: true + pip: name="{{ item }}" + with_items: + - PyYAML + - Jinja2 + - paramiko + +- name: Remove tty sudo 
requirement + sudo: true + lineinfile: "dest=/etc/sudoers regexp='^Defaults[ , ]*requiretty' line='#Defaults requiretty'" + when: ansible_os_family == 'RedHat' + + diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory new file mode 100644 index 00000000000000..2302edae31b44d --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory @@ -0,0 +1 @@ +localhost ansible_connection=local diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml new file mode 100644 index 00000000000000..95617dbfac3f1a --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml @@ -0,0 +1,29 @@ +--- +- name: Bring up docker containers + hosts: localhost + gather_facts: false + vars: + inventory: + - name: ansible_deps_host_1 + image: "chrismeyers/centos6" + - name: ansible_deps_host_2 + image: "chrismeyers/ubuntu12.04" + - name: ansible_deps_host_3 + image: "ubuntu-upstart:14.04" + roles: + - { role: provision_docker, provision_docker_company: 'ansible', provision_docker_inventory: "{{ inventory }}" } + +- name: Run ansible_deps Tests + hosts: docker_containers + vars: + git_dir: "/tmp/ansible" + roles: + - { role: ansible_deps } + tasks: + - name: Clone ansible + git: + repo: "https://github.com/ansible/ansible.git" + dest: "{{ git_dir }}" + - name: Invoke ansible in hacking mode + shell: "cd {{ git_dir }} && . 
hacking/env-setup && ansible --version && ansible-playbook --version" + diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml new file mode 100644 index 00000000000000..fa10641a72e925 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml @@ -0,0 +1,2 @@ +- src: chrismeyersfsu.provision_docker + name: provision_docker diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml new file mode 100644 index 00000000000000..a38c5fb0425b14 --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for . diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml new file mode 100644 index 00000000000000..2114567d1522de --- /dev/null +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Sync ansible repo to ec2 instance + synchronize: + src: "{{ sync_dir }}/" + dest: "~/ansible" + +- name: Get ansible source dir + sudo: false + shell: "cd ~ && pwd" + register: results + +- shell: ". 
hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" + sudo: true + environment: + TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}" + CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}" + args: + chdir: "{{ results.stdout }}/ansible" + register: test_results + ignore_errors: true From 822624d061c55c5386e260b67d923627df3394fd Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Tue, 8 Dec 2015 14:05:57 -0500 Subject: [PATCH 3046/3617] rename role ansible_deps to ansible_test_deps --- .../roles/{ansible_deps => ansible_test_deps}/.gitignore | 0 .../roles/{ansible_deps => ansible_test_deps}/.travis.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/README.md | 0 .../roles/{ansible_deps => ansible_test_deps}/defaults/main.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/handlers/main.yml | 0 .../{ansible_deps => ansible_test_deps}/meta/.galaxy_install_info | 0 .../roles/{ansible_deps => ansible_test_deps}/meta/main.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/tasks/main.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/test/inventory | 0 .../roles/{ansible_deps => ansible_test_deps}/test/main.yml | 0 .../{ansible_deps => ansible_test_deps}/test/requirements.yml | 0 .../roles/{ansible_deps => ansible_test_deps}/vars/main.yml | 0 12 files changed, 0 insertions(+), 0 deletions(-) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/.gitignore (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/.travis.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/README.md (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/defaults/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => 
ansible_test_deps}/handlers/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/meta/.galaxy_install_info (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/meta/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/tasks/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/test/inventory (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/test/main.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/test/requirements.yml (100%) rename test/utils/ansible-playbook_integration_runner/roles/{ansible_deps => ansible_test_deps}/vars/main.yml (100%) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.gitignore rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/.travis.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/README.md rename to 
test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/defaults/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/handlers/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/.galaxy_install_info rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/meta/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml similarity index 100% rename from 
test/utils/ansible-playbook_integration_runner/roles/ansible_deps/tasks/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/inventory rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/test/requirements.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml similarity index 100% rename from test/utils/ansible-playbook_integration_runner/roles/ansible_deps/vars/main.yml rename to test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml From de690445bca1f47e773e43b6cd6f1ed0b2ec278b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Dec 2015 14:00:17 -0500 Subject: [PATCH 3047/3617] Make fact delegating configurable, defaulting to 1.x behavior --- 
lib/ansible/playbook/block.py | 1 + lib/ansible/playbook/role/__init__.py | 1 + lib/ansible/playbook/role/include.py | 3 ++- lib/ansible/playbook/task.py | 1 + lib/ansible/plugins/strategy/__init__.py | 2 +- 5 files changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 0de5e635e7ee14..e842883bc82893 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -34,6 +34,7 @@ class Block(Base, Become, Conditional, Taggable): _rescue = FieldAttribute(isa='list', default=[]) _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) # for future consideration? this would be functionally # similar to the 'else' clause for exceptions diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 3cb914689feb69..bd7760d221cc4b 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -61,6 +61,7 @@ def hash_params(params): class Role(Base, Become, Conditional, Taggable): _delegate_to = FieldAttribute(isa='string') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) def __init__(self, play=None): self._role_name = None diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py index 67949e2e124a0c..6e89eb33343e01 100644 --- a/lib/ansible/playbook/role/include.py +++ b/lib/ansible/playbook/role/include.py @@ -40,7 +40,8 @@ class RoleInclude(RoleDefinition): is included for execution in a play. 
""" - _delegate_to = FieldAttribute(isa='string') + _delegate_to = FieldAttribute(isa='string') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None): super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager, loader=loader) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 21dbc87becfabc..6c7730cb2a5094 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -72,6 +72,7 @@ class Task(Base, Conditional, Taggable, Become): _changed_when = FieldAttribute(isa='string') _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') + _delegate_facts = FieldAttribute(isa='bool', defalt=False) _failed_when = FieldAttribute(isa='string') _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 0d0cc4a9dcef1f..732a9293d282c8 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -289,7 +289,7 @@ def _process_pending_results(self, iterator): # find the host we're actually refering too here, which may # be a host that is not really in inventory at all - if task.delegate_to is not None: + if task.delegate_to is not None and task.delegate_facts: task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task) self.add_tqm_variables(task_vars, play=iterator._play) if item is not None: From 398f6bbb89ebdcd3ef0efdbc26d54801a0eb2e55 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Dec 2015 14:34:37 -0500 Subject: [PATCH 3048/3617] Fix typo from 5ae850c --- lib/ansible/playbook/block.py | 2 +- lib/ansible/playbook/role/__init__.py | 2 +- lib/ansible/playbook/role/include.py | 2 +- lib/ansible/playbook/task.py | 2 +- 4 
files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index e842883bc82893..f2d9c82833a065 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -34,7 +34,7 @@ class Block(Base, Become, Conditional, Taggable): _rescue = FieldAttribute(isa='list', default=[]) _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) # for future consideration? this would be functionally # similar to the 'else' clause for exceptions diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index bd7760d221cc4b..1c6b344a4fc12d 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -61,7 +61,7 @@ def hash_params(params): class Role(Base, Become, Conditional, Taggable): _delegate_to = FieldAttribute(isa='string') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) def __init__(self, play=None): self._role_name = None diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py index 6e89eb33343e01..43e2d9e4fc1d28 100644 --- a/lib/ansible/playbook/role/include.py +++ b/lib/ansible/playbook/role/include.py @@ -41,7 +41,7 @@ class RoleInclude(RoleDefinition): """ _delegate_to = FieldAttribute(isa='string') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None): super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager, loader=loader) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 6c7730cb2a5094..17f1952e39c4f5 100644 --- 
a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -72,7 +72,7 @@ class Task(Base, Conditional, Taggable, Become): _changed_when = FieldAttribute(isa='string') _delay = FieldAttribute(isa='int', default=5) _delegate_to = FieldAttribute(isa='string') - _delegate_facts = FieldAttribute(isa='bool', defalt=False) + _delegate_facts = FieldAttribute(isa='bool', default=False) _failed_when = FieldAttribute(isa='string') _first_available_file = FieldAttribute(isa='list') _loop = FieldAttribute(isa='string', private=True) From ec5827c22a1f238591c4c21413bf690ceb83aa1f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 11:52:59 -0800 Subject: [PATCH 3049/3617] updated with delegate_facts directive --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36886531bb53ec..3d31ef4ebb2524 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,7 +33,7 @@ Ansible Changes By Release by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. * Added `meta: refresh_inventory` to force rereading the inventory in a play. This re-executes inventory scripts, but does not force them to ignore any cache they might use. -* Now when you delegate an action that returns ansible_facts, these facts will be applied to the delegated host, unlike before when they were applied to the current host. +* New delegate_facts directive, a boolean that allows you to apply facts to the delegated host (true/yes) instead of the inventory_hostname (no/false) which is the default and previous behaviour. * local connections now work with 'su' as a privilege escalation method * New ssh configuration variables(`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a per-group or per-host ssh ProxyCommand or set any other ssh options. 
From 795fac917ea5970fd9583a41dad7a6d33a626b75 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 11:59:04 -0800 Subject: [PATCH 3050/3617] fixed typo in tree callback, added default dir this would allow it to work with playbooks also --- lib/ansible/plugins/callback/tree.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py index 8b1118864ec2f1..b6ecd6de878279 100644 --- a/lib/ansible/plugins/callback/tree.py +++ b/lib/ansible/plugins/callback/tree.py @@ -41,7 +41,8 @@ def __init__(self): self.tree = TREE_DIR if not self.tree: - self._display.warnings("Disabling tree callback, invalid directory provided to tree option: %s" % self.tree) + self.tree = os.path.expanduser("~/.ansible/tree") + self._display.warning("Defaulting to ~/.ansible/tree, invalid directory provided to tree option: %s" % self.tree) def write_tree_file(self, hostname, buf): ''' write something into treedir/hostname ''' @@ -53,7 +54,7 @@ def write_tree_file(self, hostname, buf): with open(path, 'wb+') as fd: fd.write(buf) except (OSError, IOError) as e: - self._display.warnings("Unable to write to %s's file: %s" % (hostname, str(e))) + self._display.warning("Unable to write to %s's file: %s" % (hostname, str(e))) def result_to_tree(self, result): if self.tree: From 1799de8528926355f51f79f705a6927a05ba018a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 8 Dec 2015 15:02:25 -0500 Subject: [PATCH 3051/3617] Preserve original token when appending to _raw_params in parse_kv Fixes #13311 --- lib/ansible/parsing/splitter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py index c506603acb5741..f24d8ecf9de45f 100644 --- a/lib/ansible/parsing/splitter.py +++ b/lib/ansible/parsing/splitter.py @@ -65,8 +65,8 @@ def parse_kv(args, check_raw=False): raise raw_params = [] - for x in vargs: - x = 
_decode_escapes(x) + for orig_x in vargs: + x = _decode_escapes(orig_x) if "=" in x: pos = 0 try: @@ -90,7 +90,7 @@ def parse_kv(args, check_raw=False): else: options[k.strip()] = unquote(v.strip()) else: - raw_params.append(x) + raw_params.append(orig_x) # recombine the free-form params, if any were found, and assign # them to a special option for use later by the shell/command module From 0e55398e16de1ca99dbe2115a4809c57cdbb5150 Mon Sep 17 00:00:00 2001 From: Jeremy Audet Date: Tue, 8 Dec 2015 09:39:45 -0500 Subject: [PATCH 3052/3617] Make "make webdocs" compatible with Python 3 The `webdocs` make target fails under Python 3. It fails due to a variety of syntax errors, such as the use of `except Foo, e` and `print 'foo'`. Fix #13463 by making code compatible with both Python 2 and 3. --- docsite/build-site.py | 23 ++++++++++++----------- hacking/module_formatter.py | 4 ++-- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/docsite/build-site.py b/docsite/build-site.py index 587a189f0774c0..24f9fc9a647f30 100755 --- a/docsite/build-site.py +++ b/docsite/build-site.py @@ -15,6 +15,7 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +from __future__ import print_function __docformat__ = 'restructuredtext' @@ -24,9 +25,9 @@ try: from sphinx.application import Sphinx except ImportError: - print "#################################" - print "Dependency missing: Python Sphinx" - print "#################################" + print("#################################") + print("Dependency missing: Python Sphinx") + print("#################################") sys.exit(1) import os @@ -40,7 +41,7 @@ def __init__(self): """ Run the DocCommand. """ - print "Creating html documentation ..." 
+ print("Creating html documentation ...") try: buildername = 'html' @@ -69,10 +70,10 @@ def __init__(self): app.builder.build_all() - except ImportError, ie: + except ImportError: traceback.print_exc() - except Exception, ex: - print >> sys.stderr, "FAIL! exiting ... (%s)" % ex + except Exception as ex: + print("FAIL! exiting ... (%s)" % ex, file=sys.stderr) def build_docs(self): self.app.builder.build_all() @@ -83,9 +84,9 @@ def build_rst_docs(): if __name__ == '__main__': if '-h' in sys.argv or '--help' in sys.argv: - print "This script builds the html documentation from rst/asciidoc sources.\n" - print " Run 'make docs' to build everything." - print " Run 'make viewdocs' to build and then preview in a web browser." + print("This script builds the html documentation from rst/asciidoc sources.\n") + print(" Run 'make docs' to build everything.") + print(" Run 'make viewdocs' to build and then preview in a web browser.") sys.exit(0) build_rst_docs() @@ -93,4 +94,4 @@ def build_rst_docs(): if "view" in sys.argv: import webbrowser if not webbrowser.open('htmlout/index.html'): - print >> sys.stderr, "Could not open on your webbrowser." 
+ print("Could not open on your webbrowser.", file=sys.stderr) diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py index f4ab5d7d9ab846..4c94ca3f2c4233 100755 --- a/hacking/module_formatter.py +++ b/hacking/module_formatter.py @@ -140,7 +140,7 @@ def list_modules(module_dir, depth=0): if os.path.isdir(d): res = list_modules(d, depth + 1) - for key in res.keys(): + for key in list(res.keys()): if key in categories: categories[key] = merge_hash(categories[key], res[key]) res.pop(key, None) @@ -451,7 +451,7 @@ def main(): categories = list_modules(options.module_dir) last_category = None - category_names = categories.keys() + category_names = list(categories.keys()) category_names.sort() category_list_path = os.path.join(options.output_dir, "modules_by_category.rst") From 021605a19578309cccc5cdec8c47c512b819d7e0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Nov 2015 18:42:39 -0800 Subject: [PATCH 3053/3617] keep string type filters as strings now we don't try to convert types if using a filter that outputs a specifically formated string made list of filters configurable --- lib/ansible/constants.py | 1 + lib/ansible/template/__init__.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 6faae928dbe82c..0f809db7297da7 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -261,6 +261,7 @@ def load_config_file(): # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" +STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True ) # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index bdd0612bdddef3..8ce2358eb1e6cd 100644 --- a/lib/ansible/template/__init__.py 
+++ b/lib/ansible/template/__init__.py @@ -164,7 +164,8 @@ def __init__(self, loader, shared_loader_obj=None, variables=dict()): self.block_end = self.environment.block_end_string self.variable_start = self.environment.variable_start_string self.variable_end = self.environment.variable_end_string - self._clean_regex = re.compile(r'(?:%s[%s%s]|[%s%s]%s)' % (self.variable_start[0], self.variable_start[1], self.block_start[1], self.block_end[0], self.variable_end[0], self.variable_end[1])) + self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (self.variable_start, self.block_start, self.block_end, self.variable_end)) + self._no_type_regex = re.compile(r'.*\|(?:%s)\s*(?:%s)?$' % ('|'.join(C.STRING_TYPE_FILTERS), self.variable_end)) def _get_filters(self): ''' @@ -278,8 +279,7 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True if fail_on_undefined is None: fail_on_undefined = self._fail_on_undefined_errors - # Don't template unsafe variables, instead drop them back down to - # their constituent type. + # Don't template unsafe variables, instead drop them back down to their constituent type. 
if hasattr(variable, '__UNSAFE__'): if isinstance(variable, text_type): return self._clean_data(text_type(variable)) @@ -294,6 +294,7 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True if isinstance(variable, string_types): result = variable + if self._contains_vars(variable): # Check to see if the string we are trying to render is just referencing a single @@ -319,7 +320,7 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True result = self._cached_result[sha1_hash] else: result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides) - if convert_data: + if convert_data and not self._no_type_regex.match(variable): # if this looks like a dictionary or list, convert it to such using the safe_eval method if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \ result.startswith("[") or result in ("True", "False"): From d82d65ee7bd2506e06ffb225a2e9be6fa1ac36db Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Nov 2015 18:42:39 -0800 Subject: [PATCH 3054/3617] keep string type filters as strings now we don't try to convert types if using a filter that outputs a specifically formated string made list of filters configurable --- lib/ansible/constants.py | 1 + lib/ansible/template/__init__.py | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 6faae928dbe82c..0f809db7297da7 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -261,6 +261,7 @@ def load_config_file(): # characters included in auto-generated passwords DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" +STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], 
islist=True ) # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py index bdd0612bdddef3..8ce2358eb1e6cd 100644 --- a/lib/ansible/template/__init__.py +++ b/lib/ansible/template/__init__.py @@ -164,7 +164,8 @@ def __init__(self, loader, shared_loader_obj=None, variables=dict()): self.block_end = self.environment.block_end_string self.variable_start = self.environment.variable_start_string self.variable_end = self.environment.variable_end_string - self._clean_regex = re.compile(r'(?:%s[%s%s]|[%s%s]%s)' % (self.variable_start[0], self.variable_start[1], self.block_start[1], self.block_end[0], self.variable_end[0], self.variable_end[1])) + self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (self.variable_start, self.block_start, self.block_end, self.variable_end)) + self._no_type_regex = re.compile(r'.*\|(?:%s)\s*(?:%s)?$' % ('|'.join(C.STRING_TYPE_FILTERS), self.variable_end)) def _get_filters(self): ''' @@ -278,8 +279,7 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True if fail_on_undefined is None: fail_on_undefined = self._fail_on_undefined_errors - # Don't template unsafe variables, instead drop them back down to - # their constituent type. + # Don't template unsafe variables, instead drop them back down to their constituent type. 
if hasattr(variable, '__UNSAFE__'): if isinstance(variable, text_type): return self._clean_data(text_type(variable)) @@ -294,6 +294,7 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True if isinstance(variable, string_types): result = variable + if self._contains_vars(variable): # Check to see if the string we are trying to render is just referencing a single @@ -319,7 +320,7 @@ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True result = self._cached_result[sha1_hash] else: result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides) - if convert_data: + if convert_data and not self._no_type_regex.match(variable): # if this looks like a dictionary or list, convert it to such using the safe_eval method if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \ result.startswith("[") or result in ("True", "False"): From c1cec64aa8372f2e7d565a2717c68a075836ae9b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 14:18:11 -0800 Subject: [PATCH 3055/3617] added delegate_facts docs --- docsite/rst/playbooks_delegation.rst | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index 4411e4aa29fda3..4e2e8c372ac41a 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -130,6 +130,29 @@ Here is an example:: Note that you must have passphrase-less SSH keys or an ssh-agent configured for this to work, otherwise rsync will need to ask for a passphrase. +.. _delegate_facts: + +Delegated facts +``````````````` + +.. 
versionadded:: 2.0 + +Before 2.0 any facts gathered by a delegated task were assigned to the `inventory_hostname` (current host) instead of the host which actually produced the facts (delegated to host). +The new directive `delegate_facts` if set to `True` will assign the task's gathered facts to the delegated host instead of the current one.:: + + + - hosts: app_servers + tasks: + - name: gather facts from db servers + setup: + delegate_to: "{{item}}" + delegate_facts: True + with_items: "{{groups['dbservers']}}" + +The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers, +that way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. + + .. _run_once: Run Once From 57391f49ba5e7692e50e4e43ed9c541511eb0936 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 9 Dec 2015 07:52:43 -0500 Subject: [PATCH 3056/3617] removed ansible_python_interpreter * added missed renames of ansible_deps to ansible_test_deps * removed accidental inventory.dynamic file * modified README for ansible_test_deps role --- .../ansible-playbook_integration_runner/inventory | 2 +- .../inventory.dynamic | 3 --- .../utils/ansible-playbook_integration_runner/main.yml | 2 +- .../roles/ansible_test_deps/README.md | 6 ++---- .../roles/ansible_test_deps/test/main.yml | 10 +++++----- 5 files changed, 9 insertions(+), 14 deletions(-) delete mode 100644 test/utils/ansible-playbook_integration_runner/inventory.dynamic diff --git a/test/utils/ansible-playbook_integration_runner/inventory b/test/utils/ansible-playbook_integration_runner/inventory index 42de3a1b5d7fa2..2302edae31b44d 100644 --- a/test/utils/ansible-playbook_integration_runner/inventory +++ b/test/utils/ansible-playbook_integration_runner/inventory @@ -1 +1 @@ -localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" +localhost
ansible_connection=local diff --git a/test/utils/ansible-playbook_integration_runner/inventory.dynamic b/test/utils/ansible-playbook_integration_runner/inventory.dynamic deleted file mode 100644 index 1aa03b4ed8d697..00000000000000 --- a/test/utils/ansible-playbook_integration_runner/inventory.dynamic +++ /dev/null @@ -1,3 +0,0 @@ -localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python" -[dynamic_hosts] -54.157.26.110 ansible_ssh_user=root ansible_ssh_private_key_file=/Users/meyers/Dropbox/.ssh/Ansible_chris_meyers.pem diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 8661a6dba9e924..5d15541490f4ba 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -36,7 +36,7 @@ make_target: "non_destructive" #pre_tasks: roles: - - { role: ansible_deps, tags: ansible_deps } + - { role: ansible_test_deps, tags: ansible_test_deps } - { role: run_integration, tags: run_integration, run_integration_test_flags: "{{ test_flags }}", diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md index f0fc755863cb40..09ffacacaf5590 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md @@ -1,8 +1,6 @@ -[![Build Status](https://travis-ci.org/chrismeyersfsu/role-ansible_deps.svg)](https://travis-ci.org/chrismeyersfsu/role-ansible_deps) +[![Build Status](https://travis-ci.org/chrismeyersfsu/ansible_test_deps.svg)](https://travis-ci.org/chrismeyersfsu/ansible_test_deps) -ansible_deps +ansible_test_deps ========= Install needed packages to run ansible integration tests. 
- -This role is periodically synced from ansible core repo to chrismeyersfsu/role-ansible_deps so that automated tests may run and so this role is accessible from galaxy. diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml index 95617dbfac3f1a..b66d699d5d6ee5 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml @@ -4,21 +4,21 @@ gather_facts: false vars: inventory: - - name: ansible_deps_host_1 + - name: ansible_test_deps_host_1 image: "chrismeyers/centos6" - - name: ansible_deps_host_2 + - name: ansible_test_deps_host_2 image: "chrismeyers/ubuntu12.04" - - name: ansible_deps_host_3 + - name: ansible_test_deps_host_3 image: "ubuntu-upstart:14.04" roles: - { role: provision_docker, provision_docker_company: 'ansible', provision_docker_inventory: "{{ inventory }}" } -- name: Run ansible_deps Tests +- name: Run ansible_test_deps Tests hosts: docker_containers vars: git_dir: "/tmp/ansible" roles: - - { role: ansible_deps } + - { role: ansible_test_deps } tasks: - name: Clone ansible git: From f16628ffecfa5ece0535c9b1c3de78cc78e18575 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 9 Dec 2015 09:37:39 -0500 Subject: [PATCH 3057/3617] symbolic link role for testing --- .../roles/ansible_test_deps/test/roles/ansible_test_deps | 1 + 1 file changed, 1 insertion(+) create mode 120000 test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps new file mode 120000 index 00000000000000..eb6d9edda4b300 --- /dev/null +++ 
b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps @@ -0,0 +1 @@ +../../../ansible_test_deps \ No newline at end of file From 8d66dcda21f176ee7cce21e99f52dea384ef42b8 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Wed, 9 Dec 2015 09:39:45 -0500 Subject: [PATCH 3058/3617] remove .gitignore --- .../roles/ansible_test_deps/.gitignore | 1 - 1 file changed, 1 deletion(-) delete mode 100644 test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore deleted file mode 100644 index 1377554ebea6f9..00000000000000 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.swp From 0719eb3e2d798c6f80223e37dd77bc0ac41c537d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 06:32:04 -0800 Subject: [PATCH 3059/3617] clarified warning from tree callback --- lib/ansible/plugins/callback/tree.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py index b6ecd6de878279..ee710a6dfdfd5b 100644 --- a/lib/ansible/plugins/callback/tree.py +++ b/lib/ansible/plugins/callback/tree.py @@ -42,7 +42,7 @@ def __init__(self): self.tree = TREE_DIR if not self.tree: self.tree = os.path.expanduser("~/.ansible/tree") - self._display.warning("Defaulting to ~/.ansible/tree, invalid directory provided to tree option: %s" % self.tree) + self._display.warning("The tree callback is defaulting to ~/.ansible/tree, as an invalid directory was provided: %s" % self.tree) def write_tree_file(self, hostname, buf): ''' write something into treedir/hostname ''' From 87969868d42cd8aba1c65c8207d059d73407373b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 07:21:00 -0800 Subject: [PATCH 
3060/3617] avoid persistent containers in attribute defaults moved from the field attribute declaration and created a placeholder which then is resolved in the field attribute class. this is to avoid unwanted persistent of the defaults across objects which introduces stealth bugs when multiple objects of the same kind are used in succession while not overriding the default values. --- lib/ansible/playbook/attribute.py | 11 +++++++++++ lib/ansible/playbook/block.py | 6 +++--- lib/ansible/playbook/conditional.py | 2 +- lib/ansible/playbook/play.py | 16 ++++++++-------- lib/ansible/playbook/play_context.py | 4 ++-- lib/ansible/playbook/playbook_include.py | 2 +- lib/ansible/playbook/role/metadata.py | 2 +- lib/ansible/playbook/taggable.py | 2 +- lib/ansible/playbook/task.py | 2 +- 9 files changed, 29 insertions(+), 18 deletions(-) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index 703d9dbca1e831..ce7ed6d8fe7b97 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -32,6 +32,17 @@ def __init__(self, isa=None, private=False, default=None, required=False, listof self.priority = priority self.always_post_validate = always_post_validate + # This is here to avoid `default=` unwanted persistence across object instances + # We cannot rely on None as some fields use it to skip the code + # that would detect an empty container as a user error + if self.default == '_ansible_container': + if self.isa == 'list': + self.default = [] + elif self.isa == 'dict': + self.default = {} + elif self.isa == 'set': + self.default = set() + def __eq__(self, other): return other.priority == self.priority diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index f2d9c82833a065..66009b028afb25 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -30,9 +30,9 @@ class Block(Base, Become, Conditional, Taggable): - _block = FieldAttribute(isa='list', default=[]) - _rescue = 
FieldAttribute(isa='list', default=[]) - _always = FieldAttribute(isa='list', default=[]) + _block = FieldAttribute(isa='list', default='_ansible_container') + _rescue = FieldAttribute(isa='list', default='_ansible_container') + _always = FieldAttribute(isa='list', default='_ansible_container') _delegate_to = FieldAttribute(isa='list') _delegate_facts = FieldAttribute(isa='bool', default=False) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index fc178e2fa1df83..a5b3ca725f88c8 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -33,7 +33,7 @@ class Conditional: to be run conditionally when a condition is met or skipped. ''' - _when = FieldAttribute(isa='list', default=[]) + _when = FieldAttribute(isa='list', default='_ansible_container') def __init__(self, loader=None): # when used directly, this class needs a loader, but we want to diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ed61416e951116..e08c8c600163a3 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,22 +64,22 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True) + _hosts = FieldAttribute(isa='list', default='_ansible_container', required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) # Variable Attributes - _vars_files = FieldAttribute(isa='list', default=[], priority=99) - _vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True) + _vars_files = FieldAttribute(isa='list', default='_ansible_container', priority=99) + _vars_prompt = FieldAttribute(isa='list', default='_ansible_container', always_post_validate=True) _vault_password = FieldAttribute(isa='string', 
always_post_validate=True) # Role Attributes - _roles = FieldAttribute(isa='list', default=[], priority=90) + _roles = FieldAttribute(isa='list', default='_ansible_container', priority=90) # Block (Task) Lists Attributes - _handlers = FieldAttribute(isa='list', default=[]) - _pre_tasks = FieldAttribute(isa='list', default=[]) - _post_tasks = FieldAttribute(isa='list', default=[]) - _tasks = FieldAttribute(isa='list', default=[]) + _handlers = FieldAttribute(isa='list', default='_ansible_container') + _pre_tasks = FieldAttribute(isa='list', default='_ansible_container') + _post_tasks = FieldAttribute(isa='list', default='_ansible_container') + _tasks = FieldAttribute(isa='list', default='_ansible_container') # Flag/Setting Attributes _any_errors_fatal = FieldAttribute(isa='bool', default=False, always_post_validate=True) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 81223500adf879..da291c3c83470b 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -171,8 +171,8 @@ class PlayContext(Base): # general flags _verbosity = FieldAttribute(isa='int', default=0) - _only_tags = FieldAttribute(isa='set', default=set()) - _skip_tags = FieldAttribute(isa='set', default=set()) + _only_tags = FieldAttribute(isa='set', default='_ansible_container') + _skip_tags = FieldAttribute(isa='set', default='_ansible_container') _check_mode = FieldAttribute(isa='bool', default=False) _force_handlers = FieldAttribute(isa='bool', default=False) _start_at_task = FieldAttribute(isa='string') diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index d9af2ba52379bc..52081c415394c5 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -35,7 +35,7 @@ class PlaybookInclude(Base, Conditional, Taggable): _name = FieldAttribute(isa='string') _include = FieldAttribute(isa='string') - _vars = FieldAttribute(isa='dict', 
default=dict()) + _vars = FieldAttribute(isa='dict', default='_ansible_container') @staticmethod def load(data, basedir, variable_manager=None, loader=None): diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py index 58b59145a1cc1b..4bb7d0ce02bbd8 100644 --- a/lib/ansible/playbook/role/metadata.py +++ b/lib/ansible/playbook/role/metadata.py @@ -40,7 +40,7 @@ class RoleMetadata(Base): ''' _allow_duplicates = FieldAttribute(isa='bool', default=False) - _dependencies = FieldAttribute(isa='list', default=[]) + _dependencies = FieldAttribute(isa='list', default='_ansible_container') _galaxy_info = FieldAttribute(isa='GalaxyInfo') def __init__(self, owner=None): diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index 8f5cfa093446a7..37e3261e80da71 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -29,7 +29,7 @@ class Taggable: untagged = frozenset(['untagged']) - _tags = FieldAttribute(isa='list', default=[], listof=(string_types,int)) + _tags = FieldAttribute(isa='list', default='_ansible_container', listof=(string_types,int)) def __init__(self): super(Taggable, self).__init__() diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 17f1952e39c4f5..53a9a3c3931231 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -64,7 +64,7 @@ class Task(Base, Conditional, Taggable, Become): # will be used if defined # might be possible to define others - _args = FieldAttribute(isa='dict', default=dict()) + _args = FieldAttribute(isa='dict', default='_ansible_container') _action = FieldAttribute(isa='string') _any_errors_fatal = FieldAttribute(isa='bool') From 4f84769a17bb92894ee31b08267cf9aec1c0118c Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 10:51:12 -0500 Subject: [PATCH 3061/3617] Galaxy 2.0 --- docsite/rst/galaxy.rst | 291 ++++++++++++++++- lib/ansible/cli/galaxy.py | 326 ++++++++++++++++--- 
lib/ansible/constants.py | 3 +- lib/ansible/galaxy/__init__.py | 2 + lib/ansible/galaxy/api.py | 207 ++++++++---- lib/ansible/galaxy/data/metadata_template.j2 | 14 + lib/ansible/galaxy/data/test_playbook.j2 | 5 + lib/ansible/galaxy/data/travis.j2 | 29 ++ lib/ansible/galaxy/login.py | 113 +++++++ lib/ansible/galaxy/role.py | 10 +- lib/ansible/galaxy/token.py | 67 ++++ 11 files changed, 952 insertions(+), 115 deletions(-) create mode 100644 lib/ansible/galaxy/data/test_playbook.j2 create mode 100644 lib/ansible/galaxy/data/travis.j2 create mode 100644 lib/ansible/galaxy/login.py create mode 100644 lib/ansible/galaxy/token.py diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 1b9475c418d85d..783ac15e456a76 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -8,7 +8,7 @@ Ansible Galaxy The Website ``````````` -The website `Ansible Galaxy `_, is a free site for finding, downloading, rating, and reviewing all kinds of community developed Ansible roles and can be a great way to get a jumpstart on your automation projects. +The website `Ansible Galaxy `_, is a free site for finding, downloading, and sharing community developed Ansible roles. Downloading roles from Galaxy is a great way to jumpstart your automation projects. You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later. @@ -24,7 +24,7 @@ Installing Roles The most obvious is downloading roles from the Ansible Galaxy website:: - ansible-galaxy install username.rolename + $ ansible-galaxy install username.rolename .. _galaxy_cli_roles_path: @@ -33,23 +33,16 @@ roles_path You can specify a particular directory where you want the downloaded roles to be placed:: - ansible-galaxy install username.role -p ~/Code/ansible_roles/ + $ ansible-galaxy install username.role -p ~/Code/ansible_roles/ This can be useful if you have a master folder that contains ansible galaxy roles shared across several projects. 
The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured). -Building out Role Scaffolding ------------------------------ - -It can also be used to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: - - ansible-galaxy init rolename - Installing Multiple Roles From A File -------------------------------------- +===================================== To install multiple roles, the ansible-galaxy CLI can be fed a requirements file. All versions of ansible allow the following syntax for installing roles from the Ansible Galaxy website:: - ansible-galaxy install -r requirements.txt + $ ansible-galaxy install -r requirements.txt Where the requirements.txt looks like:: @@ -64,7 +57,7 @@ To request specific versions (tags) of a role, use this syntax in the roles file Available versions will be listed on the Ansible Galaxy webpage for that role. Advanced Control over Role Requirements Files ---------------------------------------------- +============================================= For more advanced control over where to download roles from, including support for remote repositories, Ansible 1.8 and later support a new YAML format for the role requirements file, which must end in a 'yml' extension. It works like this:: @@ -121,3 +114,275 @@ Roles pulled from galaxy work as with other SCM sourced roles above. 
To download `irc.freenode.net `_ #ansible IRC chat channel +Building Role Scaffolding +------------------------- + +Use the init command to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires:: + + $ ansible-galaxy init rolename + +The above will create the following directory structure in the current working directory: + +:: + + README.md + .travsis.yml + defaults/ + main.yml + files/ + handlers/ + main.yml + meta/ + main.yml + templates/ + tests/ + inventory + test.yml + vars/ + main.yml + +.. note:: + + .travis.yml and tests/ are new in Ansible 2.0 + +If a directory matching the name of the role already exists in the current working directory, the init command will result in an error. To ignore the error use the --force option. Force will create the above subdirectories and files, replacing anything that matches. + +Search for Roles +---------------- + +The search command provides for querying the Galaxy database, allowing for searching by tags, platforms, author and multiple keywords. For example: + +:: + + $ ansible-galaxy search elasticsearch --author geerlingguy + +The search command will return a list of the first 1000 results matching your search: + +:: + + Found 2 roles matching your search: + + Name Description + ---- ----------- + geerlingguy.elasticsearch Elasticsearch for Linux. + geerlingguy.elasticsearch-curator Elasticsearch curator for Linux. + +.. note:: + + The format of results pictured here is new in Ansible 2.0. + +Get More Information About a Role +--------------------------------- + +Use the info command To view more detail about a specific role: + +:: + + $ ansible-galaxy info username.role_name + +This returns everything found in Galaxy for the role: + +:: + + Role: username.rolename + description: Installs and configures a thing, a distributed, highly available NoSQL thing. 
+ active: True + commit: c01947b7bc89ebc0b8a2e298b87ab416aed9dd57 + commit_message: Adding travis + commit_url: https://github.com/username/repo_name/commit/c01947b7bc89ebc0b8a2e298b87ab + company: My Company, Inc. + created: 2015-12-08T14:17:52.773Z + download_count: 1 + forks_count: 0 + github_branch: + github_repo: repo_name + github_user: username + id: 6381 + is_valid: True + issue_tracker_url: + license: Apache + min_ansible_version: 1.4 + modified: 2015-12-08T18:43:49.085Z + namespace: username + open_issues_count: 0 + path: /Users/username/projects/roles + scm: None + src: username.repo_name + stargazers_count: 0 + travis_status_url: https://travis-ci.org/username/repo_name.svg?branch=master + version: + watchers_count: 1 + +.. note:: + + The format of results pictured here is new in Ansible 2.0. + + +List Installed Roles +-------------------- + +The list command shows the name and version of each role installed in roles_path. + +:: + + $ ansible-galaxy list + + - chouseknecht.role-install_mongod, master + - chouseknecht.test-role-1, v1.0.2 + - chrismeyersfsu.role-iptables, master + - chrismeyersfsu.role-required_vars, master + +Remove an Installed Role +------------------------ + +The remove command will delete a role from roles_path: + +:: + + $ ansible-galaxy remove username.rolename + +Authenticate with Galaxy +------------------------ + +To use the import, delete and setup commands authentication with Galaxy is required. The login command will authenticate the user,retrieve a token from Galaxy, and store it in the user's home directory. + +:: + + $ ansible-galaxy login + + We need your Github login to identify you. + This information will not be sent to Galaxy, only to api.github.com. + The password will not be displayed. + + Use --github-token if you do not want to enter your password. 
+ + Github Username: dsmith + Password for dsmith: + Succesfully logged into Galaxy as dsmith + +As depicted above, the login command prompts for a GitHub username and password. It does NOT send your password to Galaxy. It actually authenticates with GitHub and creates a personal access token. It then sends the personal access token to Galaxy, which in turn verifies that you are you and returns a Galaxy access token. After authentication completes the GitHub personal access token is destroyed. + +If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. Log into GitHub, go to Settings and click on Personal Access Token to create a token. + +Import a Role +------------- + +Roles can be imported using ansible-galaxy. The import command expects that the user previously authenticated with Galaxy using the login command. + +Import any GitHub repo you have access to: + +:: + + $ ansible-galaxy import github_user github_repo + +By default the command will wait for the role to be imported by Galaxy, displaying the results as the import progresses: + +:: + + Successfully submitted import request 41 + Starting import 41: role_name=myrole repo=githubuser/ansible-role-repo ref= + Retrieving Github repo githubuser/ansible-role-repo + Accessing branch: master + Parsing and validating meta/main.yml + Parsing galaxy_tags + Parsing platforms + Adding dependencies + Parsing and validating README.md + Adding repo tags as role versions + Import completed + Status SUCCESS : warnings=0 errors=0 + +Use the --branch option to import a specific branch. If not specified, the default branch for the repo will be used. + +If the --no-wait option is present, the command will not wait for results. Results of the most recent import for any of your roles is available on the Galaxy web site under My Imports. + +.. 
note:: + + The import command is only available in Ansible 2.0. + +Delete a Role +------------- + +Remove a role from the Galaxy web site using the delete command. You can delete any role that you have access to in GitHub. The delete command expects that the user previously authenticated with Galaxy using the login command. + +:: + + ansible-galaxy delete github_user github_repo + +This only removes the role from Galaxy. It does not impact the actual GitHub repo. + +.. note:: + + The delete command is only available in Ansible 2.0. + +Setup Travis Integerations +-------------------------- + +Using the setup command you can enable notifications from `travis `_. The setup command expects that the user previously authenticated with Galaxy using the login command. + +:: + + $ ansible-galaxy setup travis github_user github_repo xxxtravistokenxxx + + Added integration for travis chouseknecht/ansible-role-sendmail + +The setup command requires your Travis token. The Travis token is not stored in Galaxy. It is used along with the GitHub username and repo to create a hash as described in `the Travis documentation `_. The calculated hash is stored in Galaxy and used to verify notifications received from Travis. + +The setup command enables Galaxy to respond to notifications. Follow the `Travis getting started guide `_ to enable the Travis build process for the role repository. + +When you create your .travis.yml file add the following to cause Travis to notify Galaxy when a build completes: + +:: + + notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ + +.. note:: + + The setup command is only available in Ansible 2.0. 
+ + +List Travis Integrtions +======================= + +Use the --list option to display your Travis integrations: + +:: + + $ ansible-galaxy setup --list + + + ID Source Repo + ---------- ---------- ---------- + 2 travis github_user/github_repo + 1 travis github_user/github_repo + + +Remove Travis Integrations +========================== + +Use the --remove option to disable a Travis integration: + +:: + + $ ansible-galaxy setup --remove ID + +Provide the ID of the integration you want disabled. Use the --list option to get the ID. + + + + + + + + + + + + + + + + + + diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 94c04614ace98e..01e0475b24b114 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -22,10 +22,11 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -import os import os.path import sys import yaml +import json +import time from collections import defaultdict from jinja2 import Environment @@ -36,7 +37,10 @@ from ansible.galaxy import Galaxy from ansible.galaxy.api import GalaxyAPI from ansible.galaxy.role import GalaxyRole +from ansible.galaxy.login import GalaxyLogin +from ansible.galaxy.token import GalaxyToken from ansible.playbook.role.requirement import RoleRequirement +from ansible.module_utils.urls import open_url try: from __main__ import display @@ -44,18 +48,52 @@ from ansible.utils.display import Display display = Display() - class GalaxyCLI(CLI): - VALID_ACTIONS = ("init", "info", "install", "list", "remove", "search") - SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) + available_commands = { + "delete": "remove a role from Galaxy", + "import": "add a role contained in a GitHub repo to Galaxy", + "info": "display details about a particular role", + "init": "create a role directory structure in your roles path", + "install": "download a role into your roles path", + 
"list": "enumerate roles found in your roles path", + "login": "authenticate with Galaxy API and store the token", + "remove": "delete a role from your roles path", + "search": "query the Galaxy API", + "setup": "add a TravisCI integration to Galaxy", + } + SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) + def __init__(self, args): - + self.VALID_ACTIONS = self.available_commands.keys() + self.VALID_ACTIONS.sort() self.api = None self.galaxy = None super(GalaxyCLI, self).__init__(args) + def set_action(self): + """ + Get the action the user wants to execute from the sys argv list. + """ + for i in range(0,len(self.args)): + arg = self.args[i] + if arg in self.VALID_ACTIONS: + self.action = arg + del self.args[i] + break + + if not self.action: + self.show_available_actions() + + def show_available_actions(self): + # list available commands + display.display(u'\n' + "usage: ansible-galaxy COMMAND [--help] [options] ...") + display.display(u'\n' + "availabe commands:" + u'\n\n') + for key in self.VALID_ACTIONS: + display.display(u'\t' + "%-12s %s" % (key, self.available_commands[key])) + display.display(' ') + def parse(self): ''' create an options parser for bin/ansible ''' @@ -63,11 +101,21 @@ def parse(self): usage = "usage: %%prog [%s] [--help] [options] ..." 
% "|".join(self.VALID_ACTIONS), epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) - + self.set_action() # options specific to actions - if self.action == "info": + if self.action == "delete": + self.parser.set_usage("usage: %prog delete [options] github_user github_repo") + elif self.action == "import": + self.parser.set_usage("usage: %prog import [options] github_user github_repo") + self.parser.add_option('-n', '--no-wait', dest='wait', action='store_false', default=True, + help='Don\'t wait for import results.') + self.parser.add_option('-b', '--branch', dest='reference', + help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)') + self.parser.add_option('-t', '--status', dest='check_status', action='store_true', default=False, + help='Check the status of the most recent import request for given github_user/github_repo.') + elif self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") elif self.action == "init": self.parser.set_usage("usage: %prog init [options] role_name") @@ -83,27 +131,40 @@ def parse(self): self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') self.parser.add_option('-r', '--role-file', dest='role_file', - help='A file containing a list of roles to be imported') + help='A file containing a list of roles to be imported') elif self.action == "remove": self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": self.parser.set_usage("usage: %prog list [role_name]") + elif self.action == "login": + self.parser.set_usage("usage: %prog login [options]") + self.parser.add_option('-g','--github-token', dest='token', default=None, + help='Identify with github token rather than username and password.') elif self.action == "search": self.parser.add_option('--platforms', dest='platforms', 
help='list of OS platforms to filter by') self.parser.add_option('--galaxy-tags', dest='tags', help='list of galaxy tags to filter by') - self.parser.set_usage("usage: %prog search [] [--galaxy-tags ] [--platforms platform]") + self.parser.add_option('--author', dest='author', + help='GitHub username') + self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]") + elif self.action == "setup": + self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret" + + u'\n\n' + "Create an integration with travis.") + self.parser.add_option('-r', '--remove', dest='remove_id', default=None, + help='Remove the integration matching the provided ID value. Use --list to see ID values.') + self.parser.add_option('-l', '--list', dest="setup_list", action='store_true', default=False, + help='List all of your integrations.') # options that apply to more than one action - if self.action != "init": + if not self.action in ("config","import","init","login","setup"): self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. 
' 'The default is the roles_path configured in your ' 'ansible.cfg file (/etc/ansible/roles if not configured)') - if self.action in ("info","init","install","search"): - self.parser.add_option('-s', '--server', dest='api_server', default="https://galaxy.ansible.com", + if self.action in ("import","info","init","install","login","search","setup","delete"): + self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination') self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True, help='Ignore SSL certificate validation errors.') @@ -112,23 +173,25 @@ def parse(self): self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') - # get options, args and galaxy object - self.options, self.args =self.parser.parse_args(self.args[1:]) - display.verbosity = self.options.verbosity - self.galaxy = Galaxy(self.options) + if self.action: + # get options, args and galaxy object + self.options, self.args =self.parser.parse_args() + display.verbosity = self.options.verbosity + self.galaxy = Galaxy(self.options) return True def run(self): + if not self.action: + return True + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api - if self.action in ("info","install", "search") or (self.action == 'init' and not self.options.offline): - api_server = self.options.api_server - self.api = GalaxyAPI(self.galaxy, api_server) - if not self.api: - raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server) + if self.action in ("import","info","install","search","login","setup","delete") or \ + (self.action == 'init' and not self.options.offline): + self.api = GalaxyAPI(self.galaxy) self.execute() @@ -188,7 +251,7 @@ def execute_init(self): "however it will reset any main.yml files that may have\n" "been modified there already." 
% role_path) - # create the default README.md + # create default README.md if not os.path.exists(role_path): os.makedirs(role_path) readme_path = os.path.join(role_path, "README.md") @@ -196,9 +259,16 @@ def execute_init(self): f.write(self.galaxy.default_readme) f.close() + # create default .travis.yml + travis = Environment().from_string(self.galaxy.default_travis).render() + f = open(os.path.join(role_path, '.travis.yml'), 'w') + f.write(travis) + f.close() + for dir in GalaxyRole.ROLE_DIRS: dir_path = os.path.join(init_path, role_name, dir) main_yml_path = os.path.join(dir_path, 'main.yml') + # create the directory if it doesn't exist already if not os.path.exists(dir_path): os.makedirs(dir_path) @@ -234,6 +304,20 @@ def execute_init(self): f.write(rendered_meta) f.close() pass + elif dir == "tests": + # create tests/test.yml + inject = dict( + role_name = role_name + ) + playbook = Environment().from_string(self.galaxy.default_test).render(inject) + f = open(os.path.join(dir_path, 'test.yml'), 'w') + f.write(playbook) + f.close() + + # create tests/inventory + f = open(os.path.join(dir_path, 'inventory'), 'w') + f.write('localhost') + f.close() elif dir not in ('files','templates'): # just write a (mostly) empty YAML file for main.yml f = open(main_yml_path, 'w') @@ -325,7 +409,7 @@ def execute_install(self): for role in required_roles: role = RoleRequirement.role_yaml_parse(role) - display.debug('found role %s in yaml file' % str(role)) + display.vvv('found role %s in yaml file' % str(role)) if 'name' not in role and 'scm' not in role: raise AnsibleError("Must specify name or src for role") roles_left.append(GalaxyRole(self.galaxy, **role)) @@ -348,7 +432,7 @@ def execute_install(self): roles_left.append(GalaxyRole(self.galaxy, rname.strip())) for role in roles_left: - display.debug('Installing role %s ' % role.name) + display.vvv('Installing role %s ' % role.name) # query the galaxy API for the role data if role.install_info is not None and not force: @@ 
-458,21 +542,189 @@ def execute_list(self): return 0 def execute_search(self): - + page_size = 1000 search = None - if len(self.args) > 1: - raise AnsibleOptionsError("At most a single search term is allowed.") - elif len(self.args) == 1: - search = self.args.pop() - - response = self.api.search_roles(search, self.options.platforms, self.options.tags) - - if 'count' in response: - display.display("Found %d roles matching your search:\n" % response['count']) + + if len(self.args): + terms = [] + for i in range(len(self.args)): + terms.append(self.args.pop()) + search = '+'.join(terms) + + if not search and not self.options.platforms and not self.options.tags and not self.options.author: + raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.") + + response = self.api.search_roles(search, platforms=self.options.platforms, + tags=self.options.tags, author=self.options.author, page_size=page_size) + + if response['count'] == 0: + display.display("No roles match your search.", color="yellow") + return True data = '' - if 'results' in response: - for role in response['results']: - data += self._display_role_info(role) + if response['count'] > page_size: + data += ("Found %d roles matching your search. Showing first %s.\n" % (response['count'], page_size)) + else: + data += ("Found %d roles matching your search:\n" % response['count']) + + max_len = [] + for role in response['results']: + max_len.append(len(role['username'] + '.' + role['name'])) + name_len = max(max_len) + format_str = " %%-%ds %%s\n" % name_len + data +='\n' + data += (format_str % ("Name", "Description")) + data += (format_str % ("----", "-----------")) + for role in response['results']: + data += (format_str % (role['username'] + '.' + role['name'],role['description'])) + self.pager(data) + + return True + + def execute_login(self): + """ + Verify user's identify via Github and retreive an auth token from Galaxy. 
+ """ + # Authenticate with github and retrieve a token + if self.options.token is None: + login = GalaxyLogin(self.galaxy) + github_token = login.create_github_token() + else: + github_token = self.options.token + + galaxy_response = self.api.authenticate(github_token) + + if self.options.token is None: + # Remove the token we created + login.remove_github_token() + + # Store the Galaxy token + token = GalaxyToken() + token.set(galaxy_response['token']) + + display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username']) + return 0 + + def execute_import(self): + """ + Import a role into Galaxy + """ + + colors = { + 'INFO': 'normal', + 'WARNING': 'yellow', + 'ERROR': 'red', + 'SUCCESS': 'green', + 'FAILED': 'red' + } + + if len(self.args) < 2: + raise AnsibleError("Expected a github_username and github_repository. Use --help.") + + github_repo = self.args.pop() + github_user = self.args.pop() + + if self.options.check_status: + task = self.api.get_import_task(github_user=github_user, github_repo=github_repo) + else: + # Submit an import request + task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference) + + if len(task) > 1: + # found multiple roles associated with github_user/github_repo + display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." 
% (github_user,github_repo), + color='yellow') + display.display("The following Galaxy roles are being updated:" + u'\n', color='yellow') + for t in task: + display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color='yellow') + display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), + color='yellow') + return 0 + # found a single role as expected + display.display("Successfully submitted import request %d" % task[0]['id']) + if not self.options.wait: + display.display("Role name: %s" % task[0]['summary_fields']['role']['name']) + display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo'])) + + if self.options.check_status or self.options.wait: + # Get the status of the import + msg_list = [] + finished = False + while not finished: + task = self.api.get_import_task(task_id=task[0]['id']) + for msg in task[0]['summary_fields']['task_messages']: + if msg['id'] not in msg_list: + display.display(msg['message_text'], color=colors[msg['message_type']]) + msg_list.append(msg['id']) + if task[0]['state'] in ['SUCCESS', 'FAILED']: + finished = True + else: + time.sleep(10) + + return 0 + + def execute_setup(self): + """ + Setup an integration from Github or Travis + """ + + if self.options.setup_list: + # List existing integration secrets + secrets = self.api.list_secrets() + if len(secrets) == 0: + # None found + display.display("No integrations found.") + return 0 + display.display(u'\n' + "ID Source Repo", color="green") + display.display("---------- ---------- ----------", color="green") + for secret in secrets: + display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], + secret['github_repo']),color="green") + return 0 + + if self.options.remove_id: + # Remove a secret + self.api.remove_secret(self.options.remove_id) + display.display("Secret removed. 
Integrations using this secret will not longer work.", color="green") + return 0 + + if len(self.args) < 4: + raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret") + return 0 + + secret = self.args.pop() + github_repo = self.args.pop() + github_user = self.args.pop() + source = self.args.pop() + + resp = self.api.add_secret(source, github_user, github_repo, secret) + display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo'])) + + return 0 + + def execute_delete(self): + """ + Delete a role from galaxy.ansible.com + """ + + if len(self.args) < 2: + raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo") + + github_repo = self.args.pop() + github_user = self.args.pop() + resp = self.api.delete_role(github_user, github_repo) + + if len(resp['deleted_roles']) > 1: + display.display("Deleted the following roles:") + display.display("ID User Name") + display.display("------ --------------- ----------") + for role in resp['deleted_roles']: + display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name)) + + display.display(resp['status']) + + return True + + diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 0f809db7297da7..ae10c5e9a42617 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -255,7 +255,8 @@ def load_config_file(): PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True) # galaxy related -DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com') +GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com') +GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, boolean=True) # this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated GALAXY_SCMS = 
get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True) diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py index 00d8c25aecf90b..62823fced47f8d 100644 --- a/lib/ansible/galaxy/__init__.py +++ b/lib/ansible/galaxy/__init__.py @@ -52,6 +52,8 @@ def __init__(self, options): #TODO: move to getter for lazy loading self.default_readme = self._str_from_data_file('readme') self.default_meta = self._str_from_data_file('metadata_template.j2') + self.default_test = self._str_from_data_file('test_playbook.j2') + self.default_travis = self._str_from_data_file('travis.j2') def add_role(self, role): self.roles[role.name] = role diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py index 2918688406f4d5..c1bf2c4ed50b49 100644 --- a/lib/ansible/galaxy/api.py +++ b/lib/ansible/galaxy/api.py @@ -25,11 +25,15 @@ __metaclass__ = type import json +import urllib + from urllib2 import quote as urlquote, HTTPError from urlparse import urlparse +import ansible.constants as C from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url +from ansible.galaxy.token import GalaxyToken try: from __main__ import display @@ -43,45 +47,113 @@ class GalaxyAPI(object): SUPPORTED_VERSIONS = ['v1'] - def __init__(self, galaxy, api_server): + def __init__(self, galaxy): self.galaxy = galaxy - - try: - urlparse(api_server, scheme='https') - except: - raise AnsibleError("Invalid server API url passed: %s" % api_server) - - server_version = self.get_server_api_version('%s/api/' % (api_server)) - if not server_version: - raise AnsibleError("Could not retrieve server API version: %s" % api_server) - + self.token = GalaxyToken() + self._api_server = C.GALAXY_SERVER + self._validate_certs = C.GALAXY_IGNORE_CERTS + + # set validate_certs + if galaxy.options.validate_certs == False: + self._validate_certs = False + display.vvv('Check for valid certs: %s' % self._validate_certs) + + # set the API server + if 
galaxy.options.api_server != C.GALAXY_SERVER: + self._api_server = galaxy.options.api_server + display.vvv("Connecting to galaxy_server: %s" % self._api_server) + + server_version = self.get_server_api_version() + if server_version in self.SUPPORTED_VERSIONS: - self.baseurl = '%s/api/%s' % (api_server, server_version) + self.baseurl = '%s/api/%s' % (self._api_server, server_version) self.version = server_version # for future use - display.vvvvv("Base API: %s" % self.baseurl) + display.vvv("Base API: %s" % self.baseurl) else: raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version) - def get_server_api_version(self, api_server): + def __auth_header(self): + token = self.token.get() + if token is None: + raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.") + return {'Authorization': 'Token ' + token} + + def __call_galaxy(self, url, args=None, headers=None, method=None): + if args and not headers: + headers = self.__auth_header() + try: + display.vvv(url) + resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method) + data = json.load(resp) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['detail']) + return data + + @property + def api_server(self): + return self._api_server + + @property + def validate_certs(self): + return self._validate_certs + + def get_server_api_version(self): """ Fetches the Galaxy API current version to ensure the API server is up and reachable. 
""" - #TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1) - # also should set baseurl using supported_versions which has path - return 'v1' - try: - data = json.load(open_url(api_server, validate_certs=self.galaxy.options.validate_certs)) - return data.get("current_version", 'v1') - except Exception: - # TODO: report error - return None + url = '%s/api/' % self._api_server + data = json.load(open_url(url, validate_certs=self._validate_certs)) + return data['current_version'] + except Exception as e: + raise AnsibleError("The API server (%s) is not responding, please try again later." % url) + + def authenticate(self, github_token): + """ + Retrieve an authentication token + """ + url = '%s/tokens/' % self.baseurl + args = urllib.urlencode({"github_token": github_token}) + resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST") + data = json.load(resp) + return data + + def create_import_task(self, github_user, github_repo, reference=None): + """ + Post an import request + """ + url = '%s/imports/' % self.baseurl + args = urllib.urlencode({ + "github_user": github_user, + "github_repo": github_repo, + "github_reference": reference if reference else "" + }) + data = self.__call_galaxy(url, args=args) + if data.get('results', None): + return data['results'] + return data + def get_import_task(self, task_id=None, github_user=None, github_repo=None): + """ + Check the status of an import task. + """ + url = '%s/imports/' % self.baseurl + if not task_id is None: + url = "%s?id=%d" % (url,task_id) + elif not github_user is None and not github_repo is None: + url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo) + else: + raise AnsibleError("Expected task_id or github_user and github_repo") + + data = self.__call_galaxy(url) + return data['results'] + def lookup_role_by_name(self, role_name, notify=True): """ - Find a role by name + Find a role by name. 
""" role_name = urlquote(role_name) @@ -92,18 +164,12 @@ def lookup_role_by_name(self, role_name, notify=True): if notify: display.display("- downloading role '%s', owned by %s" % (role_name, user_name)) except: - raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name) + raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name) url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name) - display.vvvv("- %s" % (url)) - try: - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) - if len(data["results"]) != 0: - return data["results"][0] - except: - # TODO: report on connection/availability errors - pass - + data = self.__call_galaxy(url) + if len(data["results"]) != 0: + return data["results"][0] return None def fetch_role_related(self, related, role_id): @@ -114,13 +180,12 @@ def fetch_role_related(self, related, role_id): try: url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) results = data['results'] done = (data.get('next', None) is None) while not done: url = '%s%s' % (self.baseurl, data['next']) - display.display(url) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) results += data['results'] done = (data.get('next', None) is None) return results @@ -131,10 +196,9 @@ def get_list(self, what): """ Fetch the list of items specified. 
""" - try: url = '%s/%s/?page_size' % (self.baseurl, what) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) if "results" in data: results = data['results'] else: @@ -144,41 +208,64 @@ def get_list(self, what): done = (data.get('next', None) is None) while not done: url = '%s%s' % (self.baseurl, data['next']) - display.display(url) - data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs)) + data = self.__call_galaxy(url) results += data['results'] done = (data.get('next', None) is None) return results except Exception as error: raise AnsibleError("Failed to download the %s list: %s" % (what, str(error))) - def search_roles(self, search, platforms=None, tags=None): + def search_roles(self, search, **kwargs): - search_url = self.baseurl + '/roles/?page=1' + search_url = self.baseurl + '/search/roles/?' if search: - search_url += '&search=' + urlquote(search) + search_url += '&autocomplete=' + urlquote(search) + + tags = kwargs.get('tags',None) + platforms = kwargs.get('platforms', None) + page_size = kwargs.get('page_size', None) + author = kwargs.get('author', None) - if tags is None: - tags = [] - elif isinstance(tags, basestring): + if tags and isinstance(tags, basestring): tags = tags.split(',') + search_url += '&tags_autocomplete=' + '+'.join(tags) + + if platforms and isinstance(platforms, basestring): + platforms = platforms.split(',') + search_url += '&platforms_autocomplete=' + '+'.join(platforms) - for tag in tags: - search_url += '&chain__tags__name=' + urlquote(tag) + if page_size: + search_url += '&page_size=%s' % page_size - if platforms is None: - platforms = [] - elif isinstance(platforms, basestring): - platforms = platforms.split(',') + if author: + search_url += '&username_autocomplete=%s' % author + + data = self.__call_galaxy(search_url) + return data - for plat in platforms: - search_url += '&chain__platforms__name=' + urlquote(plat) + def 
add_secret(self, source, github_user, github_repo, secret): + url = "%s/notification_secrets/" % self.baseurl + args = urllib.urlencode({ + "source": source, + "github_user": github_user, + "github_repo": github_repo, + "secret": secret + }) + data = self.__call_galaxy(url, args=args) + return data - display.debug("Executing query: %s" % search_url) - try: - data = json.load(open_url(search_url, validate_certs=self.galaxy.options.validate_certs)) - except HTTPError as e: - raise AnsibleError("Unsuccessful request to server: %s" % str(e)) + def list_secrets(self): + url = "%s/notification_secrets" % self.baseurl + data = self.__call_galaxy(url, headers=self.__auth_header()) + return data + + def remove_secret(self, secret_id): + url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id) + data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE') + return data + def delete_role(self, github_user, github_repo): + url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo) + data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE') return data diff --git a/lib/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2 index c618adb3d4b8a2..1054c64bdfa44b 100644 --- a/lib/ansible/galaxy/data/metadata_template.j2 +++ b/lib/ansible/galaxy/data/metadata_template.j2 @@ -2,9 +2,11 @@ galaxy_info: author: {{ author }} description: {{description}} company: {{ company }} + # If the issue tracker for your role is not on github, uncomment the # next line and provide a value # issue_tracker_url: {{ issue_tracker_url }} + # Some suggested licenses: # - BSD (default) # - MIT @@ -13,7 +15,17 @@ galaxy_info: # - Apache # - CC-BY license: {{ license }} + min_ansible_version: {{ min_ansible_version }} + + # Optionally specify the branch Galaxy will use when accessing the GitHub + # repo for this role. During role install, if no tags are available, + # Galaxy will use this branch. 
During import Galaxy will access files on + # this branch. If travis integration is cofigured, only notification for this + # branch will be accepted. Otherwise, in all cases, the repo's default branch + # (usually master) will be used. + #github_branch: + # # Below are all platforms currently available. Just uncomment # the ones that apply to your role. If you don't see your @@ -28,6 +40,7 @@ galaxy_info: # - {{ version }} {%- endfor %} {%- endfor %} + galaxy_tags: [] # List tags for your role here, one per line. A tag is # a keyword that describes and categorizes the role. @@ -36,6 +49,7 @@ galaxy_info: # # NOTE: A tag is limited to a single word comprised of # alphanumeric characters. Maximum 20 tags per role. + dependencies: [] # List your role dependencies here, one per line. # Be sure to remove the '[]' above if you add dependencies diff --git a/lib/ansible/galaxy/data/test_playbook.j2 b/lib/ansible/galaxy/data/test_playbook.j2 new file mode 100644 index 00000000000000..45824f60519e64 --- /dev/null +++ b/lib/ansible/galaxy/data/test_playbook.j2 @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - {{ role_name }} \ No newline at end of file diff --git a/lib/ansible/galaxy/data/travis.j2 b/lib/ansible/galaxy/data/travis.j2 new file mode 100644 index 00000000000000..36bbf6208cfd42 --- /dev/null +++ b/lib/ansible/galaxy/data/travis.j2 @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git 
a/lib/ansible/galaxy/login.py b/lib/ansible/galaxy/login.py new file mode 100644 index 00000000000000..3edaed7bc70a38 --- /dev/null +++ b/lib/ansible/galaxy/login.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python + +######################################################################## +# +# (C) 2015, Chris Houseknecht +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +######################################################################## + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import getpass +import json +import urllib + +from urllib2 import quote as urlquote, HTTPError +from urlparse import urlparse + +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.module_utils.urls import open_url +from ansible.utils.color import stringc + +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + +class GalaxyLogin(object): + ''' Class to handle authenticating user with Galaxy API prior to performing CUD operations ''' + + GITHUB_AUTH = 'https://api.github.com/authorizations' + + def __init__(self, galaxy, github_token=None): + self.galaxy = galaxy + self.github_username = None + self.github_password = None + + if github_token == None: + self.get_credentials() + + def get_credentials(self): + display.display(u'\n\n' + "We need your " + stringc("Github 
login",'bright cyan') + + " to identify you.", screen_only=True) + display.display("This information will " + stringc("not be sent to Galaxy",'bright cyan') + + ", only to " + stringc("api.github.com.","yellow"), screen_only=True) + display.display("The password will not be displayed." + u'\n\n', screen_only=True) + display.display("Use " + stringc("--github-token",'yellow') + + " if you do not want to enter your password." + u'\n\n', screen_only=True) + + try: + self.github_username = raw_input("Github Username: ") + except: + pass + + try: + self.github_password = getpass.getpass("Password for %s: " % self.github_username) + except: + pass + + if not self.github_username or not self.github_password: + raise AnsibleError("Invalid Github credentials. Username and password are required.") + + def remove_github_token(self): + ''' + If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot + retrieve the token after creation, so we are forced to create a new one. 
+ ''' + try: + tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username, + url_password=self.github_password, force_basic_auth=True,)) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['message']) + + for token in tokens: + if token['note'] == 'ansible-galaxy login': + display.vvvvv('removing token: %s' % token['token_last_eight']) + try: + open_url('https://api.github.com/authorizations/%d' % token['id'], url_username=self.github_username, + url_password=self.github_password, method='DELETE', force_basic_auth=True,) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['message']) + + def create_github_token(self): + ''' + Create a personal authorization token with a note of 'ansible-galaxy login' + ''' + self.remove_github_token() + args = json.dumps({"scopes":["public_repo"], "note":"ansible-galaxy login"}) + try: + data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username, + url_password=self.github_password, force_basic_auth=True, data=args)) + except HTTPError as e: + res = json.load(e) + raise AnsibleError(res['message']) + return data['token'] diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py index dc9da5d79cee29..36b1e0fbbba591 100644 --- a/lib/ansible/galaxy/role.py +++ b/lib/ansible/galaxy/role.py @@ -46,7 +46,7 @@ class GalaxyRole(object): SUPPORTED_SCMS = set(['git', 'hg']) META_MAIN = os.path.join('meta', 'main.yml') META_INSTALL = os.path.join('meta', '.galaxy_install_info') - ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') + ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests') def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None): @@ -198,10 +198,10 @@ def install(self): role_data = self.src tmp_file = self.fetch(role_data) else: - api = GalaxyAPI(self.galaxy, self.options.api_server) + api = GalaxyAPI(self.galaxy) role_data = api.lookup_role_by_name(self.src) if 
not role_data: - raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.options.api_server)) + raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server)) role_versions = api.fetch_role_related('versions', role_data['id']) if not self.version: @@ -213,8 +213,10 @@ def install(self): loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] loose_versions.sort() self.version = str(loose_versions[-1]) + elif role_data.get('github_branch', None): + self.version = role_data['github_branch'] else: - self.version = 'master' + self.version = 'master' elif self.version != 'master': if role_versions and self.version not in [a.get('name', None) for a in role_versions]: raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions)) diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py new file mode 100644 index 00000000000000..02ca8330697ba5 --- /dev/null +++ b/lib/ansible/galaxy/token.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python + +######################################################################## +# +# (C) 2015, Chris Houseknecht +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +######################################################################## +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os +import yaml +from stat import * + +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + display = Display() + + +class GalaxyToken(object): + ''' Class to storing and retrieving token in ~/.ansible_galaxy ''' + + def __init__(self): + self.file = os.path.expanduser("~") + '/.ansible_galaxy' + self.config = yaml.safe_load(self.__open_config_for_read()) + if not self.config: + self.config = {} + + def __open_config_for_read(self): + if os.path.isfile(self.file): + display.vvv('Opened %s' % self.file) + return open(self.file, 'r') + # config.yml not found, create and chomd u+rw + f = open(self.file,'w') + f.close() + os.chmod(self.file,S_IRUSR|S_IWUSR) # owner has +rw + display.vvv('Created %s' % self.file) + return open(self.file, 'r') + + def set(self, token): + self.config['token'] = token + self.save() + + def get(self): + return self.config.get('token', None) + + def save(self): + with open(self.file,'w') as f: + yaml.safe_dump(self.config,f,default_flow_style=False) + \ No newline at end of file From 04fc3f118f5989df4c2ba462d86a75d0b72fc50a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 08:23:45 -0800 Subject: [PATCH 3062/3617] Code smell test for specifying both required and default in FieldAttributes --- .travis.yml | 1 + test/code-smell/required-and-default-attributes.sh | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100755 test/code-smell/required-and-default-attributes.sh diff --git a/.travis.yml b/.travis.yml index 1ff0ca118d4547..603132f722c223 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,6 +24,7 @@ script: - ./test/code-smell/replace-urlopen.sh . 
- ./test/code-smell/use-compat-six.sh lib - ./test/code-smell/boilerplate.sh +- ./test/code-smell/required-and-default-attributes.sh - if test x"$TOXENV" != x'py24' ; then tox ; fi - if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi #- make -C docsite all diff --git a/test/code-smell/required-and-default-attributes.sh b/test/code-smell/required-and-default-attributes.sh new file mode 100755 index 00000000000000..9822a1559732b4 --- /dev/null +++ b/test/code-smell/required-and-default-attributes.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +BASEDIR=${1-"lib/ansible"} +cd "$BASEDIR" +grep -r FieldAttribute . |grep 'default' | grep 'required' +if test $? -eq 0 ; then + exit 1 +fi +exit 0 + From c64298de02a9998d6c5774ccb1f92a9aec435d74 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 08:22:58 -0800 Subject: [PATCH 3063/3617] Revert "avoid persistent containers in attribute defaults" This reverts commit 87969868d42cd8aba1c65c8207d059d73407373b. 
found better way to do it --- lib/ansible/playbook/attribute.py | 11 ----------- lib/ansible/playbook/block.py | 6 +++--- lib/ansible/playbook/conditional.py | 2 +- lib/ansible/playbook/play.py | 16 ++++++++-------- lib/ansible/playbook/play_context.py | 4 ++-- lib/ansible/playbook/playbook_include.py | 2 +- lib/ansible/playbook/role/metadata.py | 2 +- lib/ansible/playbook/taggable.py | 2 +- lib/ansible/playbook/task.py | 2 +- 9 files changed, 18 insertions(+), 29 deletions(-) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index ce7ed6d8fe7b97..703d9dbca1e831 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -32,17 +32,6 @@ def __init__(self, isa=None, private=False, default=None, required=False, listof self.priority = priority self.always_post_validate = always_post_validate - # This is here to avoid `default=` unwanted persistence across object instances - # We cannot rely on None as some fields use it to skip the code - # that would detect an empty container as a user error - if self.default == '_ansible_container': - if self.isa == 'list': - self.default = [] - elif self.isa == 'dict': - self.default = {} - elif self.isa == 'set': - self.default = set() - def __eq__(self, other): return other.priority == self.priority diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py index 66009b028afb25..f2d9c82833a065 100644 --- a/lib/ansible/playbook/block.py +++ b/lib/ansible/playbook/block.py @@ -30,9 +30,9 @@ class Block(Base, Become, Conditional, Taggable): - _block = FieldAttribute(isa='list', default='_ansible_container') - _rescue = FieldAttribute(isa='list', default='_ansible_container') - _always = FieldAttribute(isa='list', default='_ansible_container') + _block = FieldAttribute(isa='list', default=[]) + _rescue = FieldAttribute(isa='list', default=[]) + _always = FieldAttribute(isa='list', default=[]) _delegate_to = FieldAttribute(isa='list') _delegate_facts = 
FieldAttribute(isa='bool', default=False) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index a5b3ca725f88c8..fc178e2fa1df83 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -33,7 +33,7 @@ class Conditional: to be run conditionally when a condition is met or skipped. ''' - _when = FieldAttribute(isa='list', default='_ansible_container') + _when = FieldAttribute(isa='list', default=[]) def __init__(self, loader=None): # when used directly, this class needs a loader, but we want to diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index e08c8c600163a3..ed61416e951116 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,22 +64,22 @@ class Play(Base, Taggable, Become): # Connection _gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _hosts = FieldAttribute(isa='list', default='_ansible_container', required=True, listof=string_types, always_post_validate=True) + _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) # Variable Attributes - _vars_files = FieldAttribute(isa='list', default='_ansible_container', priority=99) - _vars_prompt = FieldAttribute(isa='list', default='_ansible_container', always_post_validate=True) + _vars_files = FieldAttribute(isa='list', default=[], priority=99) + _vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True) _vault_password = FieldAttribute(isa='string', always_post_validate=True) # Role Attributes - _roles = FieldAttribute(isa='list', default='_ansible_container', priority=90) + _roles = FieldAttribute(isa='list', default=[], priority=90) # Block (Task) Lists Attributes - _handlers = FieldAttribute(isa='list', default='_ansible_container') - _pre_tasks = FieldAttribute(isa='list', 
default='_ansible_container') - _post_tasks = FieldAttribute(isa='list', default='_ansible_container') - _tasks = FieldAttribute(isa='list', default='_ansible_container') + _handlers = FieldAttribute(isa='list', default=[]) + _pre_tasks = FieldAttribute(isa='list', default=[]) + _post_tasks = FieldAttribute(isa='list', default=[]) + _tasks = FieldAttribute(isa='list', default=[]) # Flag/Setting Attributes _any_errors_fatal = FieldAttribute(isa='bool', default=False, always_post_validate=True) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index da291c3c83470b..81223500adf879 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -171,8 +171,8 @@ class PlayContext(Base): # general flags _verbosity = FieldAttribute(isa='int', default=0) - _only_tags = FieldAttribute(isa='set', default='_ansible_container') - _skip_tags = FieldAttribute(isa='set', default='_ansible_container') + _only_tags = FieldAttribute(isa='set', default=set()) + _skip_tags = FieldAttribute(isa='set', default=set()) _check_mode = FieldAttribute(isa='bool', default=False) _force_handlers = FieldAttribute(isa='bool', default=False) _start_at_task = FieldAttribute(isa='string') diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py index 52081c415394c5..d9af2ba52379bc 100644 --- a/lib/ansible/playbook/playbook_include.py +++ b/lib/ansible/playbook/playbook_include.py @@ -35,7 +35,7 @@ class PlaybookInclude(Base, Conditional, Taggable): _name = FieldAttribute(isa='string') _include = FieldAttribute(isa='string') - _vars = FieldAttribute(isa='dict', default='_ansible_container') + _vars = FieldAttribute(isa='dict', default=dict()) @staticmethod def load(data, basedir, variable_manager=None, loader=None): diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py index 4bb7d0ce02bbd8..58b59145a1cc1b 100644 --- a/lib/ansible/playbook/role/metadata.py 
+++ b/lib/ansible/playbook/role/metadata.py @@ -40,7 +40,7 @@ class RoleMetadata(Base): ''' _allow_duplicates = FieldAttribute(isa='bool', default=False) - _dependencies = FieldAttribute(isa='list', default='_ansible_container') + _dependencies = FieldAttribute(isa='list', default=[]) _galaxy_info = FieldAttribute(isa='GalaxyInfo') def __init__(self, owner=None): diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py index 37e3261e80da71..8f5cfa093446a7 100644 --- a/lib/ansible/playbook/taggable.py +++ b/lib/ansible/playbook/taggable.py @@ -29,7 +29,7 @@ class Taggable: untagged = frozenset(['untagged']) - _tags = FieldAttribute(isa='list', default='_ansible_container', listof=(string_types,int)) + _tags = FieldAttribute(isa='list', default=[], listof=(string_types,int)) def __init__(self): super(Taggable, self).__init__() diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 53a9a3c3931231..17f1952e39c4f5 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -64,7 +64,7 @@ class Task(Base, Conditional, Taggable, Become): # will be used if defined # might be possible to define others - _args = FieldAttribute(isa='dict', default='_ansible_container') + _args = FieldAttribute(isa='dict', default=dict()) _action = FieldAttribute(isa='string') _any_errors_fatal = FieldAttribute(isa='bool') From 2820b4c243d50416f661c4ea9408bba1918244bb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 08:23:45 -0800 Subject: [PATCH 3064/3617] removed default from hosts to make it requried prevents writing a play w/o a hosts entry which would default to all/empty --- lib/ansible/playbook/play.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py index ed61416e951116..bc0331486460ba 100644 --- a/lib/ansible/playbook/play.py +++ b/lib/ansible/playbook/play.py @@ -64,7 +64,7 @@ class Play(Base, Taggable, Become): # Connection 
_gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True) - _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True) + _hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True) _name = FieldAttribute(isa='string', default='', always_post_validate=True) # Variable Attributes From 2bfb13bfb39bf31c5c1bc40f376907fc50ca69ef Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 08:28:54 -0800 Subject: [PATCH 3065/3617] removed unused 'pattern' from ansible.cfg also moved the config param to a 'deprecated' list in constants.py added TODO for producing a deprecation warning for such vars --- examples/ansible.cfg | 1 - lib/ansible/constants.py | 8 ++++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index 87c089f45ae420..ec3ddf20641301 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -14,7 +14,6 @@ #inventory = /etc/ansible/hosts #library = /usr/share/my_modules/ #remote_tmp = $HOME/.ansible/tmp -#pattern = * #forks = 5 #poll_interval = 15 #sudo_user = root diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index ae10c5e9a42617..7f74358dd5dca9 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -120,16 +120,20 @@ def load_config_file(): # sections in config file DEFAULTS='defaults' +# FIXME: add deprecation warning when these get set +#### DEPRECATED VARS #### +# use more sanely named 'inventory' DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True) +# this is not used since 0.5 but people might still have in config +DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None) -# generally configurable things +#### GENERALLY CONFIGURABLE THINGS #### DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True) DEFAULT_HOST_LIST = get_config(p, 
DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True) DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispath=True) DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True) DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp') DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command') -DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*') DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True) DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '') DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8')) From ae2447df9136353453c9ed48d44b2c7fa70231b0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 08:38:53 -0800 Subject: [PATCH 3066/3617] attribute defaults that are containers are a copy This is simpler way to prevent persistent containers across instances of classes that use field attributes --- lib/ansible/playbook/attribute.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py index 703d9dbca1e831..0befb9d80df523 100644 --- a/lib/ansible/playbook/attribute.py +++ b/lib/ansible/playbook/attribute.py @@ -19,6 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from copy import deepcopy class Attribute: @@ -32,6 +33,11 @@ def __init__(self, isa=None, private=False, default=None, required=False, listof self.priority = priority self.always_post_validate = always_post_validate + if default is not None and self.isa in ('list', 'dict', 'set'): + self.default = deepcopy(default) + else: + self.default = default + def __eq__(self, other): return other.priority == self.priority From 0211da2fe9a7b3cefa79d72aab599546bf923e1b Mon Sep 
17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 08:44:09 -0800 Subject: [PATCH 3067/3617] Clarify language of delegate_facts documentation --- docsite/rst/playbooks_delegation.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index 4e2e8c372ac41a..c715adea361457 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -137,8 +137,8 @@ Delegated facts .. versionadded:: 2.0 -Before 2.0 any facts gathered by a delegated task were assigned to the `inventory_hostname` (current host) instead of the host which actually produced the facts (delegated to host). -The new directive `delegate_facts` if set to `True` will assing the task's gathered facts to the delegated host instead of the current one.:: +By default, any fact gathered by a delegated task are assigned to the `inventory_hostname` (the current host) instead of the host which actually produced the facts (the delegated to host). +In 2.0, the directive `delegate_facts` may be set to `True` to assign the task's gathered facts to the delegated host instead of the current one.:: - hosts: app_servers @@ -149,8 +149,8 @@ The new directive `delegate_facts` if set to `True` will assing the task's gathe delegate_facts: True with_items: "{{groups['dbservers'}}" -The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers, -that way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. +The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers. +This way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`. .. 
_run_once: From 7936a4687e9be3752bdbee006d956ed4f2687160 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 10:01:21 -0800 Subject: [PATCH 3068/3617] adhoc avoids callbacks by default as it did before Previous emptying of whitelist only affected callbacks that were constructed for need whitelist. This now works for all callbacks. --- lib/ansible/cli/adhoc.py | 4 +--- lib/ansible/executor/task_queue_manager.py | 5 +++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 912b07a5c72d04..f6dcb37a8ab83a 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -163,9 +163,6 @@ def run(self): else: cb = 'minimal' - if not C.DEFAULT_LOAD_CALLBACK_PLUGINS: - C.DEFAULT_CALLBACK_WHITELIST = [] - if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree @@ -180,6 +177,7 @@ def run(self): options=self.options, passwords=passwords, stdout_callback=cb, + run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, ) result = self._tqm.run(play) finally: diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index d665000046ca8c..70cefee510bb84 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -56,7 +56,7 @@ class TaskQueueManager: which dispatches the Play's tasks to hosts. 
''' - def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None): + def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True): self._inventory = inventory self._variable_manager = variable_manager @@ -65,6 +65,7 @@ def __init__(self, inventory, variable_manager, loader, options, passwords, stdo self._stats = AggregateStats() self.passwords = passwords self._stdout_callback = stdout_callback + self._run_additional_callbacks = run_additional_callbacks self._callbacks_loaded = False self._callback_plugins = [] @@ -159,7 +160,7 @@ def load_callbacks(self): if callback_name != self._stdout_callback or stdout_callback_loaded: continue stdout_callback_loaded = True - elif callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST): + elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)): continue self._callback_plugins.append(callback_plugin()) From 04d74fd6804b5a851cc8762cecf07b100e4dcc6f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 10:13:50 -0800 Subject: [PATCH 3069/3617] reenabled --tree for ansible adhoc command previous fix to avoid callbacks now conflicted with tree optoin which is implemented as a callback in 2.0 --- lib/ansible/cli/adhoc.py | 3 +++ lib/ansible/executor/task_queue_manager.py | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index f6dcb37a8ab83a..3de0e55b7bb4ab 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -163,9 +163,11 @@ def run(self): else: cb = 'minimal' + run_tree=False if self.options.tree: C.DEFAULT_CALLBACK_WHITELIST.append('tree') C.TREE_DIR = self.options.tree + run_tree=True # now create a task queue manager to execute the play self._tqm = 
None @@ -178,6 +180,7 @@ def run(self): passwords=passwords, stdout_callback=cb, run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS, + run_tree=run_tree, ) result = self._tqm.run(play) finally: diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 70cefee510bb84..74111382935de4 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -56,7 +56,7 @@ class TaskQueueManager: which dispatches the Play's tasks to hosts. ''' - def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True): + def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False): self._inventory = inventory self._variable_manager = variable_manager @@ -66,6 +66,7 @@ def __init__(self, inventory, variable_manager, loader, options, passwords, stdo self.passwords = passwords self._stdout_callback = stdout_callback self._run_additional_callbacks = run_additional_callbacks + self._run_tree = run_tree self._callbacks_loaded = False self._callback_plugins = [] @@ -160,6 +161,8 @@ def load_callbacks(self): if callback_name != self._stdout_callback or stdout_callback_loaded: continue stdout_callback_loaded = True + elif callback_name == 'tree' and self._run_tree: + pass elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)): continue From 14e19c239d610619498f06978e2841764a262e15 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 9 Dec 2015 14:51:43 -0500 Subject: [PATCH 3070/3617] Make on_file_diff callback item-aware --- lib/ansible/plugins/callback/__init__.py | 6 +++++- lib/ansible/plugins/callback/default.py | 9 ++++++++- lib/ansible/plugins/callback/skippy.py | 9 ++++++++- lib/ansible/plugins/strategy/__init__.py | 2 +- 4 files 
changed, 22 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 03eb58d99db65d..b8a48943f28661 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -59,6 +59,10 @@ def __init__(self, display=None): version = getattr(self, 'CALLBACK_VERSION', '1.0') self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version)) + def _copy_result(self, result): + ''' helper for callbacks, so they don't all have to include deepcopy ''' + return deepcopy(result) + def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if result.get('_ansible_no_log', False): return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result")) @@ -126,7 +130,7 @@ def _get_item(self, result): def _process_items(self, result): for res in result._result['results']: - newres = deepcopy(result) + newres = self._copy_result(result) res['item'] = self._get_item(res) newres._result = res if 'failed' in res and res['failed']: diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 3175bf3e53c3e9..1f37f4b975e0f9 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -134,7 +134,14 @@ def v2_playbook_on_play_start(self, play): self._display.banner(msg) def v2_on_file_diff(self, result): - if 'diff' in result._result and result._result['diff']: + if result._task.loop and 'results' in result._result: + for res in result._result['results']: + newres = self._copy_result(result) + res['item'] = self._get_item(res) + newres._result = res + + self.v2_on_file_diff(newres) + elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) def v2_playbook_item_on_ok(self, result): diff --git a/lib/ansible/plugins/callback/skippy.py 
b/lib/ansible/plugins/callback/skippy.py index 15b7d3387c2849..495943417fd996 100644 --- a/lib/ansible/plugins/callback/skippy.py +++ b/lib/ansible/plugins/callback/skippy.py @@ -123,7 +123,14 @@ def v2_playbook_on_play_start(self, play): self._display.banner(msg) def v2_on_file_diff(self, result): - if 'diff' in result._result and result._result['diff']: + if result._task.loop and 'results' in result._result: + for res in result._result['results']: + newres = self._copy_result(result) + res['item'] = self._get_item(res) + newres._result = res + + self.v2_on_file_diff(newres) + elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) def v2_playbook_item_on_ok(self, result): diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 732a9293d282c8..15636b580d1c37 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -221,7 +221,7 @@ def _process_pending_results(self, iterator): self._tqm._stats.increment('changed', host.name) self._tqm.send_callback('v2_runner_on_ok', task_result) - if self._diff and 'diff' in task_result._result: + if self._diff: self._tqm.send_callback('v2_on_file_diff', task_result) self._pending_results -= 1 From 61dc4a7e67bcb7c968e273ee39618d1f76f7ab9e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 12:10:21 -0800 Subject: [PATCH 3071/3617] Update module refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 191347676eea08..0b5555b62cd8d9 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 191347676eea08817da3fb237f24cdbf2d16e307 +Subproject commit 0b5555b62cd8d91fb4fa434217671f3acaebbf5a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 
a10bdd6be948d3..cbed642009497d 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit a10bdd6be948d3aa5fad7ff4959908d6e78e0528 +Subproject commit cbed642009497ddaf19b5f578ab6c78da1356eda From 64864829c4a858e296b049075675e960de678690 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 9 Dec 2015 12:37:56 -0800 Subject: [PATCH 3072/3617] changed deprecation to removal warning --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index d7d0f03fb1fdd2..3c1331e7065c2f 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -388,7 +388,7 @@ def _split_subscript(self, pattern): end = -1 subscript = (int(start), int(end)) if sep == '-': - display.deprecated("Use [x:y] inclusive subscripts instead of [x-y]", version=2.0, removed=True) + display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed") return (pattern, subscript) From 07bf4d9ac4899eb2e0e8246530ff2ca3ee75f3ef Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 9 Dec 2015 15:48:53 -0500 Subject: [PATCH 3073/3617] added winrm CP notes to changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d31ef4ebb2524..2bf11e6c5bc498 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -85,6 +85,8 @@ newline being stripped you can change your playbook like this: ###Plugins * Rewritten dnf module that should be faster and less prone to encountering bugs in cornercases +* WinRM connection plugin passes all vars named `ansible_winrm_*` to the underlying pywinrm client. This allows, for instance, `ansible_winrm_server_cert_validation=ignore` to be used with newer versions of pywinrm to disable certificate validation on Python 2.7.9+. +* WinRM connection plugin put_file is significantly faster and no longer has file size limitations. 
####Deprecated Modules (new ones in parens): From c0d79cf7e10da157ae1b28283ab7b564baee7b51 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 13:07:00 -0800 Subject: [PATCH 3074/3617] Remove the funcd connection plugin --- lib/ansible/plugins/connection/funcd.py | 99 ------------------------- 1 file changed, 99 deletions(-) delete mode 100644 lib/ansible/plugins/connection/funcd.py diff --git a/lib/ansible/plugins/connection/funcd.py b/lib/ansible/plugins/connection/funcd.py deleted file mode 100644 index 4c9e09be65cdd9..00000000000000 --- a/lib/ansible/plugins/connection/funcd.py +++ /dev/null @@ -1,99 +0,0 @@ -# Based on local.py (c) 2012, Michael DeHaan -# Based on chroot.py (c) 2013, Maykel Moya -# (c) 2013, Michael Scherer -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# --- -# The func transport permit to use ansible over func. For people who have already setup -# func and that wish to play with ansible, this permit to move gradually to ansible -# without having to redo completely the setup of the network. 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -HAVE_FUNC=False -try: - import func.overlord.client as fc - HAVE_FUNC=True -except ImportError: - pass - -import os -from ansible.callbacks import vvv -from ansible import errors -import tempfile -import shutil - - -class Connection(object): - ''' Func-based connections ''' - - def __init__(self, runner, host, port, *args, **kwargs): - self.runner = runner - self.host = host - self.has_pipelining = False - # port is unused, this go on func - self.port = port - - def connect(self, port=None): - if not HAVE_FUNC: - raise errors.AnsibleError("func is not installed") - - self.client = fc.Client(self.host) - return self - - def exec_command(self, cmd, become_user=None, sudoable=False, - executable='/bin/sh', in_data=None): - ''' run a command on the remote minion ''' - - if in_data: - raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - - # totally ignores privlege escalation - vvv("EXEC %s" % (cmd), host=self.host) - p = self.client.command.run(cmd)[self.host] - return (p[0], p[1], p[2]) - - def _normalize_path(self, path, prefix): - if not path.startswith(os.path.sep): - path = os.path.join(os.path.sep, path) - normpath = os.path.normpath(path) - return os.path.join(prefix, normpath[1:]) - - def put_file(self, in_path, out_path): - ''' transfer a file from local to remote ''' - - out_path = self._normalize_path(out_path, '/') - vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) - self.client.local.copyfile.send(in_path, out_path) - - def fetch_file(self, in_path, out_path): - ''' fetch a file from remote to local ''' - - in_path = self._normalize_path(in_path, '/') - vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) - # need to use a tmp dir due to difference of semantic for getfile - # ( who take a # directory as destination) and fetch_file, who - # take a file directly - tmpdir = 
tempfile.mkdtemp(prefix="func_ansible") - self.client.local.getfile.get(in_path, tmpdir) - shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), - out_path) - shutil.rmtree(tmpdir) - - def close(self): - ''' terminate the connection; nothing to do here ''' - pass From 18ac12aee60b0033d4b8af4a78ddbd55335c2991 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Wed, 9 Dec 2015 22:08:30 +0100 Subject: [PATCH 3075/3617] Do not fail when variable is not correct in debug action. See https://github.com/ansible/ansible/issues/13484 for more information. --- lib/ansible/plugins/action/debug.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index 1d8e28c7a4a45a..a0ffb71404419f 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -40,7 +40,7 @@ def run(self, tmp=None, task_vars=None): result['msg'] = self._task.args['msg'] # FIXME: move the LOOKUP_REGEX somewhere else elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): - results = self._templar.template(self._task.args['var'], convert_bare=True) + results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=False) if type(self._task.args['var']) in (list, dict): # If var is a list or dict, use the type as key to display result[to_unicode(type(self._task.args['var']))] = results From a7cd41b482dc6bf1bf1073e451aa1b38526dde08 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 9 Dec 2015 16:29:39 -0500 Subject: [PATCH 3076/3617] Windows doc updates --- docsite/rst/intro_windows.rst | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst index e5cbb94fafd206..1adcc35010f7b4 100644 --- a/docsite/rst/intro_windows.rst +++ b/docsite/rst/intro_windows.rst @@ -31,7 +31,7 @@ On a Linux control machine:: 
Active Directory Support ++++++++++++++++++++++++ -If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host), you will need to install the "python-kerberos" module and the MIT krb5 libraries it depends on. +If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host), you will need to install the "python-kerberos" module on the Ansible control host (and the MIT krb5 libraries it depends on). The Ansible control host also requires a properly configured computer account in Active Directory. Installing python-kerberos dependencies --------------------------------------- @@ -131,7 +131,9 @@ To test this, ping the windows host you want to control by name then use the ip If you get different hostnames back than the name you originally pinged, speak to your active directory administrator and get them to check that DNS Scavenging is enabled and that DNS and DHCP are updating each other. -Check your ansible controller's clock is synchronised with your domain controller. Kerberos is time sensitive and a little clock drift can cause tickets not be granted. +Ensure that the Ansible controller has a properly configured computer account in the domain. + +Check your Ansible controller's clock is synchronised with your domain controller. Kerberos is time sensitive and a little clock drift can cause tickets not be granted. Check you are using the real fully qualified domain name for the domain. Sometimes domains are commonly known to users by aliases. 
To check this run: @@ -165,6 +167,8 @@ In group_vars/windows.yml, define the following inventory variables:: ansible_password: SecretPasswordGoesHere ansible_port: 5986 ansible_connection: winrm + # The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates: + ansible_winrm_server_cert_validation: ignore Although Ansible is mostly an SSH-oriented system, Windows management will not happen over SSH (`yet `). @@ -189,6 +193,7 @@ Since 2.0, the following custom inventory variables are also supported for addit * ``ansible_winrm_path``: Specify an alternate path to the WinRM endpoint. Ansible uses ``/wsman`` by default. * ``ansible_winrm_realm``: Specify the realm to use for Kerberos authentication. If the username contains ``@``, Ansible will use the part of the username after ``@`` by default. * ``ansible_winrm_transport``: Specify one or more transports as a comma-separated list. By default, Ansible will use ``kerberos,plaintext`` if the ``kerberos`` module is installed and a realm is defined, otherwise ``plaintext``. +* ``ansible_winrm_server_cert_validation``: Specify the server certificate validation mode (``ignore`` or ``validate``). Ansible defaults to ``validate`` on Python 2.7.9 and higher, which will result in certificate validation errors against the Windows self-signed certificates. Unless verifiable certificates have been configured on the WinRM listeners, this should be set to ``ignore`` * ``ansible_winrm_*``: Any additional keyword arguments supported by ``winrm.Protocol`` may be provided. .. _windows_system_prep: @@ -221,7 +226,7 @@ Getting to PowerShell 3.0 or higher PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script. Note that PowerShell 3.0 is only supported on Windows 7 SP1, Windows Server 2008 SP1, and later releases of Windows. 
-Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above. +Looking at an Ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 `_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above. .. _what_windows_modules_are_available: @@ -248,10 +253,10 @@ Note there are a few other Ansible modules that don't start with "win" that also Developers: Supported modules and how it works `````````````````````````````````````````````` -Developing ansible modules are covered in a `later section of the documentation `_, with a focus on Linux/Unix. -What if you want to write Windows modules for ansible though? +Developing Ansible modules are covered in a `later section of the documentation `_, with a focus on Linux/Unix. +What if you want to write Windows modules for Ansible though? -For Windows, ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. +For Windows, Ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. Windows modules live in a "windows/" subfolder in the Ansible "library/" subtree. For example, if a module is named "library/windows/win_ping", there will be embedded documentation in the "win_ping" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense. 
@@ -351,7 +356,7 @@ form of new modules, tweaks to existing modules, documentation, or something els :doc:`developing_modules` How to write modules :doc:`playbooks` - Learning ansible's configuration management language + Learning Ansible's configuration management language `List of Windows Modules `_ Windows specific module list, all implemented in PowerShell `Mailing List `_ From 62cbc03af6410df2b9c61a5056f71a51dd2570ec Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 13:29:53 -0800 Subject: [PATCH 3077/3617] Revert "Remove the funcd connection plugin" This reverts commit c0d79cf7e10da157ae1b28283ab7b564baee7b51. We may still port the funcd connection plugin, just not in time for 2.0.0 --- lib/ansible/plugins/connection/funcd.py | 99 +++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 lib/ansible/plugins/connection/funcd.py diff --git a/lib/ansible/plugins/connection/funcd.py b/lib/ansible/plugins/connection/funcd.py new file mode 100644 index 00000000000000..4c9e09be65cdd9 --- /dev/null +++ b/lib/ansible/plugins/connection/funcd.py @@ -0,0 +1,99 @@ +# Based on local.py (c) 2012, Michael DeHaan +# Based on chroot.py (c) 2013, Maykel Moya +# (c) 2013, Michael Scherer +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# --- +# The func transport permit to use ansible over func. 
For people who have already setup +# func and that wish to play with ansible, this permit to move gradually to ansible +# without having to redo completely the setup of the network. +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +HAVE_FUNC=False +try: + import func.overlord.client as fc + HAVE_FUNC=True +except ImportError: + pass + +import os +from ansible.callbacks import vvv +from ansible import errors +import tempfile +import shutil + + +class Connection(object): + ''' Func-based connections ''' + + def __init__(self, runner, host, port, *args, **kwargs): + self.runner = runner + self.host = host + self.has_pipelining = False + # port is unused, this go on func + self.port = port + + def connect(self, port=None): + if not HAVE_FUNC: + raise errors.AnsibleError("func is not installed") + + self.client = fc.Client(self.host) + return self + + def exec_command(self, cmd, become_user=None, sudoable=False, + executable='/bin/sh', in_data=None): + ''' run a command on the remote minion ''' + + if in_data: + raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") + + # totally ignores privlege escalation + vvv("EXEC %s" % (cmd), host=self.host) + p = self.client.command.run(cmd)[self.host] + return (p[0], p[1], p[2]) + + def _normalize_path(self, path, prefix): + if not path.startswith(os.path.sep): + path = os.path.join(os.path.sep, path) + normpath = os.path.normpath(path) + return os.path.join(prefix, normpath[1:]) + + def put_file(self, in_path, out_path): + ''' transfer a file from local to remote ''' + + out_path = self._normalize_path(out_path, '/') + vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + self.client.local.copyfile.send(in_path, out_path) + + def fetch_file(self, in_path, out_path): + ''' fetch a file from remote to local ''' + + in_path = self._normalize_path(in_path, '/') + vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + # need to use 
a tmp dir due to difference of semantic for getfile + # ( who take a # directory as destination) and fetch_file, who + # take a file directly + tmpdir = tempfile.mkdtemp(prefix="func_ansible") + self.client.local.getfile.get(in_path, tmpdir) + shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), + out_path) + shutil.rmtree(tmpdir) + + def close(self): + ''' terminate the connection; nothing to do here ''' + pass From a19e083d33ae5ae59be358c9468a4318aca3174f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 13:52:01 -0800 Subject: [PATCH 3078/3617] Note that handlers inside of includes are not possible at the moment --- docsite/rst/playbooks_intro.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index e0f1aec5c104d2..28c809f013266f 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -386,6 +386,7 @@ won't need them for much else. * Handler names live in a global namespace. * If two handler tasks have the same name, only one will run. `* `_ + * You cannot notify a handler that is defined inside of an include Roles are described later on, but it's worthwhile to point out that: From a61387846d3e210181683a60df14c8e7cbf46893 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 7 Dec 2015 10:22:07 -0800 Subject: [PATCH 3079/3617] draft release documentation --- docsite/rst/developing_releases.rst | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 docsite/rst/developing_releases.rst diff --git a/docsite/rst/developing_releases.rst b/docsite/rst/developing_releases.rst new file mode 100644 index 00000000000000..1eeb2421210f10 --- /dev/null +++ b/docsite/rst/developing_releases.rst @@ -0,0 +1,48 @@ +Releases +======== + +.. contents:: Topics + :local: + +.. 
schedule:: + +Release Schedule +```````````````` +Ansible is on a 'flexible' 4 month release schedule, sometimes this can be extended if there is a major change that requires a longer cycle (i.e. 2.0 core rewrite). +Currently modules get released at the same time as the main Ansible repo, even though they are separated into ansible-modules-core and ansible-modules-extras. + +The major features and bugs fixed in a release should be reflected in the CHANGELOG.md, minor ones will be in the commit history (FIXME: add git exmaple to list). +When a fix/feature gets added to the `devel` branch it will be part of the next release, some bugfixes can be backported to previous releases and might be part of a minor point release if it is deemed necessary. + +Sometimes an RC can be extended by a few days if a bugfix makes a change that can have far reaching consequences, so users have enough time to find any new issues that may stem from this. + +.. methods:: + +Release methods +```````````````` + +Ansible normally goes through a 'release candidate', issuing an RC1 for a release, if no major bugs are discovered in it after 5 business days we'll get a final release. +Otherwise fixes will be applied and an RC2 will be provided for testing and if no bugs after 2 days, the final release will be made, iterating this last step and incrementing the candidate number as we find major bugs. + + +.. freezing:: + +Release feature freeze +`````````````````````` + +During the release candidate process, the focus will be on bugfixes that affect the RC, new features will be delayed while we try to produce a final version. Some bugfixes that are minor or don't affect the RC will also be postponed until after the release is finalized. + +.. 
seealso:: + + :doc:`developing_api` + Python API to Playbooks and Ad Hoc Task Execution + :doc:`developing_modules` + How to develop modules + :doc:`developing_plugins` + How to develop plugins + `Ansible Tower `_ + REST API endpoint and GUI for Ansible, syncs with dynamic inventory + `Development Mailing List `_ + Mailing list for development topics + `irc.freenode.net `_ + #ansible IRC chat channel From 2b363434514aa94aad145d2a6eacf4c1013490d8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 9 Dec 2015 17:57:52 -0500 Subject: [PATCH 3080/3617] Missed one place we were appending the incorrectly escaped item to raw params --- lib/ansible/parsing/splitter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py index f24d8ecf9de45f..feb0cd2b34ba35 100644 --- a/lib/ansible/parsing/splitter.py +++ b/lib/ansible/parsing/splitter.py @@ -86,7 +86,7 @@ def parse_kv(args, check_raw=False): # FIXME: make the retrieval of this list of shell/command # options a function, so the list is centralized if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'): - raw_params.append(x) + raw_params.append(orig_x) else: options[k.strip()] = unquote(v.strip()) else: From 30e729557f0056ec561288046e2aa933efe899b3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 9 Dec 2015 16:43:24 -0800 Subject: [PATCH 3081/3617] Add first draft of porting guide for 2.0 --- docsite/rst/porting_guide_2.0.rst | 160 ++++++++++++++++++++++++++++++ 1 file changed, 160 insertions(+) create mode 100644 docsite/rst/porting_guide_2.0.rst diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst new file mode 100644 index 00000000000000..9c26a4b1611872 --- /dev/null +++ b/docsite/rst/porting_guide_2.0.rst @@ -0,0 +1,160 @@ +Porting Guide +============= + + +Playbook +-------- + +* backslash escapes When specifying parameters in jinja2 expressions in YAML + dicts, 
backslashes sometimes needed to be escaped twice. This has been fixed + in 2.0.x so that escaping once works. The following example shows how + playbooks must be modified:: + + # Syntax in 1.9.x + - debug: + msg: "{{ 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') }}" + # Syntax in 2.0.x + - debug: + msg: "{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}" + + # Output: + "msg": "test1 1\\3" + +To make an escaped string that will work on all versions you have two options:: + +- debug: msg="{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}" + +uses key=value escaping which has not changed. The other option is to check for the ansible version:: + +"{{ (ansible_version|version_compare('ge', '2.0'))|ternary( 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') , 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') ) }}" + +* trailing newline When a string with a trailing newline was specified in the + playbook via yaml dict format, the trailing newline was stripped. When + specified in key=value format, the trailing newlines were kept. In v2, both + methods of specifying the string will keep the trailing newlines. If you + relied on the trailing newline being stripped, you can change your playbook + using the following as an example:: + + # Syntax in 1.9.x + vars: + message: > + Testing + some things + tasks: + - debug: + msg: "{{ message }}" + + # Syntax in 2.0.x + vars: + old_message: > + Testing + some things + message: "{{ old_messsage[:-1] }}" + - debug: + msg: "{{ message }}" + # Output + "msg": "Testing some things" + +* porting task includes + * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. 
+ * variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 + * variable precedence +* templating (variables in playbooks and template lookups) has improved with regard to keeping the original instead of turning everything into a string. + If you need the old behavior, quote the value to pass it around as a string. + Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`. + You can override the `null_representation` setting to an empty string in your config file, or by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. +* Extras callbacks must be whitelisted in ansible.cfg. Copying is no longer necessary but whitelisting in ansible.cfg must be completed. +* dnf module has been rewritten. Some minor changes in behavior may be observed. +* win_updates has been rewritten and works as expected now. + +Deprecated +---------- + +While all items listed here will show a deprecation warning message, they still work as they did in 1.9.x. Please note that they will be removed in 2.2 (Ansible always waits two major releases to remove a deprecated feature). + +* Bare variables in with_ loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. +* The ansible-galaxy text format requirements file. Users should use the YAML format for requirements instead. +* Undefined variables within a with_ loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. +* Using variables for task parameters is unsafe and will be removed in a future version. For example:: + + - hosts: localhost + gather_facts: no + vars: + debug_params: + msg: "hello there" + tasks: + - debug: "{{debug_params}}" + +* Host patterns should use a comma (,) or colon (:) instead of a semicolon (;) to separate hosts/groups in the pattern. +* Ranges specified in host patterns should use the [x:y] syntax, instead of [x-y]. 
+* Playbooks using privilege escalation should always use “become*” options rather than the old su*/sudo* options. +* The “short form” for vars_prompt is no longer supported. +For example:: + +vars_prompt: + variable_name: "Prompt string" + +* Specifying variables at the top level of a task include statement is no longer supported. For example:: + + - include: foo.yml + a: 1 + +Should now be:: + +- include: foo.yml + args: + a: 1 + +* Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only. +* Bare variables in the `environment` dictionary (for plays/tasks/etc.) are no longer supported. Variables specified there should use the full variable syntax: ‘{{foo}}’. +* Tags should no longer be specified with other parameters in a task include. Instead, they should be specified as an option on the task. +For example:: + + - include: foo.yml tags=a,b,c + +Should be:: + + - include: foo.yml + tags: [a, b, c] + +* The first_available_file option on tasks has been deprecated. Users should use the with_first_found option or lookup (‘first_found’, …) plugin. + + +Porting plugins +=============== + +In ansible-1.9.x, you would generally copy an existing plugin to create a new one. Simply implementing the methods and attributes that the caller of the plugin expected made it a plugin of that type. In ansible-2.0, most plugins are implemented by subclassing a base class for each plugin type. This way the custom plugin does not need to contain methods which are not customized. + +.. 
note:: + +Lookup plugins +-------------- +* lookup plugins ; import version + + +Connection plugins +------------------ + +* connection plugins + +Action plugins +-------------- + +* action plugins + +Callback plugins +---------------- + +* callback plugins + +Connection plugins +------------------ + +* connection plugins + + +Porting custom scripts +====================== + +Custom scripts that used the ``ansible.runner.Runner`` API in 1.x have to be ported in 2.x. Please refer to: +https://github.com/ansible/ansible/blob/devel/docsite/rst/developing_api.rst From fe72fff57da967ff0e53c8026bcd94d67cdb59db Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Thu, 10 Dec 2015 01:58:17 +0100 Subject: [PATCH 3082/3617] Fix the markdown used for the Windows module section --- docsite/rst/developing_modules.rst | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index bdee4aa83dc025..fde4b5704b6222 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -538,24 +538,34 @@ Windows modules checklist #!powershell -then:: + then:: + -then:: + + then:: + # WANT_JSON # POWERSHELL_COMMON -then, to parse all arguments into a variable modules generally use:: + then, to parse all arguments into a variable modules generally use:: + $params = Parse-Args $args * Arguments: * Try and use state present and state absent like other modules * You need to check that all your mandatory args are present. You can do this using the builtin Get-AnsibleParam function. 
* Required arguments:: + $package = Get-AnsibleParam -obj $params -name name -failifempty $true + * Required arguments with name validation:: + $state = Get-AnsibleParam -obj $params -name "State" -ValidateSet "Present","Absent" -resultobj $resultobj -failifempty $true + * Optional arguments with name validation:: + $state = Get-AnsibleParam -obj $params -name "State" -default "Present" -ValidateSet "Present","Absent" + * If "FailIfEmpty" is true, the resultobj parameter is used to specify the object returned to fail-json. You can also override the default message using $emptyattributefailmessage (for missing required attributes) and $ValidateSetErrorMessage (for attribute validation errors) + * Look at existing modules for more examples of argument checking. @@ -586,7 +596,7 @@ Starting in 1.8 you can deprecate modules by renaming them with a preceding _, i _old_cloud.py, This will keep the module available but hide it from the primary docs and listing. You can also rename modules and keep an alias to the old name by using a symlink that starts with _. 
-This example allows the stat module to be called with fileinfo, making the following examples equivalent +This example allows the stat module to be called with fileinfo, making the following examples equivalent:: EXAMPLES = ''' ln -s stat.py _fileinfo.py From c20c1a6d490933fa2ec8961508735422f3a6adeb Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Thu, 10 Dec 2015 11:16:21 +0100 Subject: [PATCH 3083/3617] add depth option to ansible-pull Allows shallow checkouts in ansible-pull by adding `--depth 1` (or higher number) --- lib/ansible/cli/pull.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 593d601e8d44a5..67e89259303079 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -80,6 +80,8 @@ def parse(self): help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') + self.parser.add_option('--depth', dest='depth', default=None, + help='Depth of checkout, shallow checkout if greater or equal 1 . Defaults to full checkout.') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. 
' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', @@ -154,6 +156,10 @@ def run(self): if self.options.verify: repo_opts += ' verify_commit=yes' + + if self.options.depth: + repo_opts += ' depth=%s' % self.options.depth + path = module_loader.find_plugin(self.options.module_name) if path is None: From 6680cc7052dd4ef5bb166008a18a57e0f156df95 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Thu, 10 Dec 2015 08:04:06 -0500 Subject: [PATCH 3084/3617] allow custom callbacks with adhoc cli for scripting missing import of CallbackBase --- lib/ansible/cli/__init__.py | 3 ++- lib/ansible/cli/adhoc.py | 4 +++- lib/ansible/executor/task_queue_manager.py | 11 +++++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index da1aabcc69891f..a934a3a8ee5d49 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -66,7 +66,7 @@ class CLI(object): LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) # -S (chop long lines) -X (disable termcap init and de-init) - def __init__(self, args): + def __init__(self, args, callback=None): """ Base init method for all command line programs """ @@ -75,6 +75,7 @@ def __init__(self, args): self.options = None self.parser = None self.action = None + self.callback = callback def set_action(self): """ diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py index 3de0e55b7bb4ab..250241a848faab 100644 --- a/lib/ansible/cli/adhoc.py +++ b/lib/ansible/cli/adhoc.py @@ -158,7 +158,9 @@ def run(self): play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval) play = Play().load(play_ds, variable_manager=variable_manager, loader=loader) - if self.options.one_line: + if self.callback: + cb = self.callback + elif self.options.one_line: cb = 'oneline' else: cb = 'minimal' diff --git 
a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 74111382935de4..e2b29a5282c2e9 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -34,6 +34,7 @@ from ansible.plugins import callback_loader, strategy_loader, module_loader from ansible.template import Templar from ansible.vars.hostvars import HostVars +from ansible.plugins.callback import CallbackBase try: from __main__ import display @@ -146,8 +147,14 @@ def load_callbacks(self): if self._stdout_callback is None: self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK - if self._stdout_callback not in callback_loader: - raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback) + if isinstance(self._stdout_callback, CallbackBase): + self._callback_plugins.append(self._stdout_callback) + stdout_callback_loaded = True + elif isinstance(self._stdout_callback, basestring): + if self._stdout_callback not in callback_loader: + raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback) + else: + raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin") for callback_plugin in callback_loader.all(class_only=True): if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0: From 72f0679f685dc6c79fe80736d2ca72f6778b8e5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Warcho=C5=82?= Date: Thu, 10 Dec 2015 16:22:37 +0100 Subject: [PATCH 3085/3617] Explain how 'run_once' interacts with 'serial' --- docsite/rst/playbooks_delegation.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst index c715adea361457..fa808abb65bb71 100644 --- a/docsite/rst/playbooks_delegation.rst +++ b/docsite/rst/playbooks_delegation.rst @@ -182,13 +182,18 @@ This can be optionally paired with "delegate_to" to specify an 
individual host t delegate_to: web01.example.org When "run_once" is not used with "delegate_to" it will execute on the first host, as defined by inventory, -in the group(s) of hosts targeted by the play. e.g. webservers[0] if the play targeted "hosts: webservers". +in the group(s) of hosts targeted by the play - e.g. webservers[0] if the play targeted "hosts: webservers". -This approach is similar, although more concise and cleaner than applying a conditional to a task such as:: +This approach is similar to applying a conditional to a task such as:: - command: /opt/application/upgrade_db.py when: inventory_hostname == webservers[0] +.. note:: + When used together with "serial", tasks marked as "run_once" will be run on one host in *each* serial batch. + If it's crucial that the task is run only once regardless of "serial" mode, use + :code:`inventory_hostname == my_group_name[0]` construct. + .. _local_playbooks: Local Playbooks From 1dda8158ff9aa5240e89711c7279c3d072e0e57e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 10 Dec 2015 07:28:58 -0800 Subject: [PATCH 3086/3617] become_pass needs to be bytes when it is passed to ssh. 
Fixes #13240 --- lib/ansible/plugins/connection/ssh.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index debe36bd320509..4251f8a63e8de4 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -463,7 +463,7 @@ def _run(self, cmd, in_data, sudoable=True): if states[state] == 'awaiting_prompt': if self._flags['become_prompt']: display.debug('Sending become_pass in response to prompt') - stdin.write(self._play_context.become_pass + '\n') + stdin.write('{0}\n'.format(to_bytes(self._play_context.become_pass ))) self._flags['become_prompt'] = False state += 1 elif self._flags['become_success']: From bd9582d0721db3c6e5e24b08c747e02a6391a0a7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 10 Dec 2015 08:10:45 -0800 Subject: [PATCH 3087/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 0b5555b62cd8d9..0d23b3df526875 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 0b5555b62cd8d91fb4fa434217671f3acaebbf5a +Subproject commit 0d23b3df526875c8fc6edf94268f3aa850ec05f1 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index cbed642009497d..51813e003331c3 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit cbed642009497ddaf19b5f578ab6c78da1356eda +Subproject commit 51813e003331c3341b07c5cda33346cada537a3b From c402325085c129ce289c73a808d8d6ac68df096d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 10 Dec 2015 13:10:17 -0500 Subject: [PATCH 3088/3617] Fixing up docker integration tests a bit --- .../roles/test_docker/tasks/docker-tests.yml | 31 +++---------------- .../test_docker/tasks/registry-tests.yml | 11 ++----- 2 files changed, 8 
insertions(+), 34 deletions(-) diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml index 33ffe6c70ca4c3..14e23f72dd5db7 100644 --- a/test/integration/roles/test_docker/tasks/docker-tests.yml +++ b/test/integration/roles/test_docker/tasks/docker-tests.yml @@ -8,7 +8,6 @@ image: busybox state: present pull: missing - docker_api_version: "1.14" - name: Run a small script in busybox docker: @@ -17,22 +16,12 @@ pull: always command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True - docker_api_version: "1.14" - -- name: Get the docker container id - shell: "docker ps | grep busybox | awk '{ print $1 }'" - register: container_id - name: Get the docker container ip - shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" - register: container_ip - -- name: Pause a few moments because docker is not reliable - pause: - seconds: 40 + set_fact: container_ip="{{docker_containers[0].NetworkSettings.IPAddress}}" - name: Try to access the server - shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + shell: "echo 'world' | nc {{ container_ip }} 2000" register: docker_output - name: check that the script ran @@ -49,22 +38,12 @@ TEST: hello command: '/bin/sh -c "nc -l -p 2000 -e xargs -n1 echo $TEST"' detach: True - docker_api_version: "1.14" - -- name: Get the docker container id - shell: "docker ps | grep busybox | awk '{ print $1 }'" - register: container_id - name: Get the docker container ip - shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'" - register: container_ip - -- name: Pause a few moments because docker is not reliable - pause: - seconds: 40 + set_fact: container_ip="{{docker_containers[0].NetworkSettings.IPAddress}}" - name: Try to access the server - shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000" + shell: "echo 'world' | nc {{ container_ip }} 
2000" register: docker_output - name: check that the script ran @@ -73,7 +52,7 @@ - "'hello world' in docker_output.stdout_lines" - name: Remove containers - shell: "docker rm $(docker ps -aq)" + shell: "docker rm -f $(docker ps -aq)" - name: Remove all images from the local docker shell: "docker rmi -f $(docker images -q)" diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml index 57b4d252774176..1ef330da5f62cf 100644 --- a/test/integration/roles/test_docker/tasks/registry-tests.yml +++ b/test/integration/roles/test_docker/tasks/registry-tests.yml @@ -19,11 +19,8 @@ - name: Push docker image into the private registry command: "docker push localhost:5000/mine" -- name: Remove containers - shell: "docker rm $(docker ps -aq)" - - name: Remove all images from the local docker - shell: "docker rmi -f $(docker images -q)" + shell: "docker rmi -f {{image_id.stdout_lines[0]}}" - name: Get number of images in docker command: "docker images" @@ -41,7 +38,6 @@ state: present pull: missing insecure_registry: True - docker_api_version: "1.14" - name: Run a small script in the new image docker: @@ -51,7 +47,6 @@ command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True insecure_registry: True - docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep mine | awk '{ print $1 }'" @@ -76,8 +71,9 @@ - name: Remove containers - shell: "docker rm $(docker ps -aq)" + shell: "docker rm -f $(docker ps -aq)" +- shell: docker images -q - name: Remove all images from the local docker shell: "docker rmi -f $(docker images -q)" @@ -157,7 +153,6 @@ state: running command: "nc -l -p 2000 -e xargs -n1 echo hello" detach: True - docker_api_version: "1.14" - name: Get the docker container id shell: "docker ps | grep mine | awk '{ print $1 }'" From a6a58d6947912328fd48e26ea1335bd9314f0135 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Thu, 10 Dec 2015 16:39:27 -0500 Subject: 
[PATCH 3089/3617] fix default host for non vcd service types --- lib/ansible/module_utils/vca.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/vca.py b/lib/ansible/module_utils/vca.py index 56341ec55596ff..ef89d5455696c0 100644 --- a/lib/ansible/module_utils/vca.py +++ b/lib/ansible/module_utils/vca.py @@ -108,7 +108,10 @@ def get_vm(self, vapp_name, vm_name): def create_instance(self): service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE) - host = self.params.get('host', LOGIN_HOST.get('service_type')) + if service_type == 'vcd': + host = self.params['host'] + else: + host = LOGIN_HOST[service_type] username = self.params['username'] version = self.params.get('api_version') From 37c4e9aee34df2f421942e86c8afd1fef2bee5f6 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Fri, 11 Dec 2015 07:11:48 +0530 Subject: [PATCH 3090/3617] Clean up debug logging around _low_level_execute_command We were logging the command to be executed many times, which made debug logs very hard to read. Now we do it only once. Also makes the logged ssh command line cut-and-paste-able (the lack of which has confused a number of people by now; the problem being that we pass the command as a single argument to execve(), so it doesn't need an extra level of quoting as it does when you try to run it by hand). 
--- lib/ansible/plugins/action/__init__.py | 25 ++++++------------------- lib/ansible/plugins/connection/ssh.py | 2 +- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 497143224a72d6..154404e474cf78 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -202,9 +202,7 @@ def _make_tmp_path(self): tmp_mode = 0o755 cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode) - display.debug("executing _low_level_execute_command to create the tmp path") result = self._low_level_execute_command(cmd, sudoable=False) - display.debug("done with creation of tmp path") # error handling on this seems a little aggressive? if result['rc'] != 0: @@ -249,9 +247,7 @@ def _remove_tmp_path(self, tmp_path): cmd = self._connection._shell.remove(tmp_path, recurse=True) # If we have gotten here we have a working ssh configuration. # If ssh breaks we could leave tmp directories out on the remote system. 
- display.debug("calling _low_level_execute_command to remove the tmp path") self._low_level_execute_command(cmd, sudoable=False) - display.debug("done removing the tmp path") def _transfer_data(self, remote_path, data): ''' @@ -286,9 +282,7 @@ def _remote_chmod(self, mode, path, sudoable=False): ''' cmd = self._connection._shell.chmod(mode, path) - display.debug("calling _low_level_execute_command to chmod the remote path") res = self._low_level_execute_command(cmd, sudoable=sudoable) - display.debug("done with chmod call") return res def _remote_checksum(self, path, all_vars): @@ -299,9 +293,7 @@ def _remote_checksum(self, path, all_vars): python_interp = all_vars.get('ansible_python_interpreter', 'python') cmd = self._connection._shell.checksum(path, python_interp) - display.debug("calling _low_level_execute_command to get the remote checksum") data = self._low_level_execute_command(cmd, sudoable=True) - display.debug("done getting the remote checksum") try: data2 = data['stdout'].strip().splitlines()[-1] if data2 == u'': @@ -329,9 +321,7 @@ def _remote_expand_user(self, path): expand_path = '~%s' % self._play_context.become_user cmd = self._connection._shell.expand_user(expand_path) - display.debug("calling _low_level_execute_command to expand the remote user path") data = self._low_level_execute_command(cmd, sudoable=False) - display.debug("done expanding the remote user path") #initial_fragment = utils.last_non_blank_line(data['stdout']) initial_fragment = data['stdout'].strip().splitlines()[-1] @@ -448,9 +438,7 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var # specified in the play, not the sudo_user sudoable = False - display.debug("calling _low_level_execute_command() for command %s" % cmd) res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data) - display.debug("_low_level_execute_command returned ok") if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and 
delete_remote_tmp: if self._play_context.become and self._play_context.become_user != 'root': @@ -498,21 +486,20 @@ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, if executable is not None: cmd = executable + ' -c ' + cmd - display.debug("in _low_level_execute_command() (%s)" % (cmd,)) + display.debug("_low_level_execute_command(): starting") if not cmd: # this can happen with powershell modules when there is no analog to a Windows command (like chmod) - display.debug("no command, exiting _low_level_execute_command()") + display.debug("_low_level_execute_command(): no command, exiting") return dict(stdout='', stderr='') allow_same_user = C.BECOME_ALLOW_SAME_USER same_user = self._play_context.become_user == self._play_context.remote_user if sudoable and self._play_context.become and (allow_same_user or not same_user): - display.debug("using become for this command") + display.debug("_low_level_execute_command(): using become for this command") cmd = self._play_context.make_become_cmd(cmd, executable=executable) - display.debug("executing the command %s through the connection" % cmd) + display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) - display.debug("command execution done: rc=%s" % (rc)) # stdout and stderr may be either a file-like or a bytes object. 
# Convert either one to a text type @@ -530,11 +517,11 @@ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, else: err = stderr - display.debug("stdout=%s, stderr=%s" % (stdout, stderr)) - display.debug("done with _low_level_execute_command() (%s)" % (cmd,)) if rc is None: rc = 0 + display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr)) + return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err) def _get_first_available_file(self, faf, of=None, searchdir='files'): diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 4251f8a63e8de4..a2abcf20aee903 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -319,7 +319,7 @@ def _run(self, cmd, in_data, sudoable=True): Starts the command and communicates with it until it ends. ''' - display_cmd = map(pipes.quote, cmd[:-1]) + [cmd[-1]] + display_cmd = map(pipes.quote, cmd) display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host) # Start the given command. 
If we don't need to pipeline data, we can try From bd0f9a4afc8406f71d65c50cda35a43549998fc1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 10 Dec 2015 21:50:11 -0500 Subject: [PATCH 3091/3617] fix make complaint when git is not installed --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ac4c07f4314216..f62cffb2df815c 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ GIT_HASH := $(shell git log -n 1 --format="%h") GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.\/]//g') GITINFO = .$(GIT_HASH).$(GIT_BRANCH) else -GITINFO = '' +GITINFO = "" endif ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1) From 58072c92fb762881679c31d050d519ccd83cb209 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 11 Dec 2015 09:32:19 -0500 Subject: [PATCH 3092/3617] removed 'bare' example in environment now shows how to use explicit templating --- docsite/rst/playbooks_environment.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_environment.rst b/docsite/rst/playbooks_environment.rst index da050f007d5ca4..f909bfcd6e60c7 100644 --- a/docsite/rst/playbooks_environment.rst +++ b/docsite/rst/playbooks_environment.rst @@ -31,7 +31,7 @@ The environment can also be stored in a variable, and accessed like so:: tasks: - apt: name=cobbler state=installed - environment: proxy_env + environment: "{{proxy_env}}" You can also use it at a playbook level:: From d9e510b19273d6a495e6694b6930e49de80f9500 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 11 Dec 2015 13:12:24 -0500 Subject: [PATCH 3093/3617] narrow down exception catching in block builds this was obscuring other errors and should have always been narrow scope --- lib/ansible/playbook/role/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index 1c6b344a4fc12d..f308954f52818e 100644 --- 
a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -150,7 +150,7 @@ def _load_role_data(self, role_include, parent_role=None): current_when = getattr(self, 'when')[:] current_when.extend(role_include.when) setattr(self, 'when', current_when) - + current_tags = getattr(self, 'tags')[:] current_tags.extend(role_include.tags) setattr(self, 'tags', current_tags) @@ -174,7 +174,7 @@ def _load_role_data(self, role_include, parent_role=None): if task_data: try: self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader) - except: + except AssertionError: raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data) handler_data = self._load_role_yaml('handlers') From 97554fc222628057d7f3255ce2caac8dfe5d783f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 11 Dec 2015 00:18:47 -0500 Subject: [PATCH 3094/3617] Fixing filter test for extract to use proper group --- test/integration/roles/test_filters/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml index af6c5d49defb98..cb1549d3f78fcc 100644 --- a/test/integration/roles/test_filters/tasks/main.yml +++ b/test/integration/roles/test_filters/tasks/main.yml @@ -77,4 +77,4 @@ - "31 == ['x','y']|map('extract',{'x':42,'y':31})|list|last" - "'local' == ['localhost']|map('extract',hostvars,'ansible_connection')|list|first" - "'local' == ['localhost']|map('extract',hostvars,['ansible_connection'])|list|first" - - "'ungrouped' == ['localhost']|map('extract',hostvars,['vars','group_names',0])|list|first" + - "'amazon' == ['localhost']|map('extract',hostvars,['vars','group_names',0])|list|first" From 7f7e730dea36dbb709b47c39ca1a28cb9f6cb3f1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 11 Dec 2015 14:55:44 -0500 Subject: [PATCH 3095/3617] 
Don't mark hosts failed if they've moved to a rescue portion of a block Fixes #13521 --- lib/ansible/plugins/strategy/__init__.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 15636b580d1c37..91ca4e863832ae 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -30,6 +30,11 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable +<<<<<<< Updated upstream +======= +from ansible.executor.play_iterator import PlayIterator +from ansible.executor.process.worker import WorkerProcess +>>>>>>> Stashed changes from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group @@ -202,8 +207,10 @@ def _process_pending_results(self, iterator): [iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts] else: iterator.mark_host_failed(host) - self._tqm._failed_hosts[host.name] = True - self._tqm._stats.increment('failures', host.name) + (state, tmp_task) = iterator.get_next_task_for_host(host, peek=True) + if state.run_state != PlayIterator.ITERATING_RESCUE: + self._tqm._failed_hosts[host.name] = True + self._tqm._stats.increment('failures', host.name) else: self._tqm._stats.increment('ok', host.name) self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors) From de71171fc21a81a343eb28ed25472ef4aa17406c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 11 Dec 2015 15:10:48 -0500 Subject: [PATCH 3096/3617] removed merge conflict --- lib/ansible/plugins/strategy/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 91ca4e863832ae..5d31a3dba8d75c 100644 --- 
a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -30,11 +30,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable -<<<<<<< Updated upstream -======= from ansible.executor.play_iterator import PlayIterator -from ansible.executor.process.worker import WorkerProcess ->>>>>>> Stashed changes from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group From ae988ed753f69cb2a7bf115c7cee41e53f01ef3e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 11 Dec 2015 15:35:57 -0500 Subject: [PATCH 3097/3617] avoid set to unique hosts to preserver order swiched to using a list comp and set to still unique but keep expected order fixes #13522 --- lib/ansible/inventory/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 3c1331e7065c2f..95e193f381a86e 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -194,7 +194,8 @@ def get_hosts(self, pattern="all", ignore_limits_and_restrictions=False): if self._restriction is not None: hosts = [ h for h in hosts if h in self._restriction ] - HOSTS_PATTERNS_CACHE[pattern_hash] = list(set(hosts)) + seen = set() + HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)] return HOSTS_PATTERNS_CACHE[pattern_hash][:] From 120b9a7ac6274c54d091291587b0c9ec865905a1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 10 Dec 2015 18:03:25 -0500 Subject: [PATCH 3098/3617] Changing the way workers are forked --- bin/ansible | 1 + lib/ansible/executor/process/worker.py | 116 ++++++++------------- lib/ansible/executor/task_queue_manager.py | 31 +----- lib/ansible/plugins/strategy/__init__.py | 48 ++++----- lib/ansible/plugins/strategy/linear.py | 5 +- 5 files changed, 74 insertions(+), 127 
deletions(-) diff --git a/bin/ansible b/bin/ansible index 7e1aa01a932c7d..627510a72e8fa2 100755 --- a/bin/ansible +++ b/bin/ansible @@ -60,6 +60,7 @@ if __name__ == '__main__': try: display = Display() + display.debug("starting run") sub = None try: diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py index a1a83a5ddaa09d..73f5faa78b6998 100644 --- a/lib/ansible/executor/process/worker.py +++ b/lib/ansible/executor/process/worker.py @@ -59,14 +59,18 @@ class WorkerProcess(multiprocessing.Process): for reading later. ''' - def __init__(self, tqm, main_q, rslt_q, hostvars_manager, loader): + def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj): super(WorkerProcess, self).__init__() # takes a task queue manager as the sole param: - self._main_q = main_q - self._rslt_q = rslt_q - self._hostvars = hostvars_manager - self._loader = loader + self._rslt_q = rslt_q + self._task_vars = task_vars + self._host = host + self._task = task + self._play_context = play_context + self._loader = loader + self._variable_manager = variable_manager + self._shared_loader_obj = shared_loader_obj # dupe stdin, if we have one self._new_stdin = sys.stdin @@ -97,73 +101,45 @@ def run(self): if HAS_ATFORK: atfork() - while True: - task = None - try: - #debug("waiting for work") - (host, task, basedir, zip_vars, compressed_vars, play_context, shared_loader_obj) = self._main_q.get(block=False) - - if compressed_vars: - job_vars = json.loads(zlib.decompress(zip_vars)) - else: - job_vars = zip_vars - - job_vars['hostvars'] = self._hostvars.hostvars() - - debug("there's work to be done! 
got a task/handler to work on: %s" % task) - - # because the task queue manager starts workers (forks) before the - # playbook is loaded, set the basedir of the loader inherted by - # this fork now so that we can find files correctly - self._loader.set_basedir(basedir) - - # Serializing/deserializing tasks does not preserve the loader attribute, - # since it is passed to the worker during the forking of the process and - # would be wasteful to serialize. So we set it here on the task now, and - # the task handles updating parent/child objects as needed. - task.set_loader(self._loader) - - # execute the task and build a TaskResult from the result - debug("running TaskExecutor() for %s/%s" % (host, task)) - executor_result = TaskExecutor( - host, - task, - job_vars, - play_context, - self._new_stdin, - self._loader, - shared_loader_obj, - ).run() - debug("done running TaskExecutor() for %s/%s" % (host, task)) - task_result = TaskResult(host, task, executor_result) - - # put the result on the result queue - debug("sending task result") - self._rslt_q.put(task_result) - debug("done sending task result") - - except queue.Empty: - time.sleep(0.0001) - except AnsibleConnectionFailure: + try: + # execute the task and build a TaskResult from the result + debug("running TaskExecutor() for %s/%s" % (self._host, self._task)) + executor_result = TaskExecutor( + self._host, + self._task, + self._task_vars, + self._play_context, + self._new_stdin, + self._loader, + self._shared_loader_obj, + ).run() + + debug("done running TaskExecutor() for %s/%s" % (self._host, self._task)) + self._host.vars = dict() + self._host.groups = [] + task_result = TaskResult(self._host, self._task, executor_result) + + # put the result on the result queue + debug("sending task result") + self._rslt_q.put(task_result) + debug("done sending task result") + + except AnsibleConnectionFailure: + self._host.vars = dict() + self._host.groups = [] + task_result = TaskResult(self._host, self._task, 
dict(unreachable=True)) + self._rslt_q.put(task_result, block=False) + + except Exception as e: + if not isinstance(e, (IOError, EOFError, KeyboardInterrupt)) or isinstance(e, TemplateNotFound): try: - if task: - task_result = TaskResult(host, task, dict(unreachable=True)) - self._rslt_q.put(task_result, block=False) + self._host.vars = dict() + self._host.groups = [] + task_result = TaskResult(self._host, self._task, dict(failed=True, exception=traceback.format_exc(), stdout='')) + self._rslt_q.put(task_result, block=False) except: - break - except Exception as e: - if isinstance(e, (IOError, EOFError, KeyboardInterrupt)) and not isinstance(e, TemplateNotFound): - break - else: - try: - if task: - task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout='')) - self._rslt_q.put(task_result, block=False) - except: - debug("WORKER EXCEPTION: %s" % e) - debug("WORKER EXCEPTION: %s" % traceback.format_exc()) - break + debug("WORKER EXCEPTION: %s" % e) + debug("WORKER EXCEPTION: %s" % traceback.format_exc()) debug("WORKER PROCESS EXITING") - diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index e2b29a5282c2e9..9189ab95819925 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -102,11 +102,7 @@ def _initialize_processes(self, num): for i in xrange(num): main_q = multiprocessing.Queue() rslt_q = multiprocessing.Queue() - - prc = WorkerProcess(self, main_q, rslt_q, self._hostvars_manager, self._loader) - prc.start() - - self._workers.append((prc, main_q, rslt_q)) + self._workers.append([None, main_q, rslt_q]) self._result_prc = ResultProcess(self._final_q, self._workers) self._result_prc.start() @@ -195,31 +191,12 @@ def run(self, play): new_play = play.copy() new_play.post_validate(templar) - class HostVarsManager(SyncManager): - pass - - hostvars = HostVars( + self.hostvars = HostVars( inventory=self._inventory, 
variable_manager=self._variable_manager, loader=self._loader, ) - HostVarsManager.register( - 'hostvars', - callable=lambda: hostvars, - # FIXME: this is the list of exposed methods to the DictProxy object, plus our - # special ones (set_variable_manager/set_inventory). There's probably a better way - # to do this with a proper BaseProxy/DictProxy derivative - exposed=( - 'set_variable_manager', 'set_inventory', '__contains__', '__delitem__', - 'set_nonpersistent_facts', 'set_host_facts', 'set_host_variable', - '__getitem__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', - 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' - ), - ) - self._hostvars_manager = HostVarsManager() - self._hostvars_manager.start() - # Fork # of forks, # of hosts or serial, whichever is lowest contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))] contenders = [ v for v in contenders if v is not None and v > 0 ] @@ -259,7 +236,6 @@ class HostVarsManager(SyncManager): # and run the play using the strategy and cleanup on way out play_return = strategy.run(iterator, play_context) self._cleanup_processes() - self._hostvars_manager.shutdown() return play_return def cleanup(self): @@ -275,7 +251,8 @@ def _cleanup_processes(self): for (worker_prc, main_q, rslt_q) in self._workers: rslt_q.close() main_q.close() - worker_prc.terminate() + if worker_prc and worker_prc.is_alive(): + worker_prc.terminate() def clear_failed_hosts(self): self._failed_hosts = dict() diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 5d31a3dba8d75c..ea30b800b027d6 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -31,6 +31,7 @@ from ansible import constants as C from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable from ansible.executor.play_iterator import PlayIterator +from ansible.executor.process.worker import 
WorkerProcess from ansible.executor.task_result import TaskResult from ansible.inventory.host import Host from ansible.inventory.group import Group @@ -138,38 +139,29 @@ def _queue_task(self, host, task, task_vars, play_context): display.debug("entering _queue_task() for %s/%s" % (host, task)) + task_vars['hostvars'] = self._tqm.hostvars # and then queue the new task display.debug("%s - putting task (%s) in queue" % (host, task)) try: display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers))) - (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] - self._cur_worker += 1 - if self._cur_worker >= len(self._workers): - self._cur_worker = 0 - # create a dummy object with plugin loaders set as an easier # way to share them with the forked processes shared_loader_obj = SharedPluginLoaderObj() - # compress (and convert) the data if so configured, which can - # help a lot when the variable dictionary is huge. We pop the - # hostvars out of the task variables right now, due to the fact - # that they're not JSON serializable - compressed_vars = False - if C.DEFAULT_VAR_COMPRESSION_LEVEL > 0: - zip_vars = zlib.compress(json.dumps(task_vars), C.DEFAULT_VAR_COMPRESSION_LEVEL) - compressed_vars = True - # we're done with the original dict now, so delete it to - # try and reclaim some memory space, which is helpful if the - # data contained in the dict is very large - del task_vars - else: - zip_vars = task_vars # noqa (pyflakes false positive because task_vars is deleted in the conditional above) - - # and queue the task - main_q.put((host, task, self._loader.get_basedir(), zip_vars, compressed_vars, play_context, shared_loader_obj)) + while True: + (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] + if worker_prc is None or not worker_prc.is_alive(): + worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj) + self._workers[self._cur_worker][0] = 
worker_prc + worker_prc.start() + break + self._cur_worker += 1 + if self._cur_worker >= len(self._workers): + self._cur_worker = 0 + time.sleep(0.0001) + del task_vars self._pending_results += 1 except (EOFError, IOError, AssertionError) as e: # most likely an abort @@ -177,7 +169,7 @@ def _queue_task(self, host, task, task_vars, play_context): return display.debug("exiting _queue_task() for %s/%s" % (host, task)) - def _process_pending_results(self, iterator): + def _process_pending_results(self, iterator, one_pass=False): ''' Reads results off the final queue and takes appropriate action based on the result (executing callbacks, updating state, etc.). @@ -247,13 +239,11 @@ def _process_pending_results(self, iterator): new_host_info = result_item.get('add_host', dict()) self._add_host(new_host_info, iterator) - self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory) elif result[0] == 'add_group': host = result[1] result_item = result[2] self._add_group(host, result_item) - self._tqm._hostvars_manager.hostvars().set_inventory(self._inventory) elif result[0] == 'notify_handler': task_result = result[1] @@ -283,7 +273,6 @@ def _process_pending_results(self, iterator): for target_host in host_list: self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value}) - self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(target_host, {var_name: var_value}) elif result[0] in ('set_host_var', 'set_host_facts'): host = result[1] @@ -316,21 +305,22 @@ def _process_pending_results(self, iterator): for target_host in host_list: self._variable_manager.set_host_variable(target_host, var_name, var_value) - self._tqm._hostvars_manager.hostvars().set_host_variable(target_host, var_name, var_value) elif result[0] == 'set_host_facts': facts = result[4] if task.action == 'set_fact': self._variable_manager.set_nonpersistent_facts(actual_host, facts) - self._tqm._hostvars_manager.hostvars().set_nonpersistent_facts(actual_host, facts) else: 
self._variable_manager.set_host_facts(actual_host, facts) - self._tqm._hostvars_manager.hostvars().set_host_facts(actual_host, facts) else: raise AnsibleError("unknown result message received: %s" % result[0]) + except Queue.Empty: time.sleep(0.0001) + if one_pass: + break + return ret_results def _wait_on_pending_results(self, iterator): diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 8a8d5c084af5b3..8c94267cf46fc5 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -169,6 +169,7 @@ def run(self, iterator, play_context): skip_rest = False choose_step = True + results = [] for (host, task) in host_tasks: if not task: continue @@ -243,12 +244,14 @@ def run(self, iterator, play_context): if run_once: break + results += self._process_pending_results(iterator, one_pass=True) + # go to next host/task group if skip_rest: continue display.debug("done queuing things up, now waiting for results queue to drain") - results = self._wait_on_pending_results(iterator) + results += self._wait_on_pending_results(iterator) host_results.extend(results) if not work_to_do and len(iterator.get_failed_hosts()) > 0: From 8db291274519331ed186f0b9dc0711f6754cb25d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 12:59:00 -0500 Subject: [PATCH 3099/3617] corrected section anchors --- docsite/rst/developing_releases.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docsite/rst/developing_releases.rst b/docsite/rst/developing_releases.rst index 1eeb2421210f10..2332459c30d05a 100644 --- a/docsite/rst/developing_releases.rst +++ b/docsite/rst/developing_releases.rst @@ -4,7 +4,7 @@ Releases .. contents:: Topics :local: -.. schedule:: +.. 
_schedule: Release Schedule ```````````````` @@ -16,7 +16,7 @@ When a fix/feature gets added to the `devel` branch it will be part of the next Sometimes an RC can be extended by a few days if a bugfix makes a change that can have far reaching consequences, so users have enough time to find any new issues that may stem from this. -.. methods:: +.. _methods: Release methods ```````````````` @@ -25,7 +25,7 @@ Ansible normally goes through a 'release candidate', issuing an RC1 for a releas Otherwise fixes will be applied and an RC2 will be provided for testing and if no bugs after 2 days, the final release will be made, iterating this last step and incrementing the candidate number as we find major bugs. -.. freezing:: +.. _freezing: Release feature freeze `````````````````````` From 0a112a1b0617d4087ae3e46ea031101af204d48e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 13:14:14 -0500 Subject: [PATCH 3100/3617] fixed formating issues with rst --- docsite/rst/porting_guide_2.0.rst | 44 +++++++++++++++---------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 9c26a4b1611872..8d69ecd4403c84 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -56,12 +56,11 @@ uses key=value escaping which has not changed. The other option is to check for "msg": "Testing some things" * porting task includes - * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. - * variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 - * variable precedence +* More dynamic. Corner-case formats that were not supposed to work now do not, as expected. 
+* variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 * templating (variables in playbooks and template lookups) has improved with regard to keeping the original instead of turning everything into a string. - If you need the old behavior, quote the value to pass it around as a string. - Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`. + If you need the old behavior, quote the value to pass it around as a string. +* Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`. You can override the `null_representation` setting to an empty string in your config file by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. * Extras callbacks must be whitelisted in ansible.cfg. Copying is no longer necessary but whitelisting in ansible.cfg must be completed. * dnf module has been rewritten. Some minor changes in behavior may be observed. @@ -72,26 +71,26 @@ Deprecated While all items listed here will show a deprecation warning message, they still work as they did in 1.9.x. Please note that they will be removed in 2.2 (Ansible always waits two major releases to remove a deprecated feature). -* Bare variables in with_ loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. +* Bare variables in `with_` loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity. * The ansible-galaxy text format requirements file. Users should use the YAML format for requirements instead. -* Undefined variables within a with_ loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. +* Undefined variables within a `with_` loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error. 
* Using variables for task parameters is unsafe and will be removed in a future version. For example:: - hosts: localhost - gather_facts: no - vars: - debug_params: - msg: "hello there" - tasks: - - debug: "{{debug_params}}" + gather_facts: no + vars: + debug_params: + msg: "hello there" + tasks: + - debug: "{{debug_params}}" * Host patterns should use a comma (,) or colon (:) instead of a semicolon (;) to separate hosts/groups in the pattern. * Ranges specified in host patterns should use the [x:y] syntax, instead of [x-y]. * Playbooks using privilege escalation should always use “become*” options rather than the old su*/sudo* options. -* The “short form” for vars_prompt is no longer supported. -For example:: +* The “short form” for vars_prompt is no longer supported. + For example:: -vars_prompt: + vars_prompt: variable_name: "Prompt string" * Specifying variables at the top level of a task include statement is no longer supported. For example:: @@ -101,21 +100,21 @@ vars_prompt: Should now be:: -- include: foo.yml - args: - a: 1 + - include: foo.yml + args: + a: 1 * Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only. * Bare variables in the `environment` dictionary (for plays/tasks/etc.) are no longer supported. Variables specified there should use the full variable syntax: ‘{{foo}}’. * Tags should no longer be specified with other parameters in a task include. Instead, they should be specified as an option on the task. -For example:: + For example:: - include: foo.yml tags=a,b,c -Should be:: + Should be:: - include: foo.yml - tags: [a, b, c] + tags: [a, b, c] * The first_available_file option on tasks has been deprecated. Users should use the with_first_found option or lookup (‘first_found’, …) plugin. @@ -125,7 +124,6 @@ Porting plugins In ansible-1.9.x, you would generally copy an existing plugin to create a new one. 
Simply implementing the methods and attributes that the caller of the plugin expected made it a plugin of that type. In ansible-2.0, most plugins are implemented by subclassing a base class for each plugin type. This way the custom plugin does not need to contain methods which are not customized. -.. note:: Lookup plugins -------------- From d7b516f75dc879ad350b285e7ddc398418bf85fd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 13:16:40 -0500 Subject: [PATCH 3101/3617] added releases doc --- docsite/rst/developing.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docsite/rst/developing.rst b/docsite/rst/developing.rst index 2a258993019b51..c5a1dca061177a 100644 --- a/docsite/rst/developing.rst +++ b/docsite/rst/developing.rst @@ -11,6 +11,7 @@ Learn how to build modules of your own in any language, and also how to extend A developing_modules developing_plugins developing_test_pr + developing_releases Developers will also likely be interested in the fully-discoverable in :doc:`tower`. It's great for embedding Ansible in all manner of applications. 
From 8e445c551a23f52e901c9b1d2603e496a2e88c11 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 13:43:10 -0500 Subject: [PATCH 3102/3617] removed unused imports in galaxy/cli --- lib/ansible/cli/galaxy.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 01e0475b24b114..0f9074da935119 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -25,7 +25,6 @@ import os.path import sys import yaml -import json import time from collections import defaultdict @@ -40,7 +39,6 @@ from ansible.galaxy.login import GalaxyLogin from ansible.galaxy.token import GalaxyToken from ansible.playbook.role.requirement import RoleRequirement -from ansible.module_utils.urls import open_url try: from __main__ import display @@ -61,10 +59,10 @@ class GalaxyCLI(CLI): "remove": "delete a role from your roles path", "search": "query the Galaxy API", "setup": "add a TravisCI integration to Galaxy", - } + } SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - + def __init__(self, args): self.VALID_ACTIONS = self.available_commands.keys() self.VALID_ACTIONS.sort() @@ -101,7 +99,7 @@ def parse(self): usage = "usage: %%prog [%s] [--help] [options] ..." 
% "|".join(self.VALID_ACTIONS), epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) - + self.set_action() # options specific to actions @@ -131,7 +129,7 @@ def parse(self): self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies') self.parser.add_option('-r', '--role-file', dest='role_file', - help='A file containing a list of roles to be imported') + help='A file containing a list of roles to be imported') elif self.action == "remove": self.parser.set_usage("usage: %prog remove role1 role2 ...") elif self.action == "list": @@ -190,7 +188,7 @@ def run(self): # if not offline, get connect to galaxy api if self.action in ("import","info","install","search","login","setup","delete") or \ - (self.action == 'init' and not self.options.offline): + (self.action == 'init' and not self.options.offline): self.api = GalaxyAPI(self.galaxy) self.execute() @@ -544,7 +542,7 @@ def execute_list(self): def execute_search(self): page_size = 1000 search = None - + if len(self.args): terms = [] for i in range(len(self.args)): @@ -556,7 +554,7 @@ def execute_search(self): response = self.api.search_roles(search, platforms=self.options.platforms, tags=self.options.tags, author=self.options.author, page_size=page_size) - + if response['count'] == 0: display.display("No roles match your search.", color="yellow") return True @@ -578,7 +576,7 @@ def execute_search(self): data += (format_str % ("----", "-----------")) for role in response['results']: data += (format_str % (role['username'] + '.' 
+ role['name'],role['description'])) - + self.pager(data) return True @@ -595,12 +593,12 @@ def execute_login(self): github_token = self.options.token galaxy_response = self.api.authenticate(github_token) - + if self.options.token is None: # Remove the token we created login.remove_github_token() - - # Store the Galaxy token + + # Store the Galaxy token token = GalaxyToken() token.set(galaxy_response['token']) @@ -611,7 +609,7 @@ def execute_import(self): """ Import a role into Galaxy """ - + colors = { 'INFO': 'normal', 'WARNING': 'yellow', @@ -631,7 +629,7 @@ def execute_import(self): else: # Submit an import request task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference) - + if len(task) > 1: # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo), @@ -693,7 +691,7 @@ def execute_setup(self): if len(self.args) < 4: raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret") return 0 - + secret = self.args.pop() github_repo = self.args.pop() github_user = self.args.pop() @@ -711,7 +709,7 @@ def execute_delete(self): if len(self.args) < 2: raise AnsibleError("Missing one or more arguments. 
Expected: github_user github_repo") - + github_repo = self.args.pop() github_user = self.args.pop() resp = self.api.delete_role(github_user, github_repo) @@ -722,9 +720,8 @@ def execute_delete(self): display.display("------ --------------- ----------") for role in resp['deleted_roles']: display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name)) - + display.display(resp['status']) return True - From 3c4d2fc6f2cdeba074511fb591134014cf77032d Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 12 Dec 2015 19:31:19 +0100 Subject: [PATCH 3103/3617] Add tests for ansible.module_utils.known_hosts --- .../module_utils/basic/test_known_hosts.py | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 test/units/module_utils/basic/test_known_hosts.py diff --git a/test/units/module_utils/basic/test_known_hosts.py b/test/units/module_utils/basic/test_known_hosts.py new file mode 100644 index 00000000000000..952184bfec9f4c --- /dev/null +++ b/test/units/module_utils/basic/test_known_hosts.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# (c) 2015, Michael Scherer +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from ansible.compat.tests import unittest +from ansible.module_utils import known_hosts + +class TestAnsibleModuleKnownHosts(unittest.TestCase): + urls = { + 'ssh://one.example.org/example.git': + {'is_ssh_url': True, 'get_fqdn': 'one.example.org'}, + 'ssh+git://two.example.org/example.git': + {'is_ssh_url': True, 'get_fqdn': 'two.example.org'}, + 'rsync://three.example.org/user/example.git': + {'is_ssh_url': False, 'get_fqdn': 'three.example.org'}, + 'git@four.example.org:user/example.git': + {'is_ssh_url': True, 'get_fqdn': 'four.example.org'}, + 'git+ssh://five.example.org/example.git': + {'is_ssh_url': True, 'get_fqdn': 'five.example.org'}, + 'ssh://six.example.org:21/example.org': + {'is_ssh_url': True, 'get_fqdn': 'six.example.org'}, + } + + def test_is_ssh_url(self): + for u in self.urls: + self.assertEqual(known_hosts.is_ssh_url(u), self.urls[u]['is_ssh_url']) + + def test_get_fqdn(self): + for u in self.urls: + self.assertEqual(known_hosts.get_fqdn(u), self.urls[u]['get_fqdn']) + + + From 99e46440bdaf622958f78cebecb52dec7ed67669 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 16:10:18 -0500 Subject: [PATCH 3104/3617] changed shell delimiters for csh fixes #13459 --- lib/ansible/plugins/shell/csh.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py index 1c383d133c6e87..bd210f12feb097 100644 --- a/lib/ansible/plugins/shell/csh.py +++ b/lib/ansible/plugins/shell/csh.py @@ -24,6 +24,8 @@ class ShellModule(ShModule): # How to end lines in a python script one-liner _SHELL_EMBEDDED_PY_EOL = '\\\n' _SHELL_REDIRECT_ALLNULL = '>& /dev/null' + _SHELL_SUB_LEFT = '"`' + _SHELL_SUB_RIGHT = '`"' def env_prefix(self, **kwargs): return 'env %s' % super(ShellModule, self).env_prefix(**kwargs) From f3bedbae2991b540421d64f5be942ec7c84fdf7d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 17:50:55 -0500 Subject: [PATCH 3105/3617] simplified skippy thanks agaffney! 
--- lib/ansible/plugins/callback/skippy.py | 159 +------------------------ 1 file changed, 6 insertions(+), 153 deletions(-) diff --git a/lib/ansible/plugins/callback/skippy.py b/lib/ansible/plugins/callback/skippy.py index 495943417fd996..306d1a534e58cb 100644 --- a/lib/ansible/plugins/callback/skippy.py +++ b/lib/ansible/plugins/callback/skippy.py @@ -19,10 +19,9 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible import constants as C -from ansible.plugins.callback import CallbackBase +from ansible.plugins.callback.default import CallbackModule as CallbackModule_default -class CallbackModule(CallbackBase): +class CallbackModule(CallbackModule_default): ''' This is the default callback interface, which simply prints messages @@ -33,154 +32,8 @@ class CallbackModule(CallbackBase): CALLBACK_TYPE = 'stdout' CALLBACK_NAME = 'skippy' - def v2_runner_on_failed(self, result, ignore_errors=False): - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if 'exception' in result._result: - if self._display.verbosity < 3: - # extract just the actual error message from the exception text - error = result._result['exception'].strip().split('\n')[-1] - msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error - else: - msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - - self._display.display(msg, color='red') - - # finally, remove the exception from the result so it's not shown every time - del result._result['exception'] - - if result._task.loop and 'results' in result._result: - self._process_items(result) - else: - if delegated_vars: - self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') - else: - self._display.display("fatal: [%s]: FAILED! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') - - if result._task.ignore_errors: - self._display.display("...ignoring", color='cyan') - - def v2_runner_on_ok(self, result): - - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if result._task.action == 'include': - return - elif result._result.get('changed', False): - if delegated_vars: - msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' - else: - if delegated_vars: - msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "ok: [%s]" % result._host.get_name() - color = 'green' - - if result._task.loop and 'results' in result._result: - self._process_items(result) - else: - - if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: - msg += " => %s" % (self._dump_results(result._result),) - self._display.display(msg, color=color) - - self._handle_warnings(result._result) - - def v2_runner_on_unreachable(self, result): - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if delegated_vars: - self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') - else: - self._display.display("fatal: [%s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') - - def v2_playbook_on_no_hosts_matched(self): - self._display.display("skipping: no hosts matched", color='cyan') - - def v2_playbook_on_no_hosts_remaining(self): - self._display.banner("NO MORE HOSTS LEFT") - - def v2_playbook_on_task_start(self, task, is_conditional): - self._display.banner("TASK [%s]" % task.get_name().strip()) - if self._display.verbosity > 2: - path = task.get_path() - if path: - self._display.display("task path: %s" % path, color='dark gray') - - def v2_playbook_on_cleanup_task_start(self, task): - self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip()) - - def v2_playbook_on_handler_task_start(self, task): - self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip()) - - def v2_playbook_on_play_start(self, play): - name = play.get_name().strip() - if not name: - msg = "PLAY" - else: - msg = "PLAY [%s]" % name - - self._display.banner(msg) - - def v2_on_file_diff(self, result): - if result._task.loop and 'results' in result._result: - for res in result._result['results']: - newres = self._copy_result(result) - res['item'] = self._get_item(res) - newres._result = res - - self.v2_on_file_diff(newres) - elif 'diff' in result._result and result._result['diff']: - self._display.display(self._get_diff(result._result['diff'])) - - def v2_playbook_item_on_ok(self, result): - - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if result._task.action == 'include': - return - elif result._result.get('changed', False): - if delegated_vars: - msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' - else: - if delegated_vars: - msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) - else: - msg = "ok: [%s]" % result._host.get_name() - color = 'green' - - msg += " => (item=%s)" % 
(result._result['item'],) - - if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: - msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color=color) - - def v2_playbook_item_on_failed(self, result): - delegated_vars = result._result.get('_ansible_delegated_vars', None) - if 'exception' in result._result: - if self._display.verbosity < 3: - # extract just the actual error message from the exception text - error = result._result['exception'].strip().split('\n')[-1] - msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error - else: - msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - - self._display.display(msg, color='red') - - # finally, remove the exception from the result so it's not shown every time - del result._result['exception'] - - if delegated_vars: - self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red') - else: - self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red') - - self._handle_warnings(result._result) - - def v2_playbook_on_include(self, included_file): - msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) - color = 'cyan' - self._display.display(msg, color='cyan') + def v2_runner_on_skipped(self, result): + pass + def v2_playbook_item_on_skipped(self, result): + pass From d73562902b289e7fd7e2e5a37e82b00c83a16369 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 13 Dec 2015 00:13:13 -0500 Subject: [PATCH 3106/3617] debug now validates its params simplified var handling made default message the same as in pre 2.0 fixes #13532 
--- lib/ansible/plugins/action/debug.py | 35 ++++++++++++++++------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py index a0ffb71404419f..2af20eddfc4b37 100644 --- a/lib/ansible/plugins/action/debug.py +++ b/lib/ansible/plugins/action/debug.py @@ -20,40 +20,45 @@ from ansible.plugins.action import ActionBase from ansible.utils.boolean import boolean from ansible.utils.unicode import to_unicode +from ansible.errors import AnsibleUndefinedVariable class ActionModule(ActionBase): ''' Print statements during execution ''' TRANSFERS_FILES = False + VALID_ARGS = set(['msg', 'var']) def run(self, tmp=None, task_vars=None): if task_vars is None: task_vars = dict() + for arg in self._task.args: + if arg not in self.VALID_ARGS: + return {"failed": True, "msg": "'%s' is not a valid option in debug" % arg} + + if 'msg' in self._task.args and 'var' in self._task.args: + return {"failed": True, "msg": "'msg' and 'var' are incompatible options"} + result = super(ActionModule, self).run(tmp, task_vars) if 'msg' in self._task.args: - if 'fail' in self._task.args and boolean(self._task.args['fail']): - result['failed'] = True - result['msg'] = self._task.args['msg'] - else: - result['msg'] = self._task.args['msg'] - # FIXME: move the LOOKUP_REGEX somewhere else - elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']): - results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=False) + result['msg'] = self._task.args['msg'] + + elif 'var' in self._task.args: + try: + results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True) + if results == self._task.args['var']: + raise AnsibleUndefinedVariable + except AnsibleUndefinedVariable: + results = "VARIABLE IS NOT DEFINED!" 
+ if type(self._task.args['var']) in (list, dict): # If var is a list or dict, use the type as key to display result[to_unicode(type(self._task.args['var']))] = results else: - # If var name is same as result, try to template it - if results == self._task.args['var']: - try: - results = self._templar.template("{{" + results + "}}", convert_bare=True, fail_on_undefined=True) - except: - results = "VARIABLE IS NOT DEFINED!" result[self._task.args['var']] = results else: - result['msg'] = 'here we are' + result['msg'] = 'Hello world!' # force flag to make debug output module always verbose result['_ansible_verbose_always'] = True From e2ad4fe9100729462fbd511c75a035ccdfd41841 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 13 Dec 2015 00:34:23 -0500 Subject: [PATCH 3107/3617] include all packaging in tarball not juse rpm spec file --- MANIFEST.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/MANIFEST.in b/MANIFEST.in index d8402f0297ff17..64c5bf1fcbaf8d 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,12 +4,13 @@ prune ticket_stubs prune packaging prune test prune hacking -include README.md packaging/rpm/ansible.spec COPYING +include README.md COPYING include examples/hosts include examples/ansible.cfg include lib/ansible/module_utils/powershell.ps1 recursive-include lib/ansible/modules * recursive-include docs * +recursive-include packaging * include Makefile include VERSION include MANIFEST.in From 4779f29777872f1352c65ea504eb81e998a47b7b Mon Sep 17 00:00:00 2001 From: Usman Ehtesham Gul Date: Sun, 13 Dec 2015 01:24:27 -0500 Subject: [PATCH 3108/3617] Fix Doc mistake Fix Doc mistake in ansible/docsite/rst/playbooks_variables.rst --- docsite/rst/playbooks_variables.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst index 307387a72e58bf..122c0ef9232209 100644 --- a/docsite/rst/playbooks_variables.rst +++ b/docsite/rst/playbooks_variables.rst @@ -796,7 
+796,7 @@ Basically, anything that goes into "role defaults" (the defaults folder inside t .. [1] Tasks in each role will see their own role's defaults. Tasks defined outside of a role will see the last role's defaults. .. [2] Variables defined in inventory file or provided by dynamic inventory. -.. note:: Within a any section, redefining a var will overwrite the previous instance. +.. note:: Within any section, redefining a var will overwrite the previous instance. If multiple groups have the same variable, the last one loaded wins. If you define a variable twice in a play's vars: section, the 2nd one wins. .. note:: the previous describes the default config `hash_behavior=replace`, switch to 'merge' to only partially overwrite. From 1b2ebe8defddbb6f6cd471f999d6eba8b78f1446 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Sun, 13 Dec 2015 10:56:47 +0100 Subject: [PATCH 3109/3617] make shallow clone the default for ansibel-pull --- lib/ansible/cli/pull.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 67e89259303079..7b2fd13e5eec52 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -80,8 +80,8 @@ def parse(self): help='directory to checkout repository to') self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository') - self.parser.add_option('--depth', dest='depth', default=None, - help='Depth of checkout, shallow checkout if greater or equal 1 . Defaults to full checkout.') + self.parser.add_option('--full', dest='fullclone', action='store_true', + help='Do a full clone, instead of a shallow one.') self.parser.add_option('-C', '--checkout', dest='checkout', help='branch/tag/commit to checkout. 
' 'Defaults to behavior of repository module.') self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', @@ -157,8 +157,8 @@ def run(self): if self.options.verify: repo_opts += ' verify_commit=yes' - if self.options.depth: - repo_opts += ' depth=%s' % self.options.depth + if not self.options.fullclone: + repo_opts += ' depth=1' path = module_loader.find_plugin(self.options.module_name) From 1bd8d97093f30e4848640a5c43a7f830a9112e2f Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Sun, 13 Dec 2015 11:19:50 +0100 Subject: [PATCH 3110/3617] fix whitespace --- lib/ansible/cli/pull.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py index 7b2fd13e5eec52..2571717766e7a2 100644 --- a/lib/ansible/cli/pull.py +++ b/lib/ansible/cli/pull.py @@ -156,7 +156,7 @@ def run(self): if self.options.verify: repo_opts += ' verify_commit=yes' - + if not self.options.fullclone: repo_opts += ' depth=1' From d8e6bc98a2494628aca2fc406655dce70701f525 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 17:09:34 -0500 Subject: [PATCH 3111/3617] Fix overloaded options. Show an error when no action given. Don't show a helpful list of commands and descriptions. 
--- lib/ansible/cli/galaxy.py | 68 ++++++++------------------------------- 1 file changed, 13 insertions(+), 55 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 0f9074da935119..13df7c4122033f 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -48,50 +48,14 @@ class GalaxyCLI(CLI): - available_commands = { - "delete": "remove a role from Galaxy", - "import": "add a role contained in a GitHub repo to Galaxy", - "info": "display details about a particular role", - "init": "create a role directory structure in your roles path", - "install": "download a role into your roles path", - "list": "enumerate roles found in your roles path", - "login": "authenticate with Galaxy API and store the token", - "remove": "delete a role from your roles path", - "search": "query the Galaxy API", - "setup": "add a TravisCI integration to Galaxy", - } - SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - + VALID_ACTIONS = ("delete","import","info","init","install","list","login","remove","search","setup") + def __init__(self, args): - self.VALID_ACTIONS = self.available_commands.keys() - self.VALID_ACTIONS.sort() self.api = None self.galaxy = None super(GalaxyCLI, self).__init__(args) - def set_action(self): - """ - Get the action the user wants to execute from the sys argv list. 
- """ - for i in range(0,len(self.args)): - arg = self.args[i] - if arg in self.VALID_ACTIONS: - self.action = arg - del self.args[i] - break - - if not self.action: - self.show_available_actions() - - def show_available_actions(self): - # list available commands - display.display(u'\n' + "usage: ansible-galaxy COMMAND [--help] [options] ...") - display.display(u'\n' + "availabe commands:" + u'\n\n') - for key in self.VALID_ACTIONS: - display.display(u'\t' + "%-12s %s" % (key, self.available_commands[key])) - display.display(' ') - def parse(self): ''' create an options parser for bin/ansible ''' @@ -107,11 +71,11 @@ def parse(self): self.parser.set_usage("usage: %prog delete [options] github_user github_repo") elif self.action == "import": self.parser.set_usage("usage: %prog import [options] github_user github_repo") - self.parser.add_option('-n', '--no-wait', dest='wait', action='store_false', default=True, + self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.') - self.parser.add_option('-b', '--branch', dest='reference', + self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. 
Defaults to the repository\'s default branch (usually master)') - self.parser.add_option('-t', '--status', dest='check_status', action='store_true', default=False, + self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.') elif self.action == "info": self.parser.set_usage("usage: %prog info [options] role_name[,version]") @@ -147,15 +111,14 @@ def parse(self): help='GitHub username') self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]") elif self.action == "setup": - self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret" + - u'\n\n' + "Create an integration with travis.") - self.parser.add_option('-r', '--remove', dest='remove_id', default=None, + self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret") + self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.') - self.parser.add_option('-l', '--list', dest="setup_list", action='store_true', default=False, + self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.') # options that apply to more than one action - if not self.action in ("config","import","init","login","setup"): + if not self.action in ("import","init","login","setup"): self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. 
' 'The default is the roles_path configured in your ' @@ -171,19 +134,14 @@ def parse(self): self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role') - if self.action: - # get options, args and galaxy object - self.options, self.args =self.parser.parse_args() - display.verbosity = self.options.verbosity - self.galaxy = Galaxy(self.options) + self.options, self.args =self.parser.parse_args() + display.verbosity = self.options.verbosity + self.galaxy = Galaxy(self.options) return True def run(self): - - if not self.action: - return True - + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api From 989604b1a3977e6246f997d1a75aaf97776b28ae Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 17:12:53 -0500 Subject: [PATCH 3112/3617] Fix typo. --- docsite/rst/galaxy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 783ac15e456a76..c9dea27336788f 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -126,7 +126,7 @@ The above will create the following directory structure in the current working d :: README.md - .travsis.yml + .travis.yml defaults/ main.yml files/ From bc7392009069749042bf937eb315ea19c513d0ff Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 18:28:57 -0500 Subject: [PATCH 3113/3617] Updated ansible-galaxy man page. Removed -b option for import. 
--- docs/man/man1/ansible-galaxy.1.asciidoc.in | 202 ++++++++++++++++++++- lib/ansible/cli/galaxy.py | 4 +- 2 files changed, 201 insertions(+), 5 deletions(-) diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in index e6f2d0b456887c..44f0b46b085fc8 100644 --- a/docs/man/man1/ansible-galaxy.1.asciidoc.in +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -12,7 +12,7 @@ ansible-galaxy - manage roles using galaxy.ansible.com SYNOPSIS -------- -ansible-galaxy [init|info|install|list|remove] [--help] [options] ... +ansible-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [--help] [options] ... DESCRIPTION @@ -20,7 +20,7 @@ DESCRIPTION *Ansible Galaxy* is a shared repository for Ansible roles. The ansible-galaxy command can be used to manage these roles, -or by creating a skeleton framework for roles you'd like to upload to Galaxy. +or for creating a skeleton framework for roles you'd like to upload to Galaxy. COMMON OPTIONS -------------- @@ -29,7 +29,6 @@ COMMON OPTIONS Show a help message related to the given sub-command. - INSTALL ------- @@ -145,6 +144,203 @@ The path to the directory containing your roles. The default is the *roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) +SEARCH +------ + +The *search* sub-command returns a filtered list of roles found at +galaxy.ansible.com. + +USAGE +~~~~~ + +$ ansible-galaxy search [options] [searchterm1 searchterm2] + + +OPTIONS +~~~~~~~ +*--galaxy-tags*:: + +Provide a comma separated list of Galaxy Tags on which to filter. + +*--platforms*:: + +Provide a comma separated list of Platforms on which to filter. + +*--author*:: + +Specify the username of a Galaxy contributor on which to filter. + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. 
+ + +INFO +---- + +The *info* sub-command shows detailed information for a specific role. +Details returned about the role included information from the local copy +as well as information from galaxy.ansible.com. + +USAGE +~~~~~ + +$ ansible-galaxy info [options] role_name[, version] + +OPTIONS +~~~~~~~ + +*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH':: + +The path to the directory containing your roles. The default is the *roles_path* +configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +LOGIN +----- + +The *login* sub-command is used to authenticate with galaxy.ansible.com. +Authentication is required to use the import, delete and setup commands. +It will authenticate the user,retrieve a token from Galaxy, and store it +in the user's home directory. + +USAGE +~~~~~ + +$ ansible-galaxy login [options] + +The *login* sub-command prompts for a *GitHub* username and password. It does +NOT send your password to Galaxy. It actually authenticates with GitHub and +creates a personal access token. It then sends the personal access token to +Galaxy, which in turn verifies that you are you and returns a Galaxy access +token. After authentication completes the *GitHub* personal access token is +destroyed. + +If you do not wish to use your GitHub password, or if you have two-factor +authentication enabled with GitHub, use the *--github-token* option to pass a +personal access token that you create. Log into GitHub, go to Settings and +click on Personal Access Token to create a token. + +OPTIONS +~~~~~~~ + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +*--github-token*:: + +Authenticate using a *GitHub* personal access token rather than a password. 
+ + +IMPORT +------ + +Import a role from *GitHub* to galaxy.ansible.com. Requires the user first +authenticate with galaxy.ansible.com using the *login* subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy import [options] github_user github_repo + +OPTIONS +~~~~~~~ +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +*--branch*:: + +Provide a specific branch to import. When a branch is not specified the +branch found in meta/main.yml is used. If no branch is specified in +meta/main.yml, the repo's default branch (usually master) is used. + + +DELETE +------ + +The *delete* sub-command will delete a role from galaxy.ansible.com. Requires +the user first authenticate with galaxy.ansible.com using the *login* subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy delete [options] github_user github_repo + +OPTIONS +~~~~~~~ + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +SETUP +----- + +The *setup* sub-command creates an integration point for *Travis CI*, enabling +galaxy.ansible.com to receive notifications from *Travis* on build completion. +Requires the user first authenticate with galaxy.ansible.com using the *login* +subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy setup [options] source github_user github_repo secret + +* Use *travis* as the source value. In the future additional source values may + be added. + +* Provide your *Travis* user token as the secret. The token is not stored by + galaxy.ansible.com. A hash is created using github_user, github_repo + and your token. The hash value is what actually gets stored. + +OPTIONS +~~~~~~~ + +*-c*, *--ingore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +--list:: + +Show your configured integrations. 
Provids the ID of each integration +which can be used with the remove option. + +--remove:: + +Remove a specific integration. Provide the ID of the integration to +be removed. + AUTHOR ------ diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 13df7c4122033f..1cd936d028e175 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -100,7 +100,7 @@ def parse(self): self.parser.set_usage("usage: %prog list [role_name]") elif self.action == "login": self.parser.set_usage("usage: %prog login [options]") - self.parser.add_option('-g','--github-token', dest='token', default=None, + self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.') elif self.action == "search": self.parser.add_option('--platforms', dest='platforms', @@ -118,7 +118,7 @@ def parse(self): help='List all of your integrations.') # options that apply to more than one action - if not self.action in ("import","init","login","setup"): + if not self.action in ("delete","import","init","login","setup"): self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, help='The path to the directory containing your roles. ' 'The default is the roles_path configured in your ' From f1c72ff8f51b749165d5bc4089ca8c8fd5b22789 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 22:04:00 -0500 Subject: [PATCH 3114/3617] Make sure it is clear that new commands require using the Galaxy 2.0 Beta site. 
--- docsite/rst/galaxy.rst | 58 +++++++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index c9dea27336788f..3a12044ca9eb6f 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -1,7 +1,7 @@ Ansible Galaxy ++++++++++++++ -"Ansible Galaxy" can either refer to a website for sharing and downloading Ansible roles, or a command line tool that helps work with roles. +"Ansible Galaxy" can either refer to a website for sharing and downloading Ansible roles, or a command line tool for managing and creating roles. .. contents:: Topics @@ -10,24 +10,36 @@ The Website The website `Ansible Galaxy `_, is a free site for finding, downloading, and sharing community developed Ansible roles. Downloading roles from Galaxy is a great way to jumpstart your automation projects. -You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later. +Access the Galaxy web site using GitHub OAuth, and to install roles use the 'ansible-galaxy' command line tool included in Ansible 1.4.2 and later. Read the "About" page on the Galaxy site for more information. The ansible-galaxy command line tool ```````````````````````````````````` -The command line ansible-galaxy has many different subcommands. +The ansible-galaxy command has many different sub-commands for managing roles both locally and at `galaxy.ansible.com `_. + +.. note:: + + The search, login, import, delete, and setup commands in the Ansible 2.0 version of ansible-galaxy require access to the + 2.0 Beta release of the Galaxy web site available at `https://galaxy-qa.ansible.com `_. + + Use the ``--server`` option to access the beta site. 
For example:: + + $ ansible-galaxy search --server https://galaxy-qa.ansible.com mysql --author geerlingguy + + Additionally, you can define a server in ansible.cfg:: + + [galaxy] + server=https://galaxy-qa.ansible.com Installing Roles ---------------- -The most obvious is downloading roles from the Ansible Galaxy website:: +The most obvious use of the ansible-galaxy command is downloading roles from `the Ansible Galaxy website `_:: $ ansible-galaxy install username.rolename -.. _galaxy_cli_roles_path: - roles_path =============== @@ -169,7 +181,9 @@ The search command will return a list of the first 1000 results matching your se .. note:: - The format of results pictured here is new in Ansible 2.0. + The search command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Get More Information About a Role --------------------------------- @@ -213,10 +227,6 @@ This returns everything found in Galaxy for the role: version: watchers_count: 1 -.. note:: - - The format of results pictured here is new in Ansible 2.0. - List Installed Roles -------------------- @@ -262,7 +272,13 @@ To use the import, delete and setup commands authentication with Galaxy is requi As depicted above, the login command prompts for a GitHub username and password. It does NOT send your password to Galaxy. It actually authenticates with GitHub and creates a personal access token. It then sends the personal access token to Galaxy, which in turn verifies that you are you and returns a Galaxy access token. After authentication completes the GitHub personal access token is destroyed. -If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. 
Log into GitHub, go to Settings and click on Personal Access Token to create a token. +If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. Log into GitHub, go to Settings and click on Personal Access Token to create a token. + +.. note:: + + The login command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Import a Role ------------- @@ -298,7 +314,9 @@ If the --no-wait option is present, the command will not wait for results. Resul .. note:: - The import command is only available in Ansible 2.0. + The import command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Delete a Role ------------- @@ -307,13 +325,15 @@ Remove a role from the Galaxy web site using the delete command. You can delete :: - ansible-galaxy delete github_user github_repo + $ ansible-galaxy delete github_user github_repo This only removes the role from Galaxy. It does not impact the actual GitHub repo. .. note:: - The delete command is only available in Ansible 2.0. + The delete command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. Setup Travis Integerations -------------------------- @@ -324,7 +344,7 @@ Using the setup command you can enable notifications from `travis `_. The calculated hash is stored in Galaxy and used to verify notifications received from Travis. 
@@ -339,7 +359,9 @@ When you create your .travis.yml file add the following to cause Travis to notif .. note:: - The setup command is only available in Ansible 2.0. + The setup command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access + `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] + section of your ansible.cfg file. List Travis Integrtions @@ -361,7 +383,7 @@ Use the --list option to display your Travis integrations: Remove Travis Integrations ========================== -Use the --remove option to disable a Travis integration: +Use the --remove option to disable and remove a Travis integration: :: From 342dee0023e2c6fd6d361a70fec621c09b833915 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 22:56:54 -0500 Subject: [PATCH 3115/3617] Define and handle ignore_certs correctly. Preserve search term order. Tweak to Galaxy docsite. --- docsite/rst/galaxy.rst | 2 +- lib/ansible/cli/galaxy.py | 8 ++++---- lib/ansible/galaxy/api.py | 18 ++++++++---------- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 3a12044ca9eb6f..200fdfd5750ba1 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -41,7 +41,7 @@ The most obvious use of the ansible-galaxy command is downloading roles from `th $ ansible-galaxy install username.rolename roles_path -=============== +========== You can specify a particular directory where you want the downloaded roles to be placed:: diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 1cd936d028e175..a4a7b915f36a46 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -127,7 +127,7 @@ def parse(self): if self.action in ("import","info","init","install","login","search","setup","delete"): self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination') - 
self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True, + self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=False, help='Ignore SSL certificate validation errors.') if self.action in ("init","install"): @@ -505,7 +505,7 @@ def execute_search(self): terms = [] for i in range(len(self.args)): terms.append(self.args.pop()) - search = '+'.join(terms) + search = '+'.join(terms[::-1]) if not search and not self.options.platforms and not self.options.tags and not self.options.author: raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.") @@ -520,9 +520,9 @@ def execute_search(self): data = '' if response['count'] > page_size: - data += ("Found %d roles matching your search. Showing first %s.\n" % (response['count'], page_size)) + data += ("\nFound %d roles matching your search. Showing first %s.\n" % (response['count'], page_size)) else: - data += ("Found %d roles matching your search:\n" % response['count']) + data += ("\nFound %d roles matching your search:\n" % response['count']) max_len = [] for role in response['results']: diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py index c1bf2c4ed50b49..eec9ee932e0b94 100644 --- a/lib/ansible/galaxy/api.py +++ b/lib/ansible/galaxy/api.py @@ -48,16 +48,15 @@ class GalaxyAPI(object): SUPPORTED_VERSIONS = ['v1'] def __init__(self, galaxy): - self.galaxy = galaxy self.token = GalaxyToken() self._api_server = C.GALAXY_SERVER - self._validate_certs = C.GALAXY_IGNORE_CERTS + self._validate_certs = not C.GALAXY_IGNORE_CERTS # set validate_certs - if galaxy.options.validate_certs == False: + if galaxy.options.ignore_certs: self._validate_certs = False - display.vvv('Check for valid certs: %s' % self._validate_certs) + display.vvv('Validate TLS certificates: %s' % self._validate_certs) # set the API server if galaxy.options.api_server != C.GALAXY_SERVER: @@ -65,14 
+64,13 @@ def __init__(self, galaxy): display.vvv("Connecting to galaxy_server: %s" % self._api_server) server_version = self.get_server_api_version() - - if server_version in self.SUPPORTED_VERSIONS: - self.baseurl = '%s/api/%s' % (self._api_server, server_version) - self.version = server_version # for future use - display.vvv("Base API: %s" % self.baseurl) - else: + if not server_version in self.SUPPORTED_VERSIONS: raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version) + self.baseurl = '%s/api/%s' % (self._api_server, server_version) + self.version = server_version # for future use + display.vvv("Base API: %s" % self.baseurl) + def __auth_header(self): token = self.token.get() if token is None: From 847f454bccb6ec3942ff5d652db7dd1db4d77159 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Wed, 9 Dec 2015 23:25:23 -0500 Subject: [PATCH 3116/3617] Add a section to intro_configuration for Galaxy. --- docsite/rst/intro_configuration.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index dda07fc4506502..0ad54938d08ead 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -897,3 +897,19 @@ The normal behaviour is for operations to copy the existing context or use the u The default list is: nfs,vboxsf,fuse,ramfs:: special_context_filesystems = nfs,vboxsf,fuse,ramfs,myspecialfs + +Galaxy Settings +--------------- + +The following options can be set in the [galaxy] section of ansible.cfg: + +server +====== + +Override the default Galaxy server value of https://galaxy.ansible.com. + +ignore_certs +============ + +If set to *yes*, ansible-galaxy will not validate TLS certificates. Handy for testing against a server with a self-signed certificate +. 
\ No newline at end of file From 06dde0d332d88e958ac5489bea88f0f5bc536e1b Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Thu, 10 Dec 2015 10:57:48 -0500 Subject: [PATCH 3117/3617] Fixed documentation typos and bits that needed clarification. Fixed missing spaces in VALID_ACTIONS. --- docs/man/man1/ansible-galaxy.1.asciidoc.in | 19 ++++++++++--------- docsite/rst/galaxy.rst | 4 ++-- docsite/rst/intro_configuration.rst | 4 ++-- lib/ansible/cli/galaxy.py | 2 +- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in index 44f0b46b085fc8..9ffe65e45a7c07 100644 --- a/docs/man/man1/ansible-galaxy.1.asciidoc.in +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -147,8 +147,9 @@ configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) SEARCH ------ -The *search* sub-command returns a filtered list of roles found at -galaxy.ansible.com. +The *search* sub-command returns a filtered list of roles found on the remote +server. + USAGE ~~~~~ @@ -170,7 +171,7 @@ Provide a comma separated list of Platforms on which to filter. Specify the username of a Galaxy contributor on which to filter. -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -199,7 +200,7 @@ OPTIONS The path to the directory containing your roles. The default is the *roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -213,7 +214,7 @@ LOGIN The *login* sub-command is used to authenticate with galaxy.ansible.com. Authentication is required to use the import, delete and setup commands. -It will authenticate the user,retrieve a token from Galaxy, and store it +It will authenticate the user, retrieve a token from Galaxy, and store it in the user's home directory. USAGE @@ -236,7 +237,7 @@ click on Personal Access Token to create a token. 
OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -262,7 +263,7 @@ $ ansible-galaxy import [options] github_user github_repo OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -291,7 +292,7 @@ $ ansible-galaxy delete [options] github_user github_repo OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. @@ -323,7 +324,7 @@ $ ansible-galaxy setup [options] source github_user github_repo secret OPTIONS ~~~~~~~ -*-c*, *--ingore-certs*:: +*-c*, *--ignore-certs*:: Ignore TLS certificate errors. diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 200fdfd5750ba1..f8cde57e62c802 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -364,8 +364,8 @@ When you create your .travis.yml file add the following to cause Travis to notif section of your ansible.cfg file. -List Travis Integrtions -======================= +List Travis Integrations +======================== Use the --list option to display your Travis integrations: diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst index 0ad54938d08ead..ccfb456ed93590 100644 --- a/docsite/rst/intro_configuration.rst +++ b/docsite/rst/intro_configuration.rst @@ -906,10 +906,10 @@ The following options can be set in the [galaxy] section of ansible.cfg: server ====== -Override the default Galaxy server value of https://galaxy.ansible.com. +Override the default Galaxy server value of https://galaxy.ansible.com. Useful if you have a hosted version of the Galaxy web app or want to point to the testing site https://galaxy-qa.ansible.com. It does not work against private, hosted repos, which Galaxy can use for fetching and installing roles. ignore_certs ============ If set to *yes*, ansible-galaxy will not validate TLS certificates. Handy for testing against a server with a self-signed certificate -. \ No newline at end of file +. 
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index a4a7b915f36a46..34afa03c9f7c2f 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -49,7 +49,7 @@ class GalaxyCLI(CLI): SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - VALID_ACTIONS = ("delete","import","info","init","install","list","login","remove","search","setup") + VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup") def __init__(self, args): self.api = None From 95785f149d21badaf7cba35b4ffa7ed5805235d4 Mon Sep 17 00:00:00 2001 From: chouseknecht Date: Thu, 10 Dec 2015 21:44:03 -0500 Subject: [PATCH 3118/3617] Fix docs. The search command works with both galaxy.ansible.com and galaxy-qa.ansible.com. --- docsite/rst/galaxy.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index f8cde57e62c802..6d64a542b4af96 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -181,9 +181,7 @@ The search command will return a list of the first 1000 results matching your se .. note:: - The search command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access - `https://galaxy-qa.ansible.com `_. You can also add a *server* definition in the [galaxy] - section of your ansible.cfg file. + The format of results pictured here is new in Ansible 2.0. 
Get More Information About a Role --------------------------------- From 2bc3683d41b307611a03447e9d4b194ba6ef5c1c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 13 Dec 2015 05:54:57 -0800 Subject: [PATCH 3119/3617] Restore comment about for-else since it is an uncommon idiom --- lib/ansible/plugins/action/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 154404e474cf78..254bab476bb68f 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -119,7 +119,7 @@ def _configure_module(self, module_name, module_args, task_vars=None): module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type) if module_path: break - else: + else: # This is a for-else: http://bit.ly/1ElPkyg # Use Windows version of ping module to check module paths when # using a connection that supports .ps1 suffixes. We check specifically # for win_ping here, otherwise the code would look for ping.ps1 From 0c954bd14298a81be4c9026563326a87f9c42f58 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Sun, 13 Dec 2015 18:00:54 +0100 Subject: [PATCH 3120/3617] add --full flag to ansible-pull man page add --full flag that was added in #13502 --- docs/man/man1/ansible-pull.1.asciidoc.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in index 333b8e34e0f308..0afba2aeaac4fb 100644 --- a/docs/man/man1/ansible-pull.1.asciidoc.in +++ b/docs/man/man1/ansible-pull.1.asciidoc.in @@ -95,6 +95,10 @@ Force running of playbook even if unable to update playbook repository. This can be useful, for example, to enforce run-time state when a network connection may not always be up or possible. +*--full*:: + +Do a full clone of the repository. By default ansible-pull will do a shallow clone based on the last revision. 
+ *-h*, *--help*:: Show the help message and exit. From 89603a0509117610e2cbebc6c48475a3b8af98b2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 13 Dec 2015 12:18:28 -0500 Subject: [PATCH 3121/3617] added that ansible-pull is now shallow to changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bf11e6c5bc498..c6319634fb71ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -350,6 +350,7 @@ newline being stripped you can change your playbook like this: * We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. * environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. +* ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. * Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. 
Use the default filter if you want to approximate the old behaviour: ``` From f8ff63f8c8ab001ea8f096968b550f23262c193c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Dec 2015 03:06:52 -0500 Subject: [PATCH 3122/3617] A few tweaks to improve new forking code --- lib/ansible/plugins/strategy/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index ea30b800b027d6..4047bde73a2f83 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -149,17 +149,20 @@ def _queue_task(self, host, task, task_vars, play_context): # way to share them with the forked processes shared_loader_obj = SharedPluginLoaderObj() + queued = False while True: (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker] if worker_prc is None or not worker_prc.is_alive(): worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj) self._workers[self._cur_worker][0] = worker_prc worker_prc.start() - break + queued = True self._cur_worker += 1 if self._cur_worker >= len(self._workers): self._cur_worker = 0 time.sleep(0.0001) + if queued: + break del task_vars self._pending_results += 1 @@ -196,7 +199,7 @@ def _process_pending_results(self, iterator, one_pass=False): else: iterator.mark_host_failed(host) (state, tmp_task) = iterator.get_next_task_for_host(host, peek=True) - if state.run_state != PlayIterator.ITERATING_RESCUE: + if not state or state.run_state != PlayIterator.ITERATING_RESCUE: self._tqm._failed_hosts[host.name] = True self._tqm._stats.increment('failures', host.name) else: From 279c5a359631d296e1a91c1520417e68750138bb Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Dec 2015 03:07:20 -0500 Subject: [PATCH 3123/3617] Cleanup strategy tests broken by new forking strategy --- .../plugins/strategies/test_strategy_base.py | 127 
+++++++++++------- 1 file changed, 76 insertions(+), 51 deletions(-) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index bf01cf6fcc269c..7cc81a0324efb8 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -24,8 +24,11 @@ from ansible.errors import AnsibleError, AnsibleParserError from ansible.plugins.strategy import StrategyBase +from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_queue_manager import TaskQueueManager from ansible.executor.task_result import TaskResult +from ansible.playbook.handler import Handler +from ansible.inventory.host import Host from six.moves import queue as Queue from units.mock.loader import DictDataLoader @@ -98,37 +101,44 @@ def test_strategy_base_get_hosts(self): mock_tqm._unreachable_hosts = ["host02"] self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:]) - def test_strategy_base_queue_task(self): - fake_loader = DictDataLoader() - - workers = [] - for i in range(0, 3): - worker_main_q = MagicMock() - worker_main_q.put.return_value = None - worker_result_q = MagicMock() - workers.append([i, worker_main_q, worker_result_q]) + @patch.object(WorkerProcess, 'run') + def test_strategy_base_queue_task(self, mock_worker): + def fake_run(self): + return - mock_tqm = MagicMock() - mock_tqm._final_q = MagicMock() - mock_tqm.get_workers.return_value = workers - mock_tqm.get_loader.return_value = fake_loader + mock_worker.run.side_effect = fake_run - strategy_base = StrategyBase(tqm=mock_tqm) - strategy_base._cur_worker = 0 - strategy_base._pending_results = 0 - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 1) - self.assertEqual(strategy_base._pending_results, 1) - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), 
task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 2) - self.assertEqual(strategy_base._pending_results, 2) - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 0) - self.assertEqual(strategy_base._pending_results, 3) - workers[0][1].put.side_effect = EOFError - strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock()) - self.assertEqual(strategy_base._cur_worker, 1) - self.assertEqual(strategy_base._pending_results, 3) + fake_loader = DictDataLoader() + mock_var_manager = MagicMock() + mock_host = MagicMock() + mock_inventory = MagicMock() + mock_options = MagicMock() + mock_options.module_path = None + + tqm = TaskQueueManager( + inventory=mock_inventory, + variable_manager=mock_var_manager, + loader=fake_loader, + options=mock_options, + passwords=None, + ) + tqm._initialize_processes(3) + tqm.hostvars = dict() + + try: + strategy_base = StrategyBase(tqm=tqm) + strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 1) + self.assertEqual(strategy_base._pending_results, 1) + strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 2) + self.assertEqual(strategy_base._pending_results, 2) + strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock()) + self.assertEqual(strategy_base._cur_worker, 0) + self.assertEqual(strategy_base._pending_results, 3) + finally: + tqm.cleanup() + def test_strategy_base_process_pending_results(self): mock_tqm = MagicMock() @@ -156,6 +166,7 @@ def _queue_get(*args, **kwargs): mock_iterator = MagicMock() mock_iterator.mark_host_failed.return_value = None + mock_iterator.get_next_task_for_host.return_value = (None, None) 
mock_host = MagicMock() mock_host.name = 'test01' @@ -315,22 +326,15 @@ def test_strategy_base_load_included_file(self): res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator) self.assertEqual(res, []) - def test_strategy_base_run_handlers(self): - workers = [] - for i in range(0, 3): - worker_main_q = MagicMock() - worker_main_q.put.return_value = None - worker_result_q = MagicMock() - workers.append([i, worker_main_q, worker_result_q]) - - mock_tqm = MagicMock() - mock_tqm._final_q = MagicMock() - mock_tqm.get_workers.return_value = workers - mock_tqm.send_callback.return_value = None - + @patch.object(WorkerProcess, 'run') + def test_strategy_base_run_handlers(self, mock_worker): + def fake_run(*args): + return + mock_worker.side_effect = fake_run mock_play_context = MagicMock() - mock_handler_task = MagicMock() + mock_handler_task = MagicMock(Handler) + mock_handler_task.action = 'foo' mock_handler_task.get_name.return_value = "test handler" mock_handler_task.has_triggered.return_value = False @@ -341,11 +345,9 @@ def test_strategy_base_run_handlers(self): mock_play = MagicMock() mock_play.handlers = [mock_handler] - mock_host = MagicMock() + mock_host = MagicMock(Host) mock_host.name = "test01" - mock_iterator = MagicMock() - mock_inventory = MagicMock() mock_inventory.get_hosts.return_value = [mock_host] @@ -355,8 +357,31 @@ def test_strategy_base_run_handlers(self): mock_iterator = MagicMock mock_iterator._play = mock_play - strategy_base = StrategyBase(tqm=mock_tqm) - strategy_base._inventory = mock_inventory - strategy_base._notified_handlers = {"test handler": [mock_host]} - - result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) + fake_loader = DictDataLoader() + mock_options = MagicMock() + mock_options.module_path = None + + tqm = TaskQueueManager( + inventory=mock_inventory, + variable_manager=mock_var_mgr, + loader=fake_loader, + options=mock_options, + passwords=None, + ) + 
tqm._initialize_processes(3) + tqm.hostvars = dict() + + try: + strategy_base = StrategyBase(tqm=tqm) + + strategy_base._inventory = mock_inventory + strategy_base._notified_handlers = {"test handler": [mock_host]} + + mock_return_task = MagicMock(Handler) + mock_return_host = MagicMock(Host) + task_result = TaskResult(mock_return_host, mock_return_task, dict(changed=False)) + tqm._final_q.put(('host_task_ok', task_result)) + + result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) + finally: + tqm.cleanup() From f5f9b2fd354fe013e68f589279cc349a42a461fb Mon Sep 17 00:00:00 2001 From: Hans-Joachim Kliemeck Date: Mon, 14 Dec 2015 14:36:35 +0100 Subject: [PATCH 3124/3617] use default settings from ansible.cfg --- lib/ansible/cli/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py index 012872be7c57ab..48e01346726659 100644 --- a/lib/ansible/cli/__init__.py +++ b/lib/ansible/cli/__init__.py @@ -246,7 +246,7 @@ def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS) if vault_opts: - parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true', + parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true', help='ask for vault password') parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file', help="vault password file", action="callback", callback=CLI.expand_tilde, type=str) From 1f8e484b70f90d34d127eda9cf10a619bb0e72e8 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 3 Dec 2015 07:07:13 -0800 Subject: [PATCH 3125/3617] Fix the refresh flag in openstack inventory Refresh will update the dogpile cache from shade, but doesn't cause the ansible side json cache to be invalidated. It's a simple oversight. 
--- contrib/inventory/openstack.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py index 46b43e92212b0d..231488b06df589 100755 --- a/contrib/inventory/openstack.py +++ b/contrib/inventory/openstack.py @@ -94,9 +94,9 @@ def get_groups_from_server(server_vars): return groups -def get_host_groups(inventory): +def get_host_groups(inventory, refresh=False): (cache_file, cache_expiration_time) = get_cache_settings() - if is_cache_stale(cache_file, cache_expiration_time): + if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): groups = to_json(get_host_groups_from_cloud(inventory)) open(cache_file, 'w').write(groups) else: @@ -121,8 +121,10 @@ def get_host_groups_from_cloud(inventory): return groups -def is_cache_stale(cache_file, cache_expiration_time): +def is_cache_stale(cache_file, cache_expiration_time, refresh=False): ''' Determines if cache file has expired, or if it is still valid ''' + if refresh: + return True if os.path.isfile(cache_file): mod_time = os.path.getmtime(cache_file) current_time = time.time() @@ -176,7 +178,7 @@ def main(): ) if args.list: - output = get_host_groups(inventory) + output = get_host_groups(inventory, refresh=args.refresh) elif args.host: output = to_json(inventory.get_host(args.host)) print(output) From 49dc9eea169efb329d7d184df53ce3dea4dface1 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Wed, 9 Dec 2015 15:11:21 -0500 Subject: [PATCH 3126/3617] add tests for encrypted hash mysql_user --- .../tasks/user_password_update_test.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml index 50307cef9560e1..9a899b206ca187 100644 --- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml +++ 
b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml @@ -79,8 +79,23 @@ - include: remove_user.yml user_name={{user_name_2}} user_password={{ user_password_1 }} +- name: Create user with password1234 using hash. (expect changed=true) + mysql_user: name=jmainguy password='*D65798AAC0E5C6DF3F320F8A30E026E7EBD73A95' encrypted=yes + register: encrypt_result +- name: Check that the module made a change + assert: + that: + - "encrypt_result.changed == True" +- name: See if the password needs to be updated. (expect changed=false) + mysql_user: name=jmainguy password='password1234' + register: plain_result +- name: Check that the module did not change the password + assert: + that: + - "plain_result.changed == False" - +- name: Remove user (cleanup) + mysql_user: name=jmainguy state=absent From 9f61144401a16c9d610193522c71e8852addf63e Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 3 Dec 2015 07:04:24 -0800 Subject: [PATCH 3127/3617] Optionally only use UUIDs for openstack hosts on duplicates The OpenStack inventory lists hostnames as the UUIDs because hostsnames are not guarnateed to be unique on OpenStack. However, for the common case, this is just confusing. The new behavior is a visible change, so make it an opt-in via config. Only turn the hostnames to UUIDs if there are duplicate hostnames. --- contrib/inventory/openstack.py | 57 +++++++++++++++++++++++++++------ contrib/inventory/openstack.yml | 3 ++ 2 files changed, 50 insertions(+), 10 deletions(-) diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py index 231488b06df589..b82a042c29e16e 100755 --- a/contrib/inventory/openstack.py +++ b/contrib/inventory/openstack.py @@ -32,6 +32,13 @@ # all of them and present them as one contiguous inventory. # # See the adjacent openstack.yml file for an example config file +# There are two ansible inventory specific options that can be set in +# the inventory section. 
+# expand_hostvars controls whether or not the inventory will make extra API +# calls to fill out additional information about each server +# use_hostnames changes the behavior from registering every host with its UUID +# and making a group of its hostname to only doing this if the +# hostname in question has more than one server import argparse import collections @@ -51,7 +58,7 @@ CONFIG_FILES = ['/etc/ansible/openstack.yaml'] -def get_groups_from_server(server_vars): +def get_groups_from_server(server_vars, namegroup=True): groups = [] region = server_vars['region'] @@ -76,7 +83,8 @@ def get_groups_from_server(server_vars): groups.append(extra_group) groups.append('instance-%s' % server_vars['id']) - groups.append(server_vars['name']) + if namegroup: + groups.append(server_vars['name']) for key in ('flavor', 'image'): if 'name' in server_vars[key]: @@ -106,17 +114,36 @@ def get_host_groups(inventory, refresh=False): def get_host_groups_from_cloud(inventory): groups = collections.defaultdict(list) + firstpass = collections.defaultdict(list) hostvars = {} - for server in inventory.list_hosts(): + list_args = {} + if hasattr(inventory, 'extra_config'): + use_hostnames = inventory.extra_config['use_hostnames'] + list_args['expand'] = inventory.extra_config['expand_hostvars'] + else: + use_hostnames = False + + for server in inventory.list_hosts(**list_args): if 'interface_ip' not in server: continue - for group in get_groups_from_server(server): - groups[group].append(server['id']) - hostvars[server['id']] = dict( - ansible_ssh_host=server['interface_ip'], - openstack=server, - ) + firstpass[server['name']].append(server) + for name, servers in firstpass.items(): + if len(servers) == 1 and use_hostnames: + server = servers[0] + hostvars[name] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + for group in get_groups_from_server(server, namegroup=False): + groups[group].append(server['name']) + else: + for server in servers: + server_id = 
server['id'] + hostvars[server_id] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + for group in get_groups_from_server(server, namegroup=True): + groups[group].append(server_id) groups['_meta'] = {'hostvars': hostvars} return groups @@ -171,11 +198,21 @@ def main(): try: config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES shade.simple_logging(debug=args.debug) - inventory = shade.inventory.OpenStackInventory( + inventory_args = dict( refresh=args.refresh, config_files=config_files, private=args.private, ) + if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): + inventory_args.update(dict( + config_key='ansible', + config_defaults={ + 'use_hostnames': False, + 'expand_hostvars': True, + } + )) + + inventory = shade.inventory.OpenStackInventory(**inventory_args) if args.list: output = get_host_groups(inventory, refresh=args.refresh) diff --git a/contrib/inventory/openstack.yml b/contrib/inventory/openstack.yml index a99bb020580754..1520e2937ec966 100644 --- a/contrib/inventory/openstack.yml +++ b/contrib/inventory/openstack.yml @@ -26,3 +26,6 @@ clouds: username: stack password: stack project_name: stack +ansible: + use_hostnames: True + expand_hostvars: False From 6312e38133e79674910b2cb8c1b1aa695c6816fc Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Dec 2015 10:35:38 -0500 Subject: [PATCH 3128/3617] Fixing up some non-py3 things for unit tests --- lib/ansible/executor/task_queue_manager.py | 2 +- lib/ansible/module_utils/known_hosts.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py index 9189ab95819925..dae70a1292545a 100644 --- a/lib/ansible/executor/task_queue_manager.py +++ b/lib/ansible/executor/task_queue_manager.py @@ -99,7 +99,7 @@ def __init__(self, inventory, variable_manager, loader, options, passwords, stdo def _initialize_processes(self, num): self._workers = [] - for i in 
xrange(num): + for i in range(num): main_q = multiprocessing.Queue() rslt_q = multiprocessing.Queue() self._workers.append([None, main_q, rslt_q]) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index d2644d97666cee..2824836650ade1 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -169,7 +169,7 @@ def add_host_key(module, fqdn, key_type="rsa", create_dir=False): if not os.path.exists(user_ssh_dir): if create_dir: try: - os.makedirs(user_ssh_dir, 0700) + os.makedirs(user_ssh_dir, 0o700) except: module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) else: From 80d23d639c2351ab6d0951763ca101516f0f2eb7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 14 Dec 2015 10:43:30 -0500 Subject: [PATCH 3129/3617] Use an octal representation that works from 2.4->3+ for known_hosts --- lib/ansible/module_utils/known_hosts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 2824836650ade1..9b6af2a28e907a 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -169,7 +169,7 @@ def add_host_key(module, fqdn, key_type="rsa", create_dir=False): if not os.path.exists(user_ssh_dir): if create_dir: try: - os.makedirs(user_ssh_dir, 0o700) + os.makedirs(user_ssh_dir, int('700', 8)) except: module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) else: From c9eb41109f83358d8d968457728996f60b30b933 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 14 Dec 2015 08:03:56 -0800 Subject: [PATCH 3130/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 0d23b3df526875..e6b7b17326b4c9 160000 --- a/lib/ansible/modules/core +++ 
b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 0d23b3df526875c8fc6edf94268f3aa850ec05f1 +Subproject commit e6b7b17326b4c9d11501112270c52ae25955938a diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 51813e003331c3..f3251de29cb106 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 51813e003331c3341b07c5cda33346cada537a3b +Subproject commit f3251de29cb10664b2c63a0021530c3fe34111a3 From 457f86f61a3bef95b562dbf91b523c563bff2f63 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 14 Dec 2015 08:50:37 -0800 Subject: [PATCH 3131/3617] Minor: Correct type pyhton => python --- test/integration/roles/test_docker/tasks/main.yml | 2 +- test/units/plugins/cache/test_cache.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml index 2ea15644d5f847..76b3fa7070209f 100644 --- a/test/integration/roles/test_docker/tasks/main.yml +++ b/test/integration/roles/test_docker/tasks/main.yml @@ -3,7 +3,7 @@ #- include: docker-setup-rht.yml # Packages on RHEL and CentOS 7 are broken, broken, broken. Revisit when # they've got that sorted out - # CentOS 6 currently broken by conflicting files in pyhton-backports and python-backports-ssl_match_hostname + # CentOS 6 currently broken by conflicting files in python-backports and python-backports-ssl_match_hostname #when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6 # python-docker isn't available until 14.10. 
Revist at the next Ubuntu LTS diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py index af1d924910dc96..0547ba55bf0126 100644 --- a/test/units/plugins/cache/test_cache.py +++ b/test/units/plugins/cache/test_cache.py @@ -110,6 +110,6 @@ def test_memcached_cachemodule(self): def test_memory_cachemodule(self): self.assertIsInstance(MemoryCache(), MemoryCache) - @unittest.skipUnless(HAVE_REDIS, 'Redis pyhton module not installed') + @unittest.skipUnless(HAVE_REDIS, 'Redis python module not installed') def test_redis_cachemodule(self): self.assertIsInstance(RedisCache(), RedisCache) From e595c501976d5f378414dec90543151d7319253b Mon Sep 17 00:00:00 2001 From: gp Date: Mon, 14 Dec 2015 12:06:35 -0500 Subject: [PATCH 3132/3617] Fix typo in galaxy.rst Fix typo --- docsite/rst/galaxy.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index 783ac15e456a76..c9dea27336788f 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -126,7 +126,7 @@ The above will create the following directory structure in the current working d :: README.md - .travsis.yml + .travis.yml defaults/ main.yml files/ From a7ac98262d94cc24a584b8e163cebc0a2a492cd6 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 12 Dec 2015 20:18:36 +0100 Subject: [PATCH 3133/3617] Make module_utils.known_hosts.get_fqdn work on ipv6 --- lib/ansible/module_utils/known_hosts.py | 16 +++++++++------- .../units/module_utils/basic/test_known_hosts.py | 8 ++++++++ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 9b6af2a28e907a..64ad0c76c2b350 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -74,12 +74,12 @@ def get_fqdn(repo_url): if "@" in repo_url and "://" not in repo_url: # most likely an user@host:path or user@host/path type URL repo_url = 
repo_url.split("@", 1)[1] - if ":" in repo_url: - repo_url = repo_url.split(":")[0] - result = repo_url + if repo_url.startswith('['): + result = repo_url.split(']', 1)[0] + ']' + elif ":" in repo_url: + result = repo_url.split(":")[0] elif "/" in repo_url: - repo_url = repo_url.split("/")[0] - result = repo_url + result = repo_url.split("/")[0] elif "://" in repo_url: # this should be something we can parse with urlparse parts = urlparse.urlparse(repo_url) @@ -87,11 +87,13 @@ def get_fqdn(repo_url): # ensure we actually have a parts[1] before continuing. if parts[1] != '': result = parts[1] - if ":" in result: - result = result.split(":")[0] if "@" in result: result = result.split("@", 1)[1] + if result[0].startswith('['): + result = result.split(']', 1)[0] + ']' + elif ":" in result: + result = result.split(":")[0] return result def check_hostkey(module, fqdn): diff --git a/test/units/module_utils/basic/test_known_hosts.py b/test/units/module_utils/basic/test_known_hosts.py index 952184bfec9f4c..515d67686defc2 100644 --- a/test/units/module_utils/basic/test_known_hosts.py +++ b/test/units/module_utils/basic/test_known_hosts.py @@ -33,6 +33,14 @@ class TestAnsibleModuleKnownHosts(unittest.TestCase): {'is_ssh_url': True, 'get_fqdn': 'five.example.org'}, 'ssh://six.example.org:21/example.org': {'is_ssh_url': True, 'get_fqdn': 'six.example.org'}, + 'ssh://[2001:DB8::abcd:abcd]/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, + 'ssh://[2001:DB8::abcd:abcd]:22/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, + 'username@[2001:DB8::abcd:abcd]/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, + 'username@[2001:DB8::abcd:abcd]:22/example.git': + {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'}, } def test_is_ssh_url(self): From 8d16638fec3e88e0f7b0dde24aae095100436644 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 14 Dec 2015 10:54:10 -0800 Subject: [PATCH 3134/3617] Fix for 
template module not creating a file that was not present when force=false --- lib/ansible/plugins/action/template.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py index 109f3e80c0bba7..d134f80a8df434 100644 --- a/lib/ansible/plugins/action/template.py +++ b/lib/ansible/plugins/action/template.py @@ -150,7 +150,7 @@ def run(self, tmp=None, task_vars=None): diff = {} new_module_args = self._task.args.copy() - if force and local_checksum != remote_checksum: + if (remote_checksum == '1') or (force and local_checksum != remote_checksum): result['changed'] = True # if showing diffs, we need to get the remote value From 27cd7668c152c5b2b74a10ffe78bfca7a11aeaac Mon Sep 17 00:00:00 2001 From: Peter Sprygada Date: Tue, 8 Dec 2015 07:34:09 -0500 Subject: [PATCH 3135/3617] the ssh shared module will try to use keys if the password is not supplied The current ssh shared module forces only password based authentication. This change will allow the ssh module to use keys if a password is not provided. 
--- lib/ansible/module_utils/ssh.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/ssh.py b/lib/ansible/module_utils/ssh.py index 343f017a988509..00922ef8cdd8ba 100644 --- a/lib/ansible/module_utils/ssh.py +++ b/lib/ansible/module_utils/ssh.py @@ -91,12 +91,17 @@ class Ssh(object): def __init__(self): self.client = None - def open(self, host, port=22, username=None, password=None, timeout=10): + def open(self, host, port=22, username=None, password=None, + timeout=10, key_filename=None): + ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + use_keys = password is None + ssh.connect(host, port=port, username=username, password=password, - timeout=timeout, allow_agent=False, look_for_keys=False) + timeout=timeout, allow_agent=use_keys, look_for_keys=use_keys, + key_filename=key_filename) self.client = ssh return self.on_open() From be4d1f9ee380705768574baefb75830e3c76afa2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Tue, 15 Dec 2015 12:49:20 +0100 Subject: [PATCH 3136/3617] Fix a part of python 3 tests (make tests-py3, see https://github.com/ansible/ansible/issues/13553 for more details). 
--- lib/ansible/module_utils/known_hosts.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py index 64ad0c76c2b350..52b0bb74b0f7cb 100644 --- a/lib/ansible/module_utils/known_hosts.py +++ b/lib/ansible/module_utils/known_hosts.py @@ -28,7 +28,11 @@ import os import hmac -import urlparse + +try: + import urlparse +except ImportError: + import urllib.parse as urlparse try: from hashlib import sha1 From a0842781a6a77a0e51ad411ab186395379cc4dcb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 15 Dec 2015 08:44:43 -0500 Subject: [PATCH 3137/3617] renamed ssh.py shared module file to clarify --- lib/ansible/module_utils/{ssh.py => issh.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename lib/ansible/module_utils/{ssh.py => issh.py} (100%) diff --git a/lib/ansible/module_utils/ssh.py b/lib/ansible/module_utils/issh.py similarity index 100% rename from lib/ansible/module_utils/ssh.py rename to lib/ansible/module_utils/issh.py From be5488cb60869c67b0ea521a4044062157817e50 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 15 Dec 2015 09:27:53 -0500 Subject: [PATCH 3138/3617] clean debug output to match prev versions --- lib/ansible/plugins/callback/__init__.py | 6 ++++++ lib/ansible/plugins/callback/default.py | 1 + lib/ansible/plugins/callback/minimal.py | 1 + 3 files changed, 8 insertions(+) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index b8a48943f28661..7371fe0a51e8d5 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -140,6 +140,12 @@ def _process_items(self, result): else: self.v2_playbook_item_on_ok(newres) + def _clean_results(self, result, task_name): + if 'changed' in result and task_name in ['debug']: + del result['changed'] + if 'invocation' in result and task_name in ['debug']: + del result['invocation'] + def set_play_context(self, 
play_context): pass diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 1f37f4b975e0f9..e515945bba516b 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -62,6 +62,7 @@ def v2_runner_on_failed(self, result, ignore_errors=False): def v2_runner_on_ok(self, result): + self._clean_results(result._result, result._task.action) delegated_vars = result._result.get('_ansible_delegated_vars', None) if result._task.action == 'include': return diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index f855c1a6e53973..71f9f5dfeef01b 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -64,6 +64,7 @@ def v2_runner_on_failed(self, result, ignore_errors=False): self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red') def v2_runner_on_ok(self, result): + self._clean_results(result._result, result._task.action) if result._task.action in C.MODULE_NO_JSON: self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color='green') else: From fcc9258b743d2f596628f28dd4cdc01f0f8d306e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 01:48:22 -0500 Subject: [PATCH 3139/3617] Use the original host rather than the serialized one when processing results Fixes #13526 Fixes #13564 Fixes #13566 --- lib/ansible/plugins/strategy/__init__.py | 25 +++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 4047bde73a2f83..d2d79d036bdbed 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -185,10 +185,20 @@ def _process_pending_results(self, iterator, one_pass=False): result = self._final_q.get() 
display.debug("got result from result worker: %s" % ([text_type(x) for x in result],)) + # helper method, used to find the original host from the one + # returned in the result/message, which has been serialized and + # thus had some information stripped from it to speed up the + # serialization process + def get_original_host(host): + if host.name in self._inventory._hosts_cache: + return self._inventory._hosts_cache[host.name] + else: + return self._inventory.get_host(host.name) + # all host status messages contain 2 entries: (msg, task_result) if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'): task_result = result[1] - host = task_result._host + host = get_original_host(task_result._host) task = task_result._task if result[0] == 'host_task_failed' or task_result.is_failed(): if not task.ignore_errors: @@ -244,7 +254,7 @@ def _process_pending_results(self, iterator, one_pass=False): self._add_host(new_host_info, iterator) elif result[0] == 'add_group': - host = result[1] + host = get_original_host(result[1]) result_item = result[2] self._add_group(host, result_item) @@ -252,19 +262,20 @@ def _process_pending_results(self, iterator, one_pass=False): task_result = result[1] handler_name = result[2] - original_task = iterator.get_original_task(task_result._host, task_result._task) + original_host = get_original_host(task_result._host) + original_task = iterator.get_original_task(original_host, task_result._task) if handler_name not in self._notified_handlers: self._notified_handlers[handler_name] = [] - if task_result._host not in self._notified_handlers[handler_name]: - self._notified_handlers[handler_name].append(task_result._host) + if original_host not in self._notified_handlers[handler_name]: + self._notified_handlers[handler_name].append(original_host) display.vv("NOTIFIED HANDLER %s" % (handler_name,)) elif result[0] == 'register_host_var': # essentially the same as 'set_host_var' below, however we # never follow 
the delegate_to value for registered vars and # the variable goes in the fact_cache - host = result[1] + host = get_original_host(result[1]) task = result[2] var_value = wrap_var(result[3]) var_name = task.register @@ -278,7 +289,7 @@ def _process_pending_results(self, iterator, one_pass=False): self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value}) elif result[0] in ('set_host_var', 'set_host_facts'): - host = result[1] + host = get_original_host(result[1]) task = result[2] item = result[3] From 9942d71d345cf221dbcdb19f362d80430d995905 Mon Sep 17 00:00:00 2001 From: Matt Clay Date: Wed, 16 Dec 2015 01:37:02 -0800 Subject: [PATCH 3140/3617] Test for filename option in apt_repository module. --- .../roles/test_apt_repository/tasks/apt.yml | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/test/integration/roles/test_apt_repository/tasks/apt.yml b/test/integration/roles/test_apt_repository/tasks/apt.yml index 49d13bc52a3940..9c8e3ab44737dc 100644 --- a/test/integration/roles/test_apt_repository/tasks/apt.yml +++ b/test/integration/roles/test_apt_repository/tasks/apt.yml @@ -2,6 +2,7 @@ - set_fact: test_ppa_name: 'ppa:menulibre-dev/devel' + test_ppa_filename: 'menulibre-dev' test_ppa_spec: 'deb http://ppa.launchpad.net/menulibre-dev/devel/ubuntu {{ansible_distribution_release}} main' test_ppa_key: 'A7AD98A1' # http://keyserver.ubuntu.com:11371/pks/lookup?search=0xD06AAF4C11DAB86DF421421EFE6B20ECA7AD98A1&op=index @@ -144,6 +145,47 @@ - name: 'ensure ppa key is absent (expect: pass)' apt_key: id='{{test_ppa_key}}' state=absent +# +# TEST: apt_repository: repo= filename= +# +- include: 'cleanup.yml' + +- name: 'record apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_before + +- name: 'name= filename= (expect: pass)' + apt_repository: repo='{{test_ppa_spec}}' filename='{{test_ppa_filename}}' state=present + register: result + +- assert: + that: + - 'result.changed' + - 'result.state == "present"' 
+ - 'result.repo == "{{test_ppa_spec}}"' + +- name: 'examine source file' + stat: path='/etc/apt/sources.list.d/{{test_ppa_filename}}.list' + register: source_file + +- name: 'assert source file exists' + assert: + that: + - 'source_file.stat.exists == True' + +- name: 'examine apt cache mtime' + stat: path='/var/cache/apt/pkgcache.bin' + register: cache_after + +- name: 'assert the apt cache did change' + assert: + that: + - 'cache_before.stat.mtime != cache_after.stat.mtime' + +# When installing a repo with the spec, the key is *NOT* added +- name: 'ensure ppa key is absent (expect: pass)' + apt_key: id='{{test_ppa_key}}' state=absent + # # TEARDOWN # From 63b624707d0bcb057cec7c81d86b511106cba512 Mon Sep 17 00:00:00 2001 From: David Date: Wed, 16 Dec 2015 23:46:06 +0800 Subject: [PATCH 3141/3617] Fix typo --- docsite/rst/playbooks_roles.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index 516403ac805227..c6c01db5d484b0 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -132,7 +132,7 @@ Note that you cannot do variable substitution when including one playbook inside another. .. note:: - You can not conditionally path the location to an include file, + You can not conditionally pass the location to an include file, like you can with 'vars_files'. If you find yourself needing to do this, consider how you can restructure your playbook to be more class/role oriented. 
This is to say you cannot use a 'fact' to From 73ead4fbbadb8ad874f95f0dd542256b2ad730aa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 14 Dec 2015 20:05:55 -0800 Subject: [PATCH 3142/3617] First attempt to fix https certificate errors through a proxy with python-2.7.9+ Fixes #12549 --- lib/ansible/module_utils/urls.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index 979d5943dde27f..0f45c360349b39 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -326,11 +326,15 @@ def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) else: sock = socket.create_connection((self.host, self.port), self.timeout) + + server_hostname = self.host if self._tunnel_host: self.sock = sock self._tunnel() + server_hostname = self._tunnel_host + if HAS_SSLCONTEXT: - self.sock = self.context.wrap_socket(sock, server_hostname=self.host) + self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) else: self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) @@ -542,7 +546,7 @@ def http_request(self, req): connect_result = s.recv(4096) self.validate_proxy_response(connect_result) if context: - ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname')) + ssl_s = context.wrap_socket(s, server_hostname=self.hostname) else: ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL) match_hostname(ssl_s.getpeercert(), self.hostname) From 72a0654b81aec47e9fa989ba8c1d50a55a093f6f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 15 Dec 2015 15:35:13 -0800 Subject: [PATCH 3143/3617] Fixes for proxy on RHEL5 --- lib/ansible/module_utils/urls.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/lib/ansible/module_utils/urls.py 
b/lib/ansible/module_utils/urls.py index 0f45c360349b39..d0ee260e17f26c 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -328,6 +328,8 @@ def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) server_hostname = self.host + # Note: self._tunnel_host is not available on py < 2.6 but this code + # isn't used on py < 2.6 (lack of create_connection) if self._tunnel_host: self.sock = sock self._tunnel() @@ -377,7 +379,10 @@ def generic_urlparse(parts): # get the username, password, etc. try: netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$') - (auth, hostname, port) = netloc_re.match(parts[1]) + match = netloc_re.match(parts[1]) + auth = match.group(1) + hostname = match.group(2) + port = match.group(3) if port: # the capture group for the port will include the ':', # so remove it and convert the port to an integer @@ -387,6 +392,8 @@ def generic_urlparse(parts): # and then split it up based on the first ':' found auth = auth[:-1] username, password = auth.split(':', 1) + else: + username = password = None generic_parts['username'] = username generic_parts['password'] = password generic_parts['hostname'] = hostname @@ -394,7 +401,7 @@ def generic_urlparse(parts): except: generic_parts['username'] = None generic_parts['password'] = None - generic_parts['hostname'] = None + generic_parts['hostname'] = parts[1] generic_parts['port'] = None return generic_parts @@ -536,7 +543,8 @@ def http_request(self, req): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if https_proxy: proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy)) - s.connect((proxy_parts.get('hostname'), proxy_parts.get('port'))) + port = proxy_parts.get('port') or 443 + s.connect((proxy_parts.get('hostname'), port)) if proxy_parts.get('scheme') == 'http': s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port)) if proxy_parts.get('username'): From 33863eb653f3ed4d6f30ab816743443f473c5eae Mon Sep 
17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 07:38:51 -0800 Subject: [PATCH 3144/3617] Conditionally create the CustomHTTPSConnection class only if we have the required baseclasses. Fixes #11918 --- lib/ansible/module_utils/urls.py | 74 +++++++++++++++++--------------- 1 file changed, 39 insertions(+), 35 deletions(-) diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py index d0ee260e17f26c..41613f6cb61ab4 100644 --- a/lib/ansible/module_utils/urls.py +++ b/lib/ansible/module_utils/urls.py @@ -310,42 +310,45 @@ class NoSSLError(SSLValidationError): """Needed to connect to an HTTPS url but no ssl library available to verify the certificate""" pass +# Some environments (Google Compute Engine's CoreOS deploys) do not compile +# against openssl and thus do not have any HTTPS support. +CustomHTTPSConnection = CustomHTTPSHandler = None +if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib2, 'HTTPSHandler'): + class CustomHTTPSConnection(httplib.HTTPSConnection): + def __init__(self, *args, **kwargs): + httplib.HTTPSConnection.__init__(self, *args, **kwargs) + if HAS_SSLCONTEXT: + self.context = create_default_context() + if self.cert_file: + self.context.load_cert_chain(self.cert_file, self.key_file) + + def connect(self): + "Connect to a host on a given (SSL) port." 
+ + if hasattr(self, 'source_address'): + sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) + else: + sock = socket.create_connection((self.host, self.port), self.timeout) + + server_hostname = self.host + # Note: self._tunnel_host is not available on py < 2.6 but this code + # isn't used on py < 2.6 (lack of create_connection) + if self._tunnel_host: + self.sock = sock + self._tunnel() + server_hostname = self._tunnel_host + + if HAS_SSLCONTEXT: + self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) + else: + self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) -class CustomHTTPSConnection(httplib.HTTPSConnection): - def __init__(self, *args, **kwargs): - httplib.HTTPSConnection.__init__(self, *args, **kwargs) - if HAS_SSLCONTEXT: - self.context = create_default_context() - if self.cert_file: - self.context.load_cert_chain(self.cert_file, self.key_file) - - def connect(self): - "Connect to a host on a given (SSL) port." 
- - if hasattr(self, 'source_address'): - sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address) - else: - sock = socket.create_connection((self.host, self.port), self.timeout) - - server_hostname = self.host - # Note: self._tunnel_host is not available on py < 2.6 but this code - # isn't used on py < 2.6 (lack of create_connection) - if self._tunnel_host: - self.sock = sock - self._tunnel() - server_hostname = self._tunnel_host - - if HAS_SSLCONTEXT: - self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname) - else: - self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL) - -class CustomHTTPSHandler(urllib2.HTTPSHandler): + class CustomHTTPSHandler(urllib2.HTTPSHandler): - def https_open(self, req): - return self.do_open(CustomHTTPSConnection, req) + def https_open(self, req): + return self.do_open(CustomHTTPSConnection, req) - https_request = urllib2.AbstractHTTPHandler.do_request_ + https_request = urllib2.AbstractHTTPHandler.do_request_ def generic_urlparse(parts): ''' @@ -673,8 +676,9 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True, handlers.append(proxyhandler) # pre-2.6 versions of python cannot use the custom https - # handler, since the socket class is lacking this method - if hasattr(socket, 'create_connection'): + # handler, since the socket class is lacking create_connection. + # Some python builds lack HTTPS support. 
+ if hasattr(socket, 'create_connection') and CustomHTTPSHandler: handlers.append(CustomHTTPSHandler) opener = urllib2.build_opener(*handlers) From 0095d04af9712c0c026b29e45dbe57a70e30f1e0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 08:02:46 -0800 Subject: [PATCH 3145/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index e6b7b17326b4c9..50e7bff554647c 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit e6b7b17326b4c9d11501112270c52ae25955938a +Subproject commit 50e7bff554647ccd8a34729171420e72b3a00c61 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f3251de29cb106..bde5686552fdd8 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f3251de29cb10664b2c63a0021530c3fe34111a3 +Subproject commit bde5686552fdd88a758c7197b2eebe98b1afbf07 From 6a252a3f7727649c61c007e73f04201fd6fbdfa8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 11:21:19 -0500 Subject: [PATCH 3146/3617] Preserve the cumulative path for checking includes which have parents Otherwise, each relative include path is checked on its own, rather than in relation to the (possibly relative) path of its parent, meaning includes multiple level deep may fail to find the correct (or any) file. 
Fixes #13472 --- lib/ansible/playbook/included_file.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py index b7c0fb81756bba..7fb851a12afcad 100644 --- a/lib/ansible/playbook/included_file.py +++ b/lib/ansible/playbook/included_file.py @@ -81,14 +81,19 @@ def process_include_results(results, tqm, iterator, loader, variable_manager): # handle relative includes by walking up the list of parent include # tasks and checking the relative result to see if it exists parent_include = original_task._task_include + cumulative_path = None while parent_include is not None: parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params'))) + if cumulative_path is None: + cumulative_path = parent_include_dir + elif not os.path.isabs(cumulative_path): + cumulative_path = os.path.join(parent_include_dir, cumulative_path) include_target = templar.template(include_result['include']) if original_task._role: - new_basedir = os.path.join(original_task._role._role_path, 'tasks', parent_include_dir) + new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path) include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target) else: - include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_target) + include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target) if os.path.exists(include_file): break From 375eb501b3b1edf7fd91807374edfcd60ca736b8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 09:40:01 -0800 Subject: [PATCH 3147/3617] Update url to site that has an invalid certificate --- test/integration/roles/test_get_url/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 
6e3842f6abf373..09ee34277a04aa 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -28,7 +28,7 @@ - name: test https fetch to a site with mismatched hostname and certificate get_url: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result @@ -46,7 +46,7 @@ - name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no get_url: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result From 34e88e48a567d52e3ed0c3ecb6a5aa578e53dd19 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 16 Nov 2015 22:08:15 -0500 Subject: [PATCH 3148/3617] Add shared connection code for mysql modules --- lib/ansible/module_utils/mysql.py | 66 +++++++++++++++ .../utils/module_docs_fragments/mysql.py | 84 +++++++++++++++++++ .../tasks/user_password_update_test.yml | 1 - .../tasks/assert_fail_msg.yml | 2 - 4 files changed, 150 insertions(+), 3 deletions(-) create mode 100644 lib/ansible/module_utils/mysql.py create mode 100644 lib/ansible/utils/module_docs_fragments/mysql.py diff --git a/lib/ansible/module_utils/mysql.py b/lib/ansible/module_utils/mysql.py new file mode 100644 index 00000000000000..48e00adfd9cfdf --- /dev/null +++ b/lib/ansible/module_utils/mysql.py @@ -0,0 +1,66 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Jonathan Mainguy , 2015 +# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without modification, +# are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + +def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None): + config = { + 'host': module.params['login_host'], + 'ssl': { + } + } + + if module.params['login_unix_socket']: + config['unix_socket'] = module.params['login_unix_socket'] + else: + config['port'] = module.params['login_port'] + + if os.path.exists(config_file): + config['read_default_file'] = config_file + + # If login_user or login_password are given, they should override the + # config file + if login_user is not None: + config['user'] = login_user + if login_password is not None: + config['passwd'] = login_password + if ssl_cert is not None: + config['ssl']['cert'] = ssl_cert + if ssl_key is not None: + config['ssl']['key'] = ssl_key + if ssl_ca is not None: + config['ssl']['ca'] = ssl_ca + if db is not None: + config['db'] = db + + db_connection = MySQLdb.connect(**config) + if cursor_class is not None: + return db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor) + else: + return db_connection.cursor() diff --git a/lib/ansible/utils/module_docs_fragments/mysql.py b/lib/ansible/utils/module_docs_fragments/mysql.py new file mode 100644 index 00000000000000..5dd1e04f93b3d4 --- /dev/null +++ b/lib/ansible/utils/module_docs_fragments/mysql.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2015 Jonathan Mainguy +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +class ModuleDocFragment(object): + + # Standard mysql documentation fragment + DOCUMENTATION = ''' +options: + login_user: + description: + - The username used to authenticate with + required: false + default: null + login_password: + description: + - The password used to authenticate with + required: false + default: null + login_host: + description: + - Host running the database + required: false + default: localhost + login_port: + description: + - Port of the MySQL server. Requires login_host be defined as other than localhost if login_port is used + required: false + default: 3306 + login_unix_socket: + description: + - The path to a Unix domain socket for local connections + required: false + default: null + config_file: + description: + - Specify a config file from which user and password are to be read + required: false + default: '~/.my.cnf' + version_added: "2.0" + ssl_ca: + required: false + default: null + version_added: "2.0" + description: + - The path to a Certificate Authority (CA) certificate. This option, if used, must specify the same certificate as used by the server. + ssl_cert: + required: false + default: null + version_added: "2.0" + description: + - The path to a client public key certificate. + ssl_key: + required: false + default: null + version_added: "2.0" + description: + - The path to the client private key. +requirements: + - MySQLdb +notes: + - Requires the MySQLdb Python package on the remote host. For Ubuntu, this + is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this + is as easy as yum install MySQL-python. (See M(yum).) + - Both C(login_password) and C(login_user) are required when you are + passing credentials. If none are present, the module will attempt to read + the credentials from C(~/.my.cnf), and finally fall back to using the MySQL + default login of 'root' with no password. 
+''' diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml index 50307cef9560e1..904165c33ec6d8 100644 --- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml +++ b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml @@ -63,7 +63,6 @@ assert: that: - "result.failed == true" - - "'check login credentials (login_user, and login_password' in result.msg" - name: create database using user2 and new password mysql_db: name={{ db_name }} state=present login_user={{ user_name_2 }} login_password={{ user_password_1 }} diff --git a/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml b/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml index 70aa26856eded2..ba51b9d67cbc0f 100644 --- a/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml +++ b/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml @@ -23,5 +23,3 @@ assert: that: - "output.failed == true" - - "'{{msg}}' in output.msg" - From 851c0058b148ce041af5ca5c9fbdf25ff854cf8f Mon Sep 17 00:00:00 2001 From: Chrrrles Paul Date: Wed, 16 Dec 2015 12:45:05 -0600 Subject: [PATCH 3149/3617] Removing yaml support for path: --- docsite/rst/galaxy.rst | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst index c9dea27336788f..f4ca16cb8f15b8 100644 --- a/docsite/rst/galaxy.rst +++ b/docsite/rst/galaxy.rst @@ -73,10 +73,6 @@ And here's an example showing some specific version downloads from multiple sour # from GitHub - src: https://github.com/bennojoy/nginx - # from GitHub installing to a relative path - - src: https://github.com/bennojoy/nginx - path: vagrant/roles/ - # from GitHub, overriding the name and specifying a specific tag - src: https://github.com/bennojoy/nginx version: master @@ -98,7 +94,6 @@ And here's an example showing some specific version 
downloads from multiple sour - src: git@gitlab.company.com:mygroup/ansible-base.git scm: git version: 0.1.0 - path: roles/ As you can see in the above, there are a large amount of controls available to customize where roles can be pulled from, and what to save roles as. From 6109f703970d741df6e2e28e750667f5d0083fda Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 13:56:55 -0500 Subject: [PATCH 3150/3617] Attempt at fixing strategy unit test failures on py2.6 and py3 --- test/units/plugins/strategies/test_strategy_base.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 7cc81a0324efb8..53e243f926be67 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ b/test/units/plugins/strategies/test_strategy_base.py @@ -377,9 +377,7 @@ def fake_run(*args): strategy_base._inventory = mock_inventory strategy_base._notified_handlers = {"test handler": [mock_host]} - mock_return_task = MagicMock(Handler) - mock_return_host = MagicMock(Host) - task_result = TaskResult(mock_return_host, mock_return_task, dict(changed=False)) + task_result = TaskResult(Host('host01'), Handler(), dict(changed=False)) tqm._final_q.put(('host_task_ok', task_result)) result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context) From 9724117bbb6c09a4d6d2e1f6573e69db697bdcc7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 11:15:39 -0800 Subject: [PATCH 3151/3617] Update submodule refs for mysql refactor --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 50e7bff554647c..3c48320b295c3b 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 50e7bff554647ccd8a34729171420e72b3a00c61 +Subproject commit 
3c48320b295c3b4f99caccdc5f173b224109a393 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index bde5686552fdd8..8ec4f95ffd6d4e 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit bde5686552fdd88a758c7197b2eebe98b1afbf07 +Subproject commit 8ec4f95ffd6d4e837cf0f3dd28649fb09afd0caf From baece499dfb6a8d8556db2b686d4f3c86d1d25b1 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 16 Dec 2015 11:47:12 -0800 Subject: [PATCH 3152/3617] fix plugin loading for Windows modules force plugin loader to only consider .py files, since that's the only place docs can live ATM... --- lib/ansible/cli/doc.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py index a17164eb50ed8d..265b1c9a3fc59f 100644 --- a/lib/ansible/cli/doc.py +++ b/lib/ansible/cli/doc.py @@ -90,7 +90,8 @@ def run(self): for module in self.args: try: - filename = module_loader.find_plugin(module) + # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs + filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader))) continue @@ -167,7 +168,8 @@ def get_module_list_text(self): if module in module_docs.BLACKLIST_MODULES: continue - filename = module_loader.find_plugin(module) + # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs + filename = module_loader.find_plugin(module, mod_type='.py') if filename is None: continue From 491fd754f1cbe1944b0f45690842fd49b5977775 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 16:35:56 -0500 Subject: [PATCH 3153/3617] Updating the porting guide to note the complex args/bare vars change Related to #13518 --- docsite/rst/porting_guide_2.0.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git 
a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst index 8d69ecd4403c84..543be052bdc6b4 100644 --- a/docsite/rst/porting_guide_2.0.rst +++ b/docsite/rst/porting_guide_2.0.rst @@ -55,6 +55,24 @@ uses key=value escaping which has not changed. The other option is to check for # Output "msg": "Testing some things" +* When specifying complex args as a variable, the variable must use the full jinja2 + variable syntax ('{{var_name}}') - bare variable names there are no longer accepted. + In fact, even specifying args with variables has been deprecated, and will not be + allowed in future versions:: + + --- + - hosts: localhost + connection: local + gather_facts: false + vars: + my_dirs: + - { path: /tmp/3a, state: directory, mode: 0755 } + - { path: /tmp/3b, state: directory, mode: 0700 } + tasks: + - file: + args: "{{item}}" # <- args here uses the full variable syntax + with_items: my_dirs + * porting task includes * More dynamic. Corner-case formats that were not supposed to work now do not, as expected. 
* variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324 From 8716bf8021800a18cb8d6cfea3f296ba4f834692 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 16:32:06 -0500 Subject: [PATCH 3154/3617] All variables in complex args again Also updates the CHANGELOG to note the slight change, where bare variables in args are no longer allowed to be bare variables Fixes #13518 --- CHANGELOG.md | 20 ++++++++++++++++++++ lib/ansible/parsing/mod_args.py | 11 ++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c6319634fb71ae..005171ec9a95b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,26 @@ newline being stripped you can change your playbook like this: "msg": "Testing some things" ``` +* When specifying complex args as a variable, the variable must use the full jinja2 +variable syntax ('{{var_name}}') - bare variable names there are no longer accepted. +In fact, even specifying args with variables has been deprecated, and will not be +allowed in future versions: + + ``` + --- + - hosts: localhost + connection: local + gather_facts: false + vars: + my_dirs: + - { path: /tmp/3a, state: directory, mode: 0755 } + - { path: /tmp/3b, state: directory, mode: 0700 } + tasks: + - file: + args: "{{item}}" + with_items: my_dirs + ``` + ###Plugins * Rewritten dnf module that should be faster and less prone to encountering bugs in cornercases diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py index abc35a415e345b..86b2d0d996dd3b 100644 --- a/lib/ansible/parsing/mod_args.py +++ b/lib/ansible/parsing/mod_args.py @@ -137,7 +137,16 @@ def _normalize_parameters(self, thing, action=None, additional_args=dict()): # than those which may be parsed/normalized next final_args = dict() if additional_args: - final_args.update(additional_args) + if isinstance(additional_args, string_types): + templar = Templar(loader=None) + if 
templar._contains_vars(additional_args): + final_args['_variable_params'] = additional_args + else: + raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style ('{{var_name}}')") + elif isinstance(additional_args, dict): + final_args.update(additional_args) + else: + raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").') # how we normalize depends if we figured out what the module name is # yet. If we have already figured it out, it's an 'old style' invocation. From fffd29d1ab15dc93a2854f874695b63e15d5c198 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 14:06:11 -0800 Subject: [PATCH 3155/3617] Update mysql setup to handle installing mysql with dnf too. --- test/integration/roles/setup_mysql_db/tasks/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/integration/roles/setup_mysql_db/tasks/main.yml b/test/integration/roles/setup_mysql_db/tasks/main.yml index a8010e71389186..612d94f6d11b96 100644 --- a/test/integration/roles/setup_mysql_db/tasks/main.yml +++ b/test/integration/roles/setup_mysql_db/tasks/main.yml @@ -31,6 +31,11 @@ with_items: mysql_packages when: ansible_pkg_mgr == 'yum' +- name: install mysqldb_test rpm dependencies + dnf: name={{ item }} state=latest + with_items: mysql_packages + when: ansible_pkg_mgr == 'dnf' + - name: install mysqldb_test debian dependencies apt: name={{ item }} state=latest with_items: mysql_packages From fd4ad2c8f24be48e2fa103a6b8feae287c4b57fe Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 14:08:08 -0800 Subject: [PATCH 3156/3617] Update submodule ref to fix a bug in mysql_user with mariadb --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 3c48320b295c3b..16a3bdaa7da9e9 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ 
-Subproject commit 3c48320b295c3b4f99caccdc5f173b224109a393 +Subproject commit 16a3bdaa7da9e9f7c0572d3a3fdbfd79f29c2b9d From 857456ea5f159bbd333528aa6111b1510e1be78b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 18:21:47 -0500 Subject: [PATCH 3157/3617] Fixing template integration test for python 2.6 versions No longer immediately fallback to to_json if simplejson is not installed --- lib/ansible/plugins/filter/core.py | 4 +++- test/integration/roles/test_template/tasks/main.yml | 7 ------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py index 3ab9db5a51b232..dc9acb4d092ba6 100644 --- a/lib/ansible/plugins/filter/core.py +++ b/lib/ansible/plugins/filter/core.py @@ -100,9 +100,11 @@ def to_nice_json(a, *args, **kw): else: if major >= 2: return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw) + try: + return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw) + except: # Fallback to the to_json filter return to_json(a, *args, **kw) - return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw) def bool(a): ''' return a bool for the arg ''' diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml index 28477d44e5ba90..9fd1d860e00250 100644 --- a/test/integration/roles/test_template/tasks/main.yml +++ b/test/integration/roles/test_template/tasks/main.yml @@ -49,13 +49,6 @@ - name: copy known good into place copy: src=foo.txt dest={{output_dir}}/foo.txt -# Seems that python-2.6 now outputs the same format as everywhere else? 
-# when: pyver.stdout != '2.6' - -#- name: copy known good into place -# copy: src=foo-py26.txt dest={{output_dir}}/foo.txt -# when: pyver.stdout == '2.6' - - name: compare templated file to known good shell: diff {{output_dir}}/foo.templated {{output_dir}}/foo.txt register: diff_result From 15135f3c16a87f68bede61415f2571097eaa6268 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 19:12:05 -0500 Subject: [PATCH 3158/3617] Make sure we're using the original host when processing include results Also fixes a bug where we were passing an incorrect number of parameters to _do_handler_run() when processing an include file in a handler task/block. Fixes #13560 --- lib/ansible/playbook/included_file.py | 15 +++++++++++---- lib/ansible/plugins/strategy/__init__.py | 2 ++ lib/ansible/plugins/strategy/free.py | 10 ++++++++-- lib/ansible/plugins/strategy/linear.py | 10 ++++++++-- 4 files changed, 29 insertions(+), 8 deletions(-) diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py index 7fb851a12afcad..cc756a75a96921 100644 --- a/lib/ansible/playbook/included_file.py +++ b/lib/ansible/playbook/included_file.py @@ -49,9 +49,15 @@ def __repr__(self): return "%s (%s): %s" % (self._filename, self._args, self._hosts) @staticmethod - def process_include_results(results, tqm, iterator, loader, variable_manager): + def process_include_results(results, tqm, iterator, inventory, loader, variable_manager): included_files = [] + def get_original_host(host): + if host.name in inventory._hosts_cache: + return inventory._hosts_cache[host.name] + else: + return inventory.get_host(host.name) + for res in results: if res._task.action == 'include': @@ -67,9 +73,10 @@ def process_include_results(results, tqm, iterator, loader, variable_manager): if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result: continue - original_task = iterator.get_original_task(res._host, res._task) + original_host = 
get_original_host(res._host) + original_task = iterator.get_original_task(original_host, res._task) - task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=res._host, task=original_task) + task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=original_host, task=original_task) templar = Templar(loader=loader, variables=task_vars) include_variables = include_result.get('include_variables', dict()) @@ -116,6 +123,6 @@ def process_include_results(results, tqm, iterator, loader, variable_manager): except ValueError: included_files.append(inc_file) - inc_file.add_host(res._host) + inc_file.add_host(original_host) return included_files diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index d2d79d036bdbed..7b2a3794efcf93 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -576,6 +576,7 @@ def _do_handler_run(self, handler, handler_name, iterator, play_context, notifie host_results, self._tqm, iterator=iterator, + inventory=self._inventory, loader=self._loader, variable_manager=self._variable_manager ) @@ -594,6 +595,7 @@ def _do_handler_run(self, handler, handler_name, iterator, play_context, notifie for task in block.block: result = self._do_handler_run( handler=task, + handler_name=None, iterator=iterator, play_context=play_context, notified_hosts=included_file._hosts[:], diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index 11eeaa92494e78..f4fc1226a1f542 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -139,8 +139,14 @@ def run(self, iterator, play_context): host_results.extend(results) try: - included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, - loader=self._loader, variable_manager=self._variable_manager) + included_files = IncludedFile.process_include_results( + host_results, + 
self._tqm, + iterator=iterator, + inventory=self._inventory, + loader=self._loader, + variable_manager=self._variable_manager + ) except AnsibleError as e: return False diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 8c94267cf46fc5..7bb227dbaea857 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -261,8 +261,14 @@ def run(self, iterator, play_context): break try: - included_files = IncludedFile.process_include_results(host_results, self._tqm, - iterator=iterator, loader=self._loader, variable_manager=self._variable_manager) + included_files = IncludedFile.process_include_results( + host_results, + self._tqm, + iterator=iterator, + inventory=self._inventory, + loader=self._loader, + variable_manager=self._variable_manager + ) except AnsibleError as e: return False From e5c2c03dea0998872a6b16a18d6c187685a5fc7a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 15 Dec 2015 09:39:13 -0500 Subject: [PATCH 3159/3617] Enable host_key checking at the strategy level Implements a new method in the ssh connection plugin (fetch_and_store_key) which is used to prefetch the key using ssh-keyscan. 
--- lib/ansible/executor/task_executor.py | 17 +- lib/ansible/inventory/host.py | 11 +- lib/ansible/plugins/connection/__init__.py | 5 +- lib/ansible/plugins/connection/ssh.py | 193 +++++++++++++++++++-- lib/ansible/plugins/strategy/__init__.py | 30 +++- lib/ansible/utils/connection.py | 50 ++++++ 6 files changed, 273 insertions(+), 33 deletions(-) create mode 100644 lib/ansible/utils/connection.py diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 5d7430fad25062..2623bc775b223c 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -32,6 +32,7 @@ from ansible.playbook.conditional import Conditional from ansible.playbook.task import Task from ansible.template import Templar +from ansible.utils.connection import get_smart_connection_type from ansible.utils.encrypt import key_for_hostname from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode @@ -564,21 +565,7 @@ def _get_connection(self, variables, templar): conn_type = self._play_context.connection if conn_type == 'smart': - conn_type = 'ssh' - if sys.platform.startswith('darwin') and self._play_context.password: - # due to a current bug in sshpass on OSX, which can trigger - # a kernel panic even for non-privileged users, we revert to - # paramiko on that OS when a SSH password is specified - conn_type = "paramiko" - else: - # see if SSH can support ControlPersist if not use paramiko - try: - cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - if "Bad configuration option" in err or "Usage:" in err: - conn_type = "paramiko" - except OSError: - conn_type = "paramiko" + conn_type = get_smart_connection_type(self._play_context) connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin) if not connection: diff --git a/lib/ansible/inventory/host.py 
b/lib/ansible/inventory/host.py index 6263dcbc80dbcc..70f9f57b5f1646 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -57,6 +57,7 @@ def serialize(self): name=self.name, vars=self.vars.copy(), address=self.address, + has_hostkey=self.has_hostkey, uuid=self._uuid, gathered_facts=self._gathered_facts, groups=groups, @@ -65,10 +66,11 @@ def serialize(self): def deserialize(self, data): self.__init__() - self.name = data.get('name') - self.vars = data.get('vars', dict()) - self.address = data.get('address', '') - self._uuid = data.get('uuid', uuid.uuid4()) + self.name = data.get('name') + self.vars = data.get('vars', dict()) + self.address = data.get('address', '') + self.has_hostkey = data.get('has_hostkey', False) + self._uuid = data.get('uuid', uuid.uuid4()) groups = data.get('groups', []) for group_data in groups: @@ -89,6 +91,7 @@ def __init__(self, name=None, port=None): self._gathered_facts = False self._uuid = uuid.uuid4() + self.has_hostkey = False def __repr__(self): return self.get_name() diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 06616bac4cad19..7fc19c8c195166 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -23,11 +23,11 @@ import fcntl import gettext import os -from abc import ABCMeta, abstractmethod, abstractproperty +from abc import ABCMeta, abstractmethod, abstractproperty from functools import wraps -from ansible.compat.six import with_metaclass +from ansible.compat.six import with_metaclass from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins import shell_loader @@ -233,3 +233,4 @@ def connection_unlock(self): f = self._play_context.connection_lockfd fcntl.lockf(f, fcntl.LOCK_UN) display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f)) + diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index 
a2abcf20aee903..cce29824e1a53e 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -19,7 +19,12 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +from ansible.compat.six import text_type + +import base64 import fcntl +import hmac +import operator import os import pipes import pty @@ -28,9 +33,13 @@ import subprocess import time +from hashlib import md5, sha1, sha256 + from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase +from ansible.utils.boolean import boolean +from ansible.utils.connection import get_smart_connection_type from ansible.utils.path import unfrackpath, makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode @@ -41,7 +50,128 @@ display = Display() SSHPASS_AVAILABLE = None +HASHED_KEY_MAGIC = "|1|" + +def split_args(argstring): + """ + Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a + list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to + the argument list. The list will not contain any empty elements. 
+ """ + return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] + +def get_ssh_opts(play_context): + # FIXME: caching may help here + opts_dict = dict() + try: + cmd = ['ssh', '-G', play_context.remote_addr] + res = subprocess.check_output(cmd) + for line in res.split('\n'): + if ' ' in line: + (key, val) = line.split(' ', 1) + else: + key = line + val = '' + opts_dict[key.lower()] = val + + # next, we manually override any options that are being + # set via ssh_args or due to the fact that `ssh -G` doesn't + # actually use the options set via -o + for opt in ['ssh_args', 'ssh_common_args', 'ssh_extra_args']: + attr = getattr(play_context, opt, None) + if attr is not None: + args = split_args(attr) + for arg in args: + if '=' in arg: + (key, val) = arg.split('=', 1) + opts_dict[key.lower()] = val + + return opts_dict + except subprocess.CalledProcessError: + return dict() + +def host_in_known_hosts(host, ssh_opts): + # the setting from the ssh_opts may actually be multiple files, so + # we use shlex.split and simply take the first one specified + user_host_file = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) + + host_file_list = [] + host_file_list.append(user_host_file) + host_file_list.append("/etc/ssh/ssh_known_hosts") + host_file_list.append("/etc/ssh/ssh_known_hosts2") + + hfiles_not_found = 0 + for hf in host_file_list: + if not os.path.exists(hf): + continue + try: + host_fh = open(hf) + except (OSError, IOError) as e: + continue + else: + data = host_fh.read() + host_fh.close() + + for line in data.split("\n"): + line = line.strip() + if line is None or " " not in line: + continue + tokens = line.split() + if not tokens: + continue + if tokens[0].find(HASHED_KEY_MAGIC) == 0: + # this is a hashed known host entry + try: + (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2) + hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) + hash.update(host) + if 
hash.digest() == kn_host.decode('base64'): + return True + except: + # invalid hashed host key, skip it + continue + else: + # standard host file entry + if host in tokens[0]: + return True + + return False + +def fetch_ssh_host_key(play_context, ssh_opts): + keyscan_cmd = ['ssh-keyscan'] + + if play_context.port: + keyscan_cmd.extend(['-p', text_type(play_context.port)]) + + if boolean(ssh_opts.get('hashknownhosts', 'no')): + keyscan_cmd.append('-H') + keyscan_cmd.append(play_context.remote_addr) + + p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + (stdout, stderr) = p.communicate() + if stdout == '': + raise AnsibleConnectionFailure("Failed to connect to the host to fetch the host key: %s." % stderr) + else: + return stdout + +def add_host_key(host_key, ssh_opts): + # the setting from the ssh_opts may actually be multiple files, so + # we use shlex.split and simply take the first one specified + user_known_hosts = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) + user_ssh_dir = os.path.dirname(user_known_hosts) + + if not os.path.exists(user_ssh_dir): + raise AnsibleError("the user ssh directory does not exist: %s" % user_ssh_dir) + elif not os.path.isdir(user_ssh_dir): + raise AnsibleError("%s is not a directory" % user_ssh_dir) + + try: + display.vv("adding to known_hosts file: %s" % user_known_hosts) + with open(user_known_hosts, 'a') as f: + f.write(host_key) + except (OSError, IOError) as e: + raise AnsibleError("error when trying to access the known hosts file: '%s', error was: %s" % (user_known_hosts, text_type(e))) class Connection(ConnectionBase): ''' ssh based connections ''' @@ -62,6 +192,56 @@ def __init__(self, *args, **kwargs): def _connect(self): return self + @staticmethod + def fetch_and_store_key(host, play_context): + ssh_opts = get_ssh_opts(play_context) + if not host_in_known_hosts(play_context.remote_addr, ssh_opts): + display.debug("host %s does 
not have a known host key, fetching it" % host) + + # build the list of valid host key types, for use later as we scan for keys. + # we also use this to determine the most preferred key when multiple keys are available + valid_host_key_types = [x.lower() for x in ssh_opts.get('hostbasedkeytypes', '').split(',')] + + # attempt to fetch the key with ssh-keyscan. More than one key may be + # returned, so we save all and use the above list to determine which + host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') + host_keys = dict() + for host_key in host_key_data: + (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) + key_type = key_type.lower() + if key_type in valid_host_key_types and key_type not in host_keys: + host_keys[key_type.lower()] = host_key + + if len(host_keys) == 0: + raise AnsibleConnectionFailure("none of the available host keys found were in the HostBasedKeyTypes configuration option") + + # now we determine the preferred key by sorting the above dict on the + # index of the key type in the valid keys list + preferred_key = sorted(host_keys.items(), cmp=lambda x,y: cmp(valid_host_key_types.index(x), valid_host_key_types.index(y)), key=operator.itemgetter(0))[0] + + # shamelessly copied from here: + # https://github.com/ojarva/python-sshpubkeys/blob/master/sshpubkeys/__init__.py#L39 + # (which shamelessly copied it from somewhere else...) + (host_info, key_type, key_hash) = preferred_key[1].strip().split(' ', 3) + decoded_key = key_hash.decode('base64') + fp_plain = md5(decoded_key).hexdigest() + key_data = ':'.join(a+b for a, b in zip(fp_plain[::2], fp_plain[1::2])) + + # prompt the user to add the key + # if yes, add it, otherwise raise AnsibleConnectionFailure + display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) + display.display("%s key fingerprint is SHA256:%s." 
% (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) + display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) + response = display.prompt("Are you sure you want to continue connecting (yes/no)? ") + display.display("") + if boolean(response): + add_host_key(host_key, ssh_opts) + return True + else: + raise AnsibleConnectionFailure("Host key validation failed.") + + return False + @staticmethod def _sshpass_available(): global SSHPASS_AVAILABLE @@ -100,15 +280,6 @@ def _persistence_controls(command): return controlpersist, controlpath - @staticmethod - def _split_args(argstring): - """ - Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a - list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to - the argument list. The list will not contain any empty elements. - """ - return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] - def _add_args(self, explanation, args): """ Adds the given args to self._command and displays a caller-supplied @@ -157,7 +328,7 @@ def _build_command(self, binary, *other_args): # Next, we add [ssh_connection]ssh_args from ansible.cfg. 
if self._play_context.ssh_args: - args = self._split_args(self._play_context.ssh_args) + args = split_args(self._play_context.ssh_args) self._add_args("ansible.cfg set ssh_args", args) # Now we add various arguments controlled by configuration file settings @@ -210,7 +381,7 @@ def _build_command(self, binary, *other_args): for opt in ['ssh_common_args', binary + '_extra_args']: attr = getattr(self._play_context, opt, None) if attr is not None: - args = self._split_args(attr) + args = split_args(attr) self._add_args("PlayContext set %s" % opt, args) # Check if ControlPersist is enabled and add a ControlPath if one hasn't diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index 7b2a3794efcf93..e460708f906d2a 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -29,7 +29,7 @@ from jinja2.exceptions import UndefinedError from ansible import constants as C -from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable +from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure from ansible.executor.play_iterator import PlayIterator from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_result import TaskResult @@ -39,6 +39,7 @@ from ansible.playbook.included_file import IncludedFile from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader from ansible.template import Templar +from ansible.utils.connection import get_smart_connection_type from ansible.vars.unsafe_proxy import wrap_var try: @@ -139,6 +140,33 @@ def _queue_task(self, host, task, task_vars, play_context): display.debug("entering _queue_task() for %s/%s" % (host, task)) + if C.HOST_KEY_CHECKING and not host.has_hostkey: + # caveat here, regarding with loops. 
It is assumed that none of the connection + # related variables would contain '{{item}}' as it would cause some really + # weird loops. As is, if someone did something odd like that they would need + # to disable host key checking + templar = Templar(loader=self._loader, variables=task_vars) + temp_pc = play_context.set_task_and_variable_override(task=task, variables=task_vars, templar=templar) + temp_pc.post_validate(templar) + if temp_pc.connection in ('smart', 'ssh') and get_smart_connection_type(temp_pc) == 'ssh': + try: + # get the ssh connection plugin's class, and use its builtin + # static method to fetch and save the key to the known_hosts file + ssh_conn = connection_loader.get('ssh', class_only=True) + ssh_conn.fetch_and_store_key(host, temp_pc) + except AnsibleConnectionFailure as e: + # if that fails, add the host to the list of unreachable + # hosts and send the appropriate callback + self._tqm._unreachable_hosts[host.name] = True + self._tqm._stats.increment('dark', host.name) + tr = TaskResult(host=host, task=task, return_data=dict(msg=text_type(e))) + self._tqm.send_callback('v2_runner_on_unreachable', tr) + return + + # finally, we set the has_hostkey flag to true for this + # host so we can skip it quickly in the future + host.has_hostkey = True + task_vars['hostvars'] = self._tqm.hostvars # and then queue the new task display.debug("%s - putting task (%s) in queue" % (host, task)) diff --git a/lib/ansible/utils/connection.py b/lib/ansible/utils/connection.py new file mode 100644 index 00000000000000..6f6b405640e6bf --- /dev/null +++ b/lib/ansible/utils/connection.py @@ -0,0 +1,50 @@ +# (c) 2015, Ansible, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import subprocess +import sys + + +__all__ = ['get_smart_connection_type'] + +def get_smart_connection_type(play_context): + ''' + Uses the ssh command with the ControlPersist option while checking + for an error to determine if we should use ssh or paramiko. Also + may take other factors into account. + ''' + + conn_type = 'ssh' + if sys.platform.startswith('darwin') and play_context.password: + # due to a current bug in sshpass on OSX, which can trigger + # a kernel panic even for non-privileged users, we revert to + # paramiko on that OS when a SSH password is specified + conn_type = "paramiko" + else: + # see if SSH can support ControlPersist if not use paramiko + try: + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err or "Usage:" in err: + conn_type = "paramiko" + except OSError: + conn_type = "paramiko" + + return conn_type From d7f2f606e179cf0df4d308a0055b4ad62207b47c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 16 Dec 2015 21:49:33 -0500 Subject: [PATCH 3160/3617] Add has_hostkey to mock objects to fix broken unit tests --- test/units/plugins/strategies/test_strategy_base.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py index 53e243f926be67..8d1a1e8adabf4a 100644 --- a/test/units/plugins/strategies/test_strategy_base.py +++ 
b/test/units/plugins/strategies/test_strategy_base.py @@ -76,6 +76,7 @@ def test_strategy_base_get_hosts(self): for i in range(0, 5): mock_host = MagicMock() mock_host.name = "host%02d" % (i+1) + mock_host.has_hostkey = True mock_hosts.append(mock_host) mock_inventory = MagicMock() @@ -111,6 +112,7 @@ def fake_run(self): fake_loader = DictDataLoader() mock_var_manager = MagicMock() mock_host = MagicMock() + mock_host.has_hostkey = True mock_inventory = MagicMock() mock_options = MagicMock() mock_options.module_path = None @@ -171,6 +173,7 @@ def _queue_get(*args, **kwargs): mock_host = MagicMock() mock_host.name = 'test01' mock_host.vars = dict() + mock_host.has_hostkey = True mock_task = MagicMock() mock_task._role = None @@ -347,6 +350,7 @@ def fake_run(*args): mock_host = MagicMock(Host) mock_host.name = "test01" + mock_host.has_hostkey = True mock_inventory = MagicMock() mock_inventory.get_hosts.return_value = [mock_host] From d9c74536be63cedc3dd1711c73844827990e898d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 09:44:40 -0500 Subject: [PATCH 3161/3617] Fix handling of environment inheritance, and template each inherited env Environments were not being templated individually, so a variable environment value was causing the exception regarding dicts to be hit. Also, environments as inherited were coming through with the tasks listed first, followed by the parents, so they were being merged backwards. Reversing the list of environments fixed this. 
--- lib/ansible/plugins/action/__init__.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 254bab476bb68f..e9b18651d66b72 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -151,14 +151,19 @@ def _compute_environment_string(self): if not isinstance(environments, list): environments = [ environments ] + # the environments as inherited need to be reversed, to make + # sure we merge in the parent's values first so those in the + # block then task 'win' in precedence + environments.reverse() for environment in environments: if environment is None: continue - if not isinstance(environment, dict): - raise AnsibleError("environment must be a dictionary, received %s (%s)" % (environment, type(environment))) + temp_environment = self._templar.template(environment) + if not isinstance(temp_environment, dict): + raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment))) # very deliberately using update here instead of combine_vars, as # these environment settings should not need to merge sub-dicts - final_environment.update(environment) + final_environment.update(temp_environment) final_environment = self._templar.template(final_environment) return self._connection._shell.env_prefix(**final_environment) From dd3d04e96ab30bb0df89b5e3ab1ac9a9d91d5841 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 10:31:14 -0500 Subject: [PATCH 3162/3617] Adding pip install of virtualenv to test deps integration role --- .../roles/ansible_test_deps/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index f71128921d999f..5f75085d92056a 100644 --- 
a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -10,6 +10,9 @@ ignore_errors: true when: ansible_os_family == 'Debian' +- name: Install virtualenv + pip: name=virtualenv state=present + - name: Install RH epel yum: name="epel-release" state=installed sudo: true From 0b1ad8d4905fa83eddbc08e2a3dd395aa99b8aed Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 10:41:58 -0500 Subject: [PATCH 3163/3617] Switch virtualenv dep installation from pip to package manager --- .../roles/ansible_test_deps/tasks/main.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 5f75085d92056a..c9cb256a35c19a 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -11,7 +11,12 @@ when: ansible_os_family == 'Debian' - name: Install virtualenv - pip: name=virtualenv state=present + yum: name=python-virtualenv state=installed + when: ansible_os_family == 'RedHat' + +- name: Install virtualenv + apt: name=python-virtualenv state=installed + when: ansible_os_family == 'Debian' - name: Install RH epel yum: name="epel-release" state=installed From cf3d503f790ddf7ba74bc768bd2faad7a550f5ee Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 11:00:54 -0500 Subject: [PATCH 3164/3617] Moving apt cache update to top to ensure cache is updated before deps installed --- .../roles/ansible_test_deps/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml 
b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index c9cb256a35c19a..c2fc955a1640e9 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -1,5 +1,8 @@ --- +- apt: update_cache=yes + when: ansible_os_family == 'Debian' + - name: Install sudo yum: name=sudo state=installed ignore_errors: true @@ -42,9 +45,6 @@ - libselinux-python when: ansible_os_family == 'RedHat' -- apt: update_cache=yes - when: ansible_os_family == 'Debian' - - name: Install Debian ansible dependencies apt: name="{{ item }}" state=installed update_cache=yes sudo: true From 26bbabcfba637e17b36bb20d064c390cf0461e4d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 11:15:06 -0500 Subject: [PATCH 3165/3617] Consolidating package lines for virtualenv install in test deps integration --- .../roles/ansible_test_deps/tasks/main.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index c2fc955a1640e9..ac133730ec5bfc 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -13,14 +13,6 @@ ignore_errors: true when: ansible_os_family == 'Debian' -- name: Install virtualenv - yum: name=python-virtualenv state=installed - when: ansible_os_family == 'RedHat' - -- name: Install virtualenv - apt: name=python-virtualenv state=installed - when: ansible_os_family == 'Debian' - - name: Install RH epel yum: name="epel-release" state=installed sudo: true @@ -43,6 +35,7 @@ - gcc - python-devel - libselinux-python + - python-virtualenv when: ansible_os_family == 'RedHat' - name: Install 
Debian ansible dependencies @@ -57,6 +50,7 @@ - git - unzip - python-dev + - python-virtualenv when: ansible_os_family == 'Debian' - name: Install ubuntu 12.04 ansible dependencies From 21c127c5813c800204c729d84188f1e6d7bae3e7 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 12:06:17 -0500 Subject: [PATCH 3166/3617] Fixing bugs in ssh known_host fetching * If remote_addr is not set in the PlayContext, use the host.address field instead (which is how the action plugin works) Fixes #13581 --- lib/ansible/plugins/connection/ssh.py | 29 +++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index cce29824e1a53e..c24d1667348713 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -60,11 +60,15 @@ def split_args(argstring): """ return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] -def get_ssh_opts(play_context): +def get_ssh_opts(host, play_context): # FIXME: caching may help here opts_dict = dict() try: - cmd = ['ssh', '-G', play_context.remote_addr] + remote_addr = play_context.remote_addr + if not remote_addr: + remote_addr = host.address + + cmd = ['ssh', '-G', remote_addr] res = subprocess.check_output(cmd) for line in res.split('\n'): if ' ' in line: @@ -137,7 +141,7 @@ def host_in_known_hosts(host, ssh_opts): return False -def fetch_ssh_host_key(play_context, ssh_opts): +def fetch_ssh_host_key(host, play_context, ssh_opts): keyscan_cmd = ['ssh-keyscan'] if play_context.port: @@ -146,7 +150,11 @@ def fetch_ssh_host_key(play_context, ssh_opts): if boolean(ssh_opts.get('hashknownhosts', 'no')): keyscan_cmd.append('-H') - keyscan_cmd.append(play_context.remote_addr) + remote_addr = play_context.remote_addr + if not remote_addr: + remote_addr = host.address + + keyscan_cmd.append(remote_addr) p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE, close_fds=True) (stdout, stderr) = p.communicate() @@ -194,8 +202,13 @@ def _connect(self): @staticmethod def fetch_and_store_key(host, play_context): - ssh_opts = get_ssh_opts(play_context) - if not host_in_known_hosts(play_context.remote_addr, ssh_opts): + ssh_opts = get_ssh_opts(host, play_context) + + remote_addr = play_context.remote_addr + if not remote_addr: + remote_addr = host.address + + if not host_in_known_hosts(remote_addr, ssh_opts): display.debug("host %s does not have a known host key, fetching it" % host) # build the list of valid host key types, for use later as we scan for keys. @@ -204,7 +217,7 @@ def fetch_and_store_key(host, play_context): # attempt to fetch the key with ssh-keyscan. More than one key may be # returned, so we save all and use the above list to determine which - host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') + host_key_data = fetch_ssh_host_key(host, play_context, ssh_opts).strip().split('\n') host_keys = dict() for host_key in host_key_data: (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) @@ -229,7 +242,7 @@ def fetch_and_store_key(host, play_context): # prompt the user to add the key # if yes, add it, otherwise raise AnsibleConnectionFailure - display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) + display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, remote_addr)) display.display("%s key fingerprint is SHA256:%s." % (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) response = display.prompt("Are you sure you want to continue connecting (yes/no)? 
") From 8db4415e2e95e5993822b4f75e700dd14a928ad9 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 17 Dec 2015 12:25:29 -0500 Subject: [PATCH 3167/3617] changed test to use filter for accurate reporting --- test/integration/roles/test_service/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml index c0e590643c905e..8b61d62143a8c2 100644 --- a/test/integration/roles/test_service/tasks/main.yml +++ b/test/integration/roles/test_service/tasks/main.yml @@ -98,7 +98,7 @@ - name: assert that the broken test failed assert: that: - - "broken_enable_result.failed == True" + - "broken_enable_result|failed" - name: remove the test daemon script file: path=/usr/sbin/ansible_test_service state=absent From 586208234cc921acc70fbe1fff211707ceba0c7a Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 12:42:53 -0500 Subject: [PATCH 3168/3617] Revert "Fixing bugs in ssh known_host fetching" This reverts commit 21c127c5813c800204c729d84188f1e6d7bae3e7. 
--- lib/ansible/plugins/connection/ssh.py | 29 ++++++++------------------- 1 file changed, 8 insertions(+), 21 deletions(-) diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index c24d1667348713..cce29824e1a53e 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -60,15 +60,11 @@ def split_args(argstring): """ return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] -def get_ssh_opts(host, play_context): +def get_ssh_opts(play_context): # FIXME: caching may help here opts_dict = dict() try: - remote_addr = play_context.remote_addr - if not remote_addr: - remote_addr = host.address - - cmd = ['ssh', '-G', remote_addr] + cmd = ['ssh', '-G', play_context.remote_addr] res = subprocess.check_output(cmd) for line in res.split('\n'): if ' ' in line: @@ -141,7 +137,7 @@ def host_in_known_hosts(host, ssh_opts): return False -def fetch_ssh_host_key(host, play_context, ssh_opts): +def fetch_ssh_host_key(play_context, ssh_opts): keyscan_cmd = ['ssh-keyscan'] if play_context.port: @@ -150,11 +146,7 @@ def fetch_ssh_host_key(host, play_context, ssh_opts): if boolean(ssh_opts.get('hashknownhosts', 'no')): keyscan_cmd.append('-H') - remote_addr = play_context.remote_addr - if not remote_addr: - remote_addr = host.address - - keyscan_cmd.append(remote_addr) + keyscan_cmd.append(play_context.remote_addr) p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) (stdout, stderr) = p.communicate() @@ -202,13 +194,8 @@ def _connect(self): @staticmethod def fetch_and_store_key(host, play_context): - ssh_opts = get_ssh_opts(host, play_context) - - remote_addr = play_context.remote_addr - if not remote_addr: - remote_addr = host.address - - if not host_in_known_hosts(remote_addr, ssh_opts): + ssh_opts = get_ssh_opts(play_context) + if not host_in_known_hosts(play_context.remote_addr, ssh_opts): display.debug("host %s does not have a 
known host key, fetching it" % host) # build the list of valid host key types, for use later as we scan for keys. @@ -217,7 +204,7 @@ def fetch_and_store_key(host, play_context): # attempt to fetch the key with ssh-keyscan. More than one key may be # returned, so we save all and use the above list to determine which - host_key_data = fetch_ssh_host_key(host, play_context, ssh_opts).strip().split('\n') + host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') host_keys = dict() for host_key in host_key_data: (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) @@ -242,7 +229,7 @@ def fetch_and_store_key(host, play_context): # prompt the user to add the key # if yes, add it, otherwise raise AnsibleConnectionFailure - display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, remote_addr)) + display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) display.display("%s key fingerprint is SHA256:%s." % (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) response = display.prompt("Are you sure you want to continue connecting (yes/no)? ") From e5462194261c7b55ccdf41adc4525dc86a1a34c1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 12:43:36 -0500 Subject: [PATCH 3169/3617] Revert "Enable host_key checking at the strategy level" This reverts commit 1a6d660d7e285cceec474952a33af4d8dffd0a8d. 
--- lib/ansible/executor/task_executor.py | 17 +- lib/ansible/inventory/host.py | 11 +- lib/ansible/plugins/connection/__init__.py | 5 +- lib/ansible/plugins/connection/ssh.py | 193 ++------------------- lib/ansible/plugins/strategy/__init__.py | 30 +--- lib/ansible/utils/connection.py | 50 ------ 6 files changed, 33 insertions(+), 273 deletions(-) delete mode 100644 lib/ansible/utils/connection.py diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 2623bc775b223c..5d7430fad25062 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -32,7 +32,6 @@ from ansible.playbook.conditional import Conditional from ansible.playbook.task import Task from ansible.template import Templar -from ansible.utils.connection import get_smart_connection_type from ansible.utils.encrypt import key_for_hostname from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode @@ -565,7 +564,21 @@ def _get_connection(self, variables, templar): conn_type = self._play_context.connection if conn_type == 'smart': - conn_type = get_smart_connection_type(self._play_context) + conn_type = 'ssh' + if sys.platform.startswith('darwin') and self._play_context.password: + # due to a current bug in sshpass on OSX, which can trigger + # a kernel panic even for non-privileged users, we revert to + # paramiko on that OS when a SSH password is specified + conn_type = "paramiko" + else: + # see if SSH can support ControlPersist if not use paramiko + try: + cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (out, err) = cmd.communicate() + if "Bad configuration option" in err or "Usage:" in err: + conn_type = "paramiko" + except OSError: + conn_type = "paramiko" connection = self._shared_loader_obj.connection_loader.get(conn_type, self._play_context, self._new_stdin) if not connection: diff --git a/lib/ansible/inventory/host.py 
b/lib/ansible/inventory/host.py index 70f9f57b5f1646..6263dcbc80dbcc 100644 --- a/lib/ansible/inventory/host.py +++ b/lib/ansible/inventory/host.py @@ -57,7 +57,6 @@ def serialize(self): name=self.name, vars=self.vars.copy(), address=self.address, - has_hostkey=self.has_hostkey, uuid=self._uuid, gathered_facts=self._gathered_facts, groups=groups, @@ -66,11 +65,10 @@ def serialize(self): def deserialize(self, data): self.__init__() - self.name = data.get('name') - self.vars = data.get('vars', dict()) - self.address = data.get('address', '') - self.has_hostkey = data.get('has_hostkey', False) - self._uuid = data.get('uuid', uuid.uuid4()) + self.name = data.get('name') + self.vars = data.get('vars', dict()) + self.address = data.get('address', '') + self._uuid = data.get('uuid', uuid.uuid4()) groups = data.get('groups', []) for group_data in groups: @@ -91,7 +89,6 @@ def __init__(self, name=None, port=None): self._gathered_facts = False self._uuid = uuid.uuid4() - self.has_hostkey = False def __repr__(self): return self.get_name() diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 7fc19c8c195166..06616bac4cad19 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -23,11 +23,11 @@ import fcntl import gettext import os - from abc import ABCMeta, abstractmethod, abstractproperty -from functools import wraps +from functools import wraps from ansible.compat.six import with_metaclass + from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins import shell_loader @@ -233,4 +233,3 @@ def connection_unlock(self): f = self._play_context.connection_lockfd fcntl.lockf(f, fcntl.LOCK_UN) display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f)) - diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py index cce29824e1a53e..a2abcf20aee903 100644 --- 
a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -19,12 +19,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type -from ansible.compat.six import text_type - -import base64 import fcntl -import hmac -import operator import os import pipes import pty @@ -33,13 +28,9 @@ import subprocess import time -from hashlib import md5, sha1, sha256 - from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase -from ansible.utils.boolean import boolean -from ansible.utils.connection import get_smart_connection_type from ansible.utils.path import unfrackpath, makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode @@ -50,128 +41,7 @@ display = Display() SSHPASS_AVAILABLE = None -HASHED_KEY_MAGIC = "|1|" - -def split_args(argstring): - """ - Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a - list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to - the argument list. The list will not contain any empty elements. 
- """ - return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] - -def get_ssh_opts(play_context): - # FIXME: caching may help here - opts_dict = dict() - try: - cmd = ['ssh', '-G', play_context.remote_addr] - res = subprocess.check_output(cmd) - for line in res.split('\n'): - if ' ' in line: - (key, val) = line.split(' ', 1) - else: - key = line - val = '' - opts_dict[key.lower()] = val - - # next, we manually override any options that are being - # set via ssh_args or due to the fact that `ssh -G` doesn't - # actually use the options set via -o - for opt in ['ssh_args', 'ssh_common_args', 'ssh_extra_args']: - attr = getattr(play_context, opt, None) - if attr is not None: - args = split_args(attr) - for arg in args: - if '=' in arg: - (key, val) = arg.split('=', 1) - opts_dict[key.lower()] = val - - return opts_dict - except subprocess.CalledProcessError: - return dict() - -def host_in_known_hosts(host, ssh_opts): - # the setting from the ssh_opts may actually be multiple files, so - # we use shlex.split and simply take the first one specified - user_host_file = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) - - host_file_list = [] - host_file_list.append(user_host_file) - host_file_list.append("/etc/ssh/ssh_known_hosts") - host_file_list.append("/etc/ssh/ssh_known_hosts2") - - hfiles_not_found = 0 - for hf in host_file_list: - if not os.path.exists(hf): - continue - try: - host_fh = open(hf) - except (OSError, IOError) as e: - continue - else: - data = host_fh.read() - host_fh.close() - - for line in data.split("\n"): - line = line.strip() - if line is None or " " not in line: - continue - tokens = line.split() - if not tokens: - continue - if tokens[0].find(HASHED_KEY_MAGIC) == 0: - # this is a hashed known host entry - try: - (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2) - hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1) - hash.update(host) - if 
hash.digest() == kn_host.decode('base64'): - return True - except: - # invalid hashed host key, skip it - continue - else: - # standard host file entry - if host in tokens[0]: - return True - - return False - -def fetch_ssh_host_key(play_context, ssh_opts): - keyscan_cmd = ['ssh-keyscan'] - - if play_context.port: - keyscan_cmd.extend(['-p', text_type(play_context.port)]) - - if boolean(ssh_opts.get('hashknownhosts', 'no')): - keyscan_cmd.append('-H') - keyscan_cmd.append(play_context.remote_addr) - - p = subprocess.Popen(keyscan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) - (stdout, stderr) = p.communicate() - if stdout == '': - raise AnsibleConnectionFailure("Failed to connect to the host to fetch the host key: %s." % stderr) - else: - return stdout - -def add_host_key(host_key, ssh_opts): - # the setting from the ssh_opts may actually be multiple files, so - # we use shlex.split and simply take the first one specified - user_known_hosts = os.path.expanduser(shlex.split(ssh_opts.get('userknownhostsfile', '~/.ssh/known_hosts'))[0]) - user_ssh_dir = os.path.dirname(user_known_hosts) - - if not os.path.exists(user_ssh_dir): - raise AnsibleError("the user ssh directory does not exist: %s" % user_ssh_dir) - elif not os.path.isdir(user_ssh_dir): - raise AnsibleError("%s is not a directory" % user_ssh_dir) - - try: - display.vv("adding to known_hosts file: %s" % user_known_hosts) - with open(user_known_hosts, 'a') as f: - f.write(host_key) - except (OSError, IOError) as e: - raise AnsibleError("error when trying to access the known hosts file: '%s', error was: %s" % (user_known_hosts, text_type(e))) class Connection(ConnectionBase): ''' ssh based connections ''' @@ -192,56 +62,6 @@ def __init__(self, *args, **kwargs): def _connect(self): return self - @staticmethod - def fetch_and_store_key(host, play_context): - ssh_opts = get_ssh_opts(play_context) - if not host_in_known_hosts(play_context.remote_addr, ssh_opts): - display.debug("host %s does 
not have a known host key, fetching it" % host) - - # build the list of valid host key types, for use later as we scan for keys. - # we also use this to determine the most preferred key when multiple keys are available - valid_host_key_types = [x.lower() for x in ssh_opts.get('hostbasedkeytypes', '').split(',')] - - # attempt to fetch the key with ssh-keyscan. More than one key may be - # returned, so we save all and use the above list to determine which - host_key_data = fetch_ssh_host_key(play_context, ssh_opts).strip().split('\n') - host_keys = dict() - for host_key in host_key_data: - (host_info, key_type, key_hash) = host_key.strip().split(' ', 3) - key_type = key_type.lower() - if key_type in valid_host_key_types and key_type not in host_keys: - host_keys[key_type.lower()] = host_key - - if len(host_keys) == 0: - raise AnsibleConnectionFailure("none of the available host keys found were in the HostBasedKeyTypes configuration option") - - # now we determine the preferred key by sorting the above dict on the - # index of the key type in the valid keys list - preferred_key = sorted(host_keys.items(), cmp=lambda x,y: cmp(valid_host_key_types.index(x), valid_host_key_types.index(y)), key=operator.itemgetter(0))[0] - - # shamelessly copied from here: - # https://github.com/ojarva/python-sshpubkeys/blob/master/sshpubkeys/__init__.py#L39 - # (which shamelessly copied it from somewhere else...) - (host_info, key_type, key_hash) = preferred_key[1].strip().split(' ', 3) - decoded_key = key_hash.decode('base64') - fp_plain = md5(decoded_key).hexdigest() - key_data = ':'.join(a+b for a, b in zip(fp_plain[::2], fp_plain[1::2])) - - # prompt the user to add the key - # if yes, add it, otherwise raise AnsibleConnectionFailure - display.display("\nThe authenticity of host %s (%s) can't be established." % (host.name, play_context.remote_addr)) - display.display("%s key fingerprint is SHA256:%s." 
% (key_type.upper(), sha256(decoded_key).digest().encode('base64').strip())) - display.display("%s key fingerprint is MD5:%s." % (key_type.upper(), key_data)) - response = display.prompt("Are you sure you want to continue connecting (yes/no)? ") - display.display("") - if boolean(response): - add_host_key(host_key, ssh_opts) - return True - else: - raise AnsibleConnectionFailure("Host key validation failed.") - - return False - @staticmethod def _sshpass_available(): global SSHPASS_AVAILABLE @@ -280,6 +100,15 @@ def _persistence_controls(command): return controlpersist, controlpath + @staticmethod + def _split_args(argstring): + """ + Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a + list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to + the argument list. The list will not contain any empty elements. + """ + return [to_unicode(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()] + def _add_args(self, explanation, args): """ Adds the given args to self._command and displays a caller-supplied @@ -328,7 +157,7 @@ def _build_command(self, binary, *other_args): # Next, we add [ssh_connection]ssh_args from ansible.cfg. 
if self._play_context.ssh_args: - args = split_args(self._play_context.ssh_args) + args = self._split_args(self._play_context.ssh_args) self._add_args("ansible.cfg set ssh_args", args) # Now we add various arguments controlled by configuration file settings @@ -381,7 +210,7 @@ def _build_command(self, binary, *other_args): for opt in ['ssh_common_args', binary + '_extra_args']: attr = getattr(self._play_context, opt, None) if attr is not None: - args = split_args(attr) + args = self._split_args(attr) self._add_args("PlayContext set %s" % opt, args) # Check if ControlPersist is enabled and add a ControlPath if one hasn't diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py index e460708f906d2a..7b2a3794efcf93 100644 --- a/lib/ansible/plugins/strategy/__init__.py +++ b/lib/ansible/plugins/strategy/__init__.py @@ -29,7 +29,7 @@ from jinja2.exceptions import UndefinedError from ansible import constants as C -from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleConnectionFailure +from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable from ansible.executor.play_iterator import PlayIterator from ansible.executor.process.worker import WorkerProcess from ansible.executor.task_result import TaskResult @@ -39,7 +39,6 @@ from ansible.playbook.included_file import IncludedFile from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader from ansible.template import Templar -from ansible.utils.connection import get_smart_connection_type from ansible.vars.unsafe_proxy import wrap_var try: @@ -140,33 +139,6 @@ def _queue_task(self, host, task, task_vars, play_context): display.debug("entering _queue_task() for %s/%s" % (host, task)) - if C.HOST_KEY_CHECKING and not host.has_hostkey: - # caveat here, regarding with loops. 
It is assumed that none of the connection - # related variables would contain '{{item}}' as it would cause some really - # weird loops. As is, if someone did something odd like that they would need - # to disable host key checking - templar = Templar(loader=self._loader, variables=task_vars) - temp_pc = play_context.set_task_and_variable_override(task=task, variables=task_vars, templar=templar) - temp_pc.post_validate(templar) - if temp_pc.connection in ('smart', 'ssh') and get_smart_connection_type(temp_pc) == 'ssh': - try: - # get the ssh connection plugin's class, and use its builtin - # static method to fetch and save the key to the known_hosts file - ssh_conn = connection_loader.get('ssh', class_only=True) - ssh_conn.fetch_and_store_key(host, temp_pc) - except AnsibleConnectionFailure as e: - # if that fails, add the host to the list of unreachable - # hosts and send the appropriate callback - self._tqm._unreachable_hosts[host.name] = True - self._tqm._stats.increment('dark', host.name) - tr = TaskResult(host=host, task=task, return_data=dict(msg=text_type(e))) - self._tqm.send_callback('v2_runner_on_unreachable', tr) - return - - # finally, we set the has_hostkey flag to true for this - # host so we can skip it quickly in the future - host.has_hostkey = True - task_vars['hostvars'] = self._tqm.hostvars # and then queue the new task display.debug("%s - putting task (%s) in queue" % (host, task)) diff --git a/lib/ansible/utils/connection.py b/lib/ansible/utils/connection.py deleted file mode 100644 index 6f6b405640e6bf..00000000000000 --- a/lib/ansible/utils/connection.py +++ /dev/null @@ -1,50 +0,0 @@ -# (c) 2015, Ansible, Inc. -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import subprocess -import sys - - -__all__ = ['get_smart_connection_type'] - -def get_smart_connection_type(play_context): - ''' - Uses the ssh command with the ControlPersist option while checking - for an error to determine if we should use ssh or paramiko. Also - may take other factors into account. - ''' - - conn_type = 'ssh' - if sys.platform.startswith('darwin') and play_context.password: - # due to a current bug in sshpass on OSX, which can trigger - # a kernel panic even for non-privileged users, we revert to - # paramiko on that OS when a SSH password is specified - conn_type = "paramiko" - else: - # see if SSH can support ControlPersist if not use paramiko - try: - cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (out, err) = cmd.communicate() - if "Bad configuration option" in err or "Usage:" in err: - conn_type = "paramiko" - except OSError: - conn_type = "paramiko" - - return conn_type From 1b5e7ce0253c896f5166b5ffd1c2614090cc75a1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 10:23:02 -0800 Subject: [PATCH 3170/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 16a3bdaa7da9e9..c75c0003697d00 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 16a3bdaa7da9e9f7c0572d3a3fdbfd79f29c2b9d +Subproject commit 
c75c0003697d00f52cedb68d4c1b05b7e95991e0 diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 8ec4f95ffd6d4e..06bdec0cac86ef 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 8ec4f95ffd6d4e837cf0f3dd28649fb09afd0caf +Subproject commit 06bdec0cac86ef2339e0b4d8a4616ee24619956f From ce1febe28bb538c9d6db59449caf4da9dcf23f7e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 11:25:45 -0800 Subject: [PATCH 3171/3617] debug line needs var not msg --- test/integration/roles/test_get_url/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 09ee34277a04aa..640c987790fdf7 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -78,7 +78,7 @@ # If distros start backporting SNI, can make a new conditional based on whether this works: # python -c 'from ssl import SSLContext' -- debug: msg=get_url_result +- debug: var=get_url_result - name: Assert that SNI works with this python version assert: that: From bad1c173b87a7b68fc0ae79b35376fc31e8cc5d7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 11:36:36 -0800 Subject: [PATCH 3172/3617] Update core submodule for mysql_db fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index c75c0003697d00..b4a3fdd4933788 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit c75c0003697d00f52cedb68d4c1b05b7e95991e0 +Subproject commit b4a3fdd493378853c0b6ab35d5d8bcf52612a4a0 From 8c6f56f982fce50d5b030928e425740a30d4f86c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 11:46:26 -0800 Subject: [PATCH 3173/3617] kennetreitz.org times out but www.kennethreitz.org is fine 
--- test/integration/roles/test_lookups/tasks/main.yml | 6 +++--- test/integration/roles/test_uri/tasks/main.yml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml index 5ca29e27c1e618..3c5e066ee34f7d 100644 --- a/test/integration/roles/test_lookups/tasks/main.yml +++ b/test/integration/roles/test_lookups/tasks/main.yml @@ -177,7 +177,7 @@ - name: Test that retrieving a url with invalid cert fails set_fact: - web_data: "{{ lookup('url', 'https://kennethreitz.org/') }}" + web_data: "{{ lookup('url', 'https://www.kennethreitz.org/') }}" ignore_errors: True register: url_invalid_cert @@ -188,9 +188,9 @@ - name: Test that retrieving a url with invalid cert with validate_certs=False works set_fact: - web_data: "{{ lookup('url', 'https://kennethreitz.org/', validate_certs=False) }}" + web_data: "{{ lookup('url', 'https://www.kennethreitz.org/', validate_certs=False) }}" register: url_no_validate_cert - assert: that: - - "'kennethreitz.org' in web_data" + - "'www.kennethreitz.org' in web_data" diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 7300578982d448..18229e6b7cff5d 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -94,7 +94,7 @@ - name: test https fetch to a site with mismatched hostname and certificate uri: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result @@ -117,7 +117,7 @@ - name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no get_url: - url: "https://kennethreitz.org/" + url: "https://www.kennethreitz.org/" dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result From 5929ffc7c3b79b830edeebdb8542b53c3c0a15b3 Mon Sep 17 00:00:00 2001 From: James 
Cammarata Date: Thu, 17 Dec 2015 16:01:56 -0500 Subject: [PATCH 3174/3617] Make --list-tasks respect tags Also makes the output closer to the appearance of v1 Fixes #13260 --- lib/ansible/cli/playbook.py | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index a9c0ed018dc0bc..e51d5d3993b849 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -30,6 +30,7 @@ from ansible.executor.playbook_executor import PlaybookExecutor from ansible.inventory import Inventory from ansible.parsing.dataloader import DataLoader +from ansible.playbook.play_context import PlayContext from ansible.utils.vars import load_extra_vars from ansible.vars import VariableManager @@ -152,18 +153,10 @@ def run(self): for p in results: display.display('\nplaybook: %s' % p['playbook']) - i = 1 - for play in p['plays']: - if play.name: - playname = play.name - else: - playname = '#' + str(i) - - msg = "\n PLAY: %s" % (playname) - mytags = set() - if self.options.listtags and play.tags: - mytags = mytags.union(set(play.tags)) - msg += ' TAGS: [%s]' % (','.join(mytags)) + for idx, play in enumerate(p['plays']): + msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name) + mytags = set(play.tags) + msg += ' TAGS: [%s]' % (','.join(mytags)) if self.options.listhosts: playhosts = set(inventory.get_hosts(play.hosts)) @@ -176,20 +169,21 @@ def run(self): if self.options.listtags or self.options.listtasks: taskmsg = ' tasks:' + all_vars = variable_manager.get_vars(loader=loader, play=play) + play_context = PlayContext(play=play, options=self.options) for block in play.compile(): + block = block.filter_tagged_tasks(play_context, all_vars) if not block.has_tasks(): continue - j = 1 for task in block.block: - taskmsg += "\n %s" % task - if self.options.listtags and task.tags: - taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags))) - j = j + 1 + if 
task.action == 'meta': + continue + taskmsg += "\n %s" % task.get_name() + taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags))) display.display(taskmsg) - i = i + 1 return 0 else: return results From d4ffc96c8039e5a79baf23be173d03c2e4c8565f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 16:30:23 -0500 Subject: [PATCH 3175/3617] Further tweaks to the output format of list tasks/tags --- lib/ansible/cli/playbook.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index e51d5d3993b849..d307abdfcc1a64 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -156,7 +156,7 @@ def run(self): for idx, play in enumerate(p['plays']): msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name) mytags = set(play.tags) - msg += ' TAGS: [%s]' % (','.join(mytags)) + msg += '\tTAGS: [%s]' % (','.join(mytags)) if self.options.listhosts: playhosts = set(inventory.get_hosts(play.hosts)) @@ -166,8 +166,11 @@ def run(self): display.display(msg) + all_tags = set() if self.options.listtags or self.options.listtasks: - taskmsg = ' tasks:' + taskmsg = '' + if self.options.listtasks: + taskmsg = ' tasks:\n' all_vars = variable_manager.get_vars(loader=loader, play=play) play_context = PlayContext(play=play, options=self.options) @@ -179,8 +182,18 @@ def run(self): for task in block.block: if task.action == 'meta': continue - taskmsg += "\n %s" % task.get_name() - taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags))) + + all_tags.update(task.tags) + if self.options.listtasks: + cur_tags = list(mytags.union(set(task.tags))) + cur_tags.sort() + taskmsg += " %s" % task.action + taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) + + if self.options.listtags: + cur_tags = list(mytags.union(all_tags)) + cur_tags.sort() + taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags) display.display(taskmsg) From 
4ba7158282f148c90c72f824d6ebcd1a9953b580 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 16:33:23 -0500 Subject: [PATCH 3176/3617] Fixing a mistake from tweaking list stuff too much Use the action only if the task name is not set --- lib/ansible/cli/playbook.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py index d307abdfcc1a64..dfd06b1920876d 100644 --- a/lib/ansible/cli/playbook.py +++ b/lib/ansible/cli/playbook.py @@ -187,7 +187,10 @@ def run(self): if self.options.listtasks: cur_tags = list(mytags.union(set(task.tags))) cur_tags.sort() - taskmsg += " %s" % task.action + if task.name: + taskmsg += " %s" % task.get_name() + else: + taskmsg += " %s" % task.action taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags) if self.options.listtags: From 3057fc1753eff42fb073ae866734cb9127cbd25a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 13:46:15 -0800 Subject: [PATCH 3177/3617] Update submodule ref for mysql_user fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index b4a3fdd4933788..9366dfb63e565c 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit b4a3fdd493378853c0b6ab35d5d8bcf52612a4a0 +Subproject commit 9366dfb63e565c9e0901d714be8832fc89b275d6 From c5eda277ac6ca50cf593a724a368ad973d1a3935 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 17:51:42 -0800 Subject: [PATCH 3178/3617] Fix get_url tests in light of distros backporting SNI support --- .../roles/test_get_url/tasks/main.yml | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 640c987790fdf7..d7885f0905e887 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ 
b/test/integration/roles/test_get_url/tasks/main.yml @@ -16,6 +16,21 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +- name: Determine if python looks like it will support modern ssl features like SNI + command: python -c 'from ssl import SSLContext' + ignore_errors: True + register: python_test + +- name: Set python_has_sslcontext if we have it + set_fact: + python_has_ssl_context: True + when: python_test.rc == 0 + +- name: Set python_has_sslcontext False if we don't have it + set_fact: + python_has_ssl_context: False + when: python_test.rc != 0 + - name: test https fetch get_url: url="https://raw.githubusercontent.com/ansible/ansible/devel/README.md" dest={{output_dir}}/get_url.txt force=yes register: result @@ -74,7 +89,7 @@ - command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" register: data_result - when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}" + when: "{{ python_has_ssl_context }}" # If distros start backporting SNI, can make a new conditional based on whether this works: # python -c 'from ssl import SSLContext' @@ -84,11 +99,11 @@ that: - 'data_result.rc == 0' - '"failed" not in get_url_result' - when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}" + when: "{{ python_has_ssl_context }}" # If the client doesn't support SNI then get_url should have failed with a certificate mismatch - name: Assert that hostname verification failed because SNI is not supported on this version of python assert: that: - 'get_url_result["failed"]' - when: "{{ ansible_python_version | version_compare('2.7.9', '<') }}" + when: "{{ not python_has_ssl_context }}" From 12c0bb9414224517c6b15ec1d58aedd45d40703d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 20:52:49 -0500 Subject: [PATCH 3179/3617] Use --source instead of -e for awk in integration Makefile --- test/integration/Makefile | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/test/integration/Makefile b/test/integration/Makefile index a2d91f96f1a165..dcd30f0b836c2b 100644 --- a/test/integration/Makefile +++ b/test/integration/Makefile @@ -193,5 +193,5 @@ test_lookup_paths: no_log: # This test expects 7 loggable vars and 0 non loggable ones, if either mismatches it fails, run the ansible-playbook command to debug - [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk -e 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ] + [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk --source 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ] From 1f3eec293bad4add2e52fbc52a7bbdcc912c3ab8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 20:06:53 -0800 Subject: [PATCH 3180/3617] Install an updated version of pycrypto on Ubuntu12 from pip --- .../roles/ansible_test_deps/tasks/main.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index ac133730ec5bfc..0b9e58c6598a15 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -67,6 +67,14 @@ - rubygems-integration when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "14.04" +# Not sure why CentOS 6 is working without this.... 
+#- name: Install Red Hat 6 ansible dependencies +# yum: name="{{ item }}" state=installed +# sudo: true +# with_items: +# - python-crypto2.6 +# when: ansible_distribution in ('CentOS', 'RedHat') and ansible_distribution_major_version == "6" + - name: Install ansible pip deps sudo: true pip: name="{{ item }}" @@ -75,6 +83,13 @@ - Jinja2 - paramiko +- name: Install ubuntu 12.04 ansible pip deps + sudo: true + pip: name="{{ item }}" + with_items: + - pycrypto + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04" + - name: Remove tty sudo requirement sudo: true lineinfile: "dest=/etc/sudoers regexp='^Defaults[ , ]*requiretty' line='#Defaults requiretty'" From 3143b352c53e2beeecec996d4ca80fa7a4293f93 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 17 Dec 2015 23:07:28 -0500 Subject: [PATCH 3181/3617] Add ca-certificates update to the integration deps playbook --- .../roles/ansible_test_deps/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 0b9e58c6598a15..85fad6a7fbb0f8 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -53,6 +53,10 @@ - python-virtualenv when: ansible_os_family == 'Debian' +- name: update ca certificates + yum: name=ca-certificates state=latest + when: ansible_os_family == 'RedHat' + - name: Install ubuntu 12.04 ansible dependencies apt: name="{{ item }}" state=installed update_cache=yes sudo: true From a391d6f89ab906d585e623f58789b39fb0797faf Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 20:09:48 -0800 Subject: [PATCH 3182/3617] Add state=latest to pip install of pycrypto --- .../roles/ansible_test_deps/tasks/main.yml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 85fad6a7fbb0f8..897a4e54edb188 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -89,7 +89,7 @@ - name: Install ubuntu 12.04 ansible pip deps sudo: true - pip: name="{{ item }}" + pip: name="{{ item }}" state=latest with_items: - pycrypto when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04" From 44e30e49dd4b678ff21d308d0e8b00b769de75e1 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 07:47:23 -0500 Subject: [PATCH 3183/3617] Add awk to integration test deps list --- .../roles/ansible_test_deps/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 897a4e54edb188..25b19d040e81f3 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -33,6 +33,7 @@ - openssl - make - gcc + - gawk - python-devel - libselinux-python - python-virtualenv @@ -49,6 +50,7 @@ - mercurial - git - unzip + - gawk - python-dev - python-virtualenv when: ansible_os_family == 'Debian' From 1debc2da44e05282fea216e4b6e14e83d50bb4ea Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 10:34:27 -0500 Subject: [PATCH 3184/3617] Do a full yum update to make sure packages are latest version For the deps setup of integration tests, as we sometimes see odd errors we can't reproduce, which may be related to slightly out of date package dependencies. 
--- .../roles/ansible_test_deps/tasks/main.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 25b19d040e81f3..17198cdc41f18c 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -55,8 +55,12 @@ - python-virtualenv when: ansible_os_family == 'Debian' -- name: update ca certificates - yum: name=ca-certificates state=latest +#- name: update ca certificates +# yum: name=ca-certificates state=latest +# when: ansible_os_family == 'RedHat' + +- name: update all rpm packages + yum: name=* state=latest when: ansible_os_family == 'RedHat' - name: Install ubuntu 12.04 ansible dependencies From a3dcb910b8b8ad1c1ff65c31102cccd68ed31bf9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 10:58:55 -0500 Subject: [PATCH 3185/3617] Fixing bugs with {changed,failed}_when and until with registered vars * Saving of the registered variable was occuring after the tests for changed/failed_when. * Each of the above fields and until were being post_validated too early, so variables which were not defined at that time were causing task failures. 
Fixes #13591 --- lib/ansible/executor/task_executor.py | 11 +++++------ lib/ansible/playbook/task.py | 21 +++++++++++++++++++++ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index 5d7430fad25062..b0a5157a525b01 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -387,7 +387,6 @@ def _execute(self, variables=None): # make a copy of the job vars here, in case we need to update them # with the registered variable value later on when testing conditions - #vars_copy = variables.copy() vars_copy = variables.copy() display.debug("starting attempt loop") @@ -404,6 +403,11 @@ def _execute(self, variables=None): return dict(unreachable=True, msg=to_unicode(e)) display.debug("handler run complete") + # update the local copy of vars with the registered value, if specified, + # or any facts which may have been generated by the module execution + if self._task.register: + vars_copy[self._task.register] = result + if self._task.async > 0: # the async_wrapper module returns dumped JSON via its stdout # response, so we parse it here and replace the result @@ -433,11 +437,6 @@ def _evaluate_failed_when_result(result): return failed_when_result return False - # update the local copy of vars with the registered value, if specified, - # or any facts which may have been generated by the module execution - if self._task.register: - vars_copy[self._task.register] = result - if 'ansible_facts' in result: vars_copy.update(result['ansible_facts']) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 17f1952e39c4f5..825ee502691d36 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -260,6 +260,27 @@ def _post_validate_environment(self, attr, value, templar): break return templar.template(value, convert_bare=True) + def _post_validate_changed_when(self, attr, value, templar): + ''' + changed_when is 
evaluated after the execution of the task is complete, + and should not be templated during the regular post_validate step. + ''' + return value + + def _post_validate_failed_when(self, attr, value, templar): + ''' + failed_when is evaluated after the execution of the task is complete, + and should not be templated during the regular post_validate step. + ''' + return value + + def _post_validate_until(self, attr, value, templar): + ''' + until is evaluated after the execution of the task is complete, + and should not be templated during the regular post_validate step. + ''' + return value + def get_vars(self): all_vars = dict() if self._block: From f2364ecf5f9abcb11112dc7fe7c7eaffb6703bd1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 08:10:57 -0800 Subject: [PATCH 3186/3617] Add a Fedora latest host into the mix --- test/utils/ansible-playbook_integration_runner/main.yml | 7 ++++++- .../roles/ansible_test_deps/tasks/main.yml | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 5d15541490f4ba..9bcda9c71ec3ce 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -22,7 +22,12 @@ image: "ami-96a818fe" ssh_user: "centos" platform: "centos-7-x86_64" - + - distribution: "Fedora" + version: "23" + image: "ami-518bfb3b" + ssh_user: "fedora" + platform: "fedora-23-x86_64" + tasks: - debug: var=ansible_version - include: ec2.yml diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 17198cdc41f18c..16bdde79a058da 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml 
@@ -16,10 +16,10 @@ - name: Install RH epel yum: name="epel-release" state=installed sudo: true - when: ansible_os_family == 'RedHat' + when: ansible_distribution in ('CentOS', 'RedHat') - name: Install RH ansible dependencies - yum: name="{{ item }}" state=installed + package: name="{{ item }}" state=installed sudo: true with_items: - python-pip From 0c154e81f055e07c78acedc8ac310a8011ff8274 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 11:30:14 -0500 Subject: [PATCH 3187/3617] Make integration tests run in parallel with async --- .../roles/run_integration/tasks/main.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 2114567d1522de..980d4a4d32b1fe 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -10,11 +10,21 @@ register: results - shell: ". 
hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" + async: 3600 + poll: 0 + register: async_test_results sudo: true environment: TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}" CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}" args: chdir: "{{ results.stdout }}/ansible" + +- name: poll for test results + async_status: + jid: "{{async_test_results.ansible_job_id}}" register: test_results + until: test_results.finished + retries: 360 + wait: 10 ignore_errors: true From 73a0153b8e3e26ac095e140f6ffa6f8a1d756ff6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 12:44:57 -0500 Subject: [PATCH 3188/3617] Fix typo in integration test runner role --- .../roles/run_integration/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 980d4a4d32b1fe..3eba82854435e0 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -26,5 +26,5 @@ register: test_results until: test_results.finished retries: 360 - wait: 10 + delay: 10 ignore_errors: true From 5d798c2725475b045fb06b46cba08c39bfcfeda8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 18 Dec 2015 12:14:03 -0500 Subject: [PATCH 3189/3617] added missing features to changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 005171ec9a95b3..0a5e7e2b7c103f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -371,6 +371,8 @@ allowed in future versions: explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. 
* environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. * ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. +* random cows are more random +* when: now gets the registered var after the first iteration, making it possible to break out of item loops * Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. Use the default filter if you want to approximate the old behaviour: ``` From 5dbd7c18a1011e5bc922731574815c22a80d5bc6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 18 Dec 2015 13:57:58 -0500 Subject: [PATCH 3190/3617] added note about add_hosts --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a5e7e2b7c103f..17180993a2f8a1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -370,6 +370,7 @@ allowed in future versions: * We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. * environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default. +* add_hosts is much stricter about host name and will prevent invalid names from being added. * ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. * random cows are more random * when: now gets the registered var after the first iteration, making it possible to break out of item loops From 1cc83dd0d968c264c3da4982aa2a658d2e4aeb51 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 11:50:06 -0800 Subject: [PATCH 3191/3617] Make tests that use kennethreitz retry. 
--- test/integration/roles/test_get_url/tasks/main.yml | 9 +++++++++ test/integration/roles/test_uri/tasks/main.yml | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index d7885f0905e887..cbf3b345f18926 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -47,6 +47,12 @@ dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result + # kennethreitz having trouble staying up. Eventually need to install our own + # certs & web server to test this... also need to install and test it with + # a proxy so the complications are inevitable + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/shouldnotexist.html" @@ -65,6 +71,9 @@ dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/kreitz.html" diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml index 18229e6b7cff5d..9ce05938b62cf7 100644 --- a/test/integration/roles/test_uri/tasks/main.yml +++ b/test/integration/roles/test_uri/tasks/main.yml @@ -98,6 +98,12 @@ dest: "{{ output_dir }}/shouldnotexist.html" ignore_errors: True register: result + # kennethreitz having trouble staying up. Eventually need to install our own + # certs & web server to test this... 
also need to install and test it with + # a proxy so the complications are inevitable + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/shouldnotexist.html" @@ -121,6 +127,9 @@ dest: "{{ output_dir }}/kreitz.html" validate_certs: no register: result + until: "'read operation timed out' not in result.msg" + retries: 30 + delay: 10 - stat: path: "{{ output_dir }}/kreitz.html" From 02f65eaa805f39a15e35a813bcd6a1fdc24ade8c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 14:59:05 -0500 Subject: [PATCH 3192/3617] Make integration runner ec2 add_hosts use valid host names --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index 59e15f0da1a877..d4740d957089e6 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -30,7 +30,7 @@ - name: Add hosts group temporary inventory group with pem path add_host: - name: "{{ item.1.platform }} {{ ec2.results[item.0]['instances'][0]['public_ip'] }}" + name: "{{ item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" groups: dynamic_hosts ansible_ssh_host: "{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" ansible_ssh_private_key_file: '{{ pem_path }}' From 0823a2c16f923bd950399dd879b5440356cb8411 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 18 Dec 2015 15:33:44 -0500 Subject: [PATCH 3193/3617] Removing update all for test deps, it didn't fix the problem --- .../roles/ansible_test_deps/tasks/main.yml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 
16bdde79a058da..234eb70f92ad00 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -55,12 +55,8 @@ - python-virtualenv when: ansible_os_family == 'Debian' -#- name: update ca certificates -# yum: name=ca-certificates state=latest -# when: ansible_os_family == 'RedHat' - -- name: update all rpm packages - yum: name=* state=latest +- name: update ca certificates + yum: name=ca-certificates state=latest when: ansible_os_family == 'RedHat' - name: Install ubuntu 12.04 ansible dependencies From 68fe3d856f3a58d4cf84053a803bb5e286d61773 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 14:04:51 -0800 Subject: [PATCH 3194/3617] Fedora 23 needs to have python2 packages installed --- test/utils/ansible-playbook_integration_runner/main.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 9bcda9c71ec3ce..8683ffd5440302 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -33,6 +33,15 @@ - include: ec2.yml when: groups['dynamic_hosts'] is not defined +# Have to hardcode these per-slave. We can't even run setup yet so we can't +# introspect what they have. 
+- hosts: dynamic_hosts + sudo: true + tasks: + - name: Install packages that let setup and package manager modules run + raw: dnf install -y python2 python2-dnf libselinux-python + when: "{{ inventory_hostname }} == 'fedora-23-x86_64'" + - hosts: dynamic_hosts sudo: true vars: From ec60bfbb3f0b88d37b91a2deae2bf6b79a1091dc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 14:36:17 -0800 Subject: [PATCH 3195/3617] Ubuntu images with hvm ssd --- test/utils/ansible-playbook_integration_runner/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 8683ffd5440302..b8942172bce2bd 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -4,12 +4,12 @@ slaves: - distribution: "Ubuntu" version: "12.04" - image: "ami-2ccc7a44" + image: "ami-309ddf5a" ssh_user: "ubuntu" platform: "ubuntu-12.04-x86_64" - distribution: "Ubuntu" version: "14.04" - image: "ami-9a562df2" + image: "ami-d06632ba" ssh_user: "ubuntu" platform: "ubuntu-14.04-x86_64" - distribution: "CentOS" From 26e5bcdb39517e8247e59ac038db7dd641cbb7fa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 14:38:54 -0800 Subject: [PATCH 3196/3617] Bugfix the fedora 23 install task --- test/utils/ansible-playbook_integration_runner/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index b8942172bce2bd..e82e0dea3f2026 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -37,10 +37,11 @@ # introspect what they have. 
- hosts: dynamic_hosts sudo: true + gather_facts: False tasks: - name: Install packages that let setup and package manager modules run raw: dnf install -y python2 python2-dnf libselinux-python - when: "{{ inventory_hostname }} == 'fedora-23-x86_64'" + when: "'{{ inventory_hostname }}' == 'fedora-23-x86_64'" - hosts: dynamic_hosts sudo: true From 78dde62710bd63f931bce21cf4352994a5a36873 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 15:14:38 -0800 Subject: [PATCH 3197/3617] What is going on here --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index d4740d957089e6..c6971486ec30e1 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -28,6 +28,8 @@ - name: Wait a little longer for centos pause: seconds=20 +- debug: var=ec2.results + - name: Add hosts group temporary inventory group with pem path add_host: name: "{{ item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" From f7ed33378e234542950b992499e848a8284cc2fa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 15:42:41 -0800 Subject: [PATCH 3198/3617] Fix the fedora host detection --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 -- test/utils/ansible-playbook_integration_runner/main.yml | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index c6971486ec30e1..d4740d957089e6 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -28,8 +28,6 @@ - name: Wait a little longer for centos pause: seconds=20 -- debug: var=ec2.results - - name: Add hosts group temporary inventory group with pem path 
add_host: name: "{{ item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}" diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index e82e0dea3f2026..4aa17d11c1fae6 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -41,7 +41,7 @@ tasks: - name: Install packages that let setup and package manager modules run raw: dnf install -y python2 python2-dnf libselinux-python - when: "'{{ inventory_hostname }}' == 'fedora-23-x86_64'" + when: "'fedora-23' in '{{ inventory_hostname }}'" - hosts: dynamic_hosts sudo: true From 3197eeaaa8d49c862fcb98165bcb254c74e10f4e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 22:16:49 -0800 Subject: [PATCH 3199/3617] update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 9366dfb63e565c..15c1c0cca79196 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 9366dfb63e565c9e0901d714be8832fc89b275d6 +Subproject commit 15c1c0cca79196d4dde630db2a7eee90367051cc diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 06bdec0cac86ef..c6829752d85239 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 06bdec0cac86ef2339e0b4d8a4616ee24619956f +Subproject commit c6829752d852398c255704cd5d7faa54342e143e From 07a00593066cb439f0b9aea4e815259cc8a2ec75 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 22:23:25 -0800 Subject: [PATCH 3200/3617] update submodule ref for doc fix --- lib/ansible/modules/core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 15c1c0cca79196..fcb3397df7944f 160000 --- 
a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 15c1c0cca79196d4dde630db2a7eee90367051cc +Subproject commit fcb3397df7944ff15ea698b5717c06e8fc7d43ba From d2ad17e88f5f1bc2ed7282ec4322aaffd869834a Mon Sep 17 00:00:00 2001 From: Matt Clay Date: Sat, 19 Dec 2015 00:08:49 -0800 Subject: [PATCH 3201/3617] Fixed import typo for memcache module in tests. The typo caused the test for the memcached cache plugin to be skipped even when the necessary memcache python module was installed. --- test/units/plugins/cache/test_cache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py index 0547ba55bf0126..cd82e1ef2c8ed3 100644 --- a/test/units/plugins/cache/test_cache.py +++ b/test/units/plugins/cache/test_cache.py @@ -26,7 +26,7 @@ HAVE_MEMCACHED = True try: - import memcached + import memcache except ImportError: HAVE_MEMCACHED = False else: From 6127a8585e8eaea159ed5fd91c3ddb61b2d25dc8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 19 Dec 2015 11:45:59 -0500 Subject: [PATCH 3202/3617] removed invocation info as it is not no_log aware This was added in 1.9 and 2.0 tried to copy, but since it cannot obey no_log restrictions I commented it out. I did not remove as it is still very useful for module invocation debugging. --- lib/ansible/plugins/action/__init__.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e9b18651d66b72..c363a47ec32e41 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -82,13 +82,14 @@ def run(self, tmp=None, task_vars=None): * Module parameters. 
These are stored in self._task.args """ - # store the module invocation details into the results results = {} - if self._task.async == 0: - results['invocation'] = dict( - module_name = self._task.action, - module_args = self._task.args, - ) + # This does not respect no_log set by module args, left here for debugging module invocation + #if self._task.async == 0: + # # store the module invocation details into the results + # results['invocation'] = dict( + # module_name = self._task.action, + # module_args = self._task.args, + # ) return results def _configure_module(self, module_name, module_args, task_vars=None): From c63ae9948543a3f73ae17dc4eecae7b22fb62947 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 10:10:38 -0800 Subject: [PATCH 3203/3617] Make sure that yum is present on redhat family systems (makes things also work on fedora systems where dnf is the default) --- .../roles/ansible_test_deps/tasks/main.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 234eb70f92ad00..89f7382a1e4295 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -37,6 +37,8 @@ - python-devel - libselinux-python - python-virtualenv + - yum + - yum-metadata-parser when: ansible_os_family == 'RedHat' - name: Install Debian ansible dependencies From 2936682f004d9d3fc349e31113607636e971b71b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:09:20 -0800 Subject: [PATCH 3204/3617] Revert "removed invocation info as it is not no_log aware" This reverts commit 6127a8585e8eaea159ed5fd91c3ddb61b2d25dc8. 
--- lib/ansible/plugins/action/__init__.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index c363a47ec32e41..e9b18651d66b72 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -82,14 +82,13 @@ def run(self, tmp=None, task_vars=None): * Module parameters. These are stored in self._task.args """ + # store the module invocation details into the results results = {} - # This does not respect no_log set by module args, left here for debugging module invocation - #if self._task.async == 0: - # # store the module invocation details into the results - # results['invocation'] = dict( - # module_name = self._task.action, - # module_args = self._task.args, - # ) + if self._task.async == 0: + results['invocation'] = dict( + module_name = self._task.action, + module_args = self._task.args, + ) return results def _configure_module(self, module_name, module_args, task_vars=None): From d32a885e98f9154f5c74afba482b4299a2e2be5e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:24:59 -0800 Subject: [PATCH 3205/3617] Make return invocation information so that our sanitized copy will take precedence over what the executor knows. 
--- lib/ansible/module_utils/basic.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 62b8cadfd61c2b..4870ed096dd4c4 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1431,7 +1431,6 @@ def _log_invocation(self): self.log(msg, log_args=log_args) - def _set_cwd(self): try: cwd = os.getcwd() @@ -1524,6 +1523,8 @@ def exit_json(self, **kwargs): self.add_path_info(kwargs) if not 'changed' in kwargs: kwargs['changed'] = False + if 'invocation' not in kwargs: + kwargs['invocation'] = self.params kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) @@ -1534,6 +1535,8 @@ def fail_json(self, **kwargs): self.add_path_info(kwargs) assert 'msg' in kwargs, "implementation error -- msg to explain the error is required" kwargs['failed'] = True + if 'invocation' not in kwargs: + kwargs['invocation'] = self.params kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) From 51cca87d67823f4edfc4e05bf3e5a4070e494113 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:27:16 -0800 Subject: [PATCH 3206/3617] Also need redhat-rpm-config to compile pycrypto --- .../roles/ansible_test_deps/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index 89f7382a1e4295..de08126b82de25 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -39,6 +39,7 @@ - python-virtualenv - yum - yum-metadata-parser + - redhat-rpm-config when: ansible_os_family == 'RedHat' - name: Install Debian ansible dependencies From 
8ffc1fa838d7e984f4a99568021660cbbd243550 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:31:46 -0800 Subject: [PATCH 3207/3617] Comment to explain why we strip _ansible_notify specially --- lib/ansible/plugins/action/normal.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py index bf93fdad2d73f7..f9b55e1ff57aa8 100644 --- a/lib/ansible/plugins/action/normal.py +++ b/lib/ansible/plugins/action/normal.py @@ -28,11 +28,13 @@ def run(self, tmp=None, task_vars=None): results = super(ActionModule, self).run(tmp, task_vars) results.update(self._execute_module(tmp=tmp, task_vars=task_vars)) - # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. - for field in ('ansible_notify',): + # + # We don't want modules to determine that running the module fires + # notify handlers. That's for the playbook to decide. 
+ for field in ('_ansible_notify',): if field in results: results.pop(field) From 224d5963361deb33107e5f38fd28a4d5197f931e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 11:51:16 -0800 Subject: [PATCH 3208/3617] Remove args from get_name() as we can't tell if any of the args are no_log --- lib/ansible/playbook/task.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index 825ee502691d36..fb7578647451b0 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -107,11 +107,10 @@ def get_name(self): elif self.name: return self.name else: - flattened_args = self._merge_kv(self.args) if self._role: - return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args) + return "%s : %s" % (self._role.get_name(), self.action) else: - return "%s %s" % (self.action, flattened_args) + return "%s" % (self.action,) def _merge_kv(self, ds): if ds is None: From 9abef1a1d7e8df5e580e17ef4a54cec280fbc7dc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 12:39:48 -0800 Subject: [PATCH 3209/3617] Troubleshooting has reduced us to this --- test/integration/roles/test_get_url/tasks/main.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index cbf3b345f18926..54debc06d10388 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -96,12 +96,22 @@ register: get_url_result ignore_errors: True +- name: TROUBLESHOOTING + shell: curl https://foo.sni.velox.ch/ > /var/tmp/velox.html + register: trouble + ignore_errors: True + when: "{{ python_has_ssl_context }}" + +- debug: var=trouble + when: "{{ python_has_ssl_context }}" + +- debug: var=get_url_result + when: "{{ python_has_ssl_context }}" + - command: "grep 'sent the following TLS 
server name indication extension' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" -# If distros start backporting SNI, can make a new conditional based on whether this works: -# python -c 'from ssl import SSLContext' - debug: var=get_url_result - name: Assert that SNI works with this python version assert: From e66c070e5c0d50f0a90fcd3b73044a6faeef7c81 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 13:00:58 -0800 Subject: [PATCH 3210/3617] Add package module to squash list --- lib/ansible/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 7f74358dd5dca9..5df9602246ae4a 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -201,7 +201,7 @@ def load_config_file(): # the module takes both, bad things could happen. # In the future we should probably generalize this even further # (mapping of param: squash field) -DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, yum, pkgng, zypper, dnf", islist=True) +DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, dnf, package, pkgng, yum, zypper", islist=True) # paths DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True) DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True) From bb2935549f38a83670baadb74041ef98902e0640 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 19 Dec 2015 16:14:56 -0500 Subject: [PATCH 3211/3617] corrected service detection in docker versions now if 1 == bash it falls back into tool detection --- lib/ansible/module_utils/facts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py index 94a5a11f726cce..796ebc92bdd181 100644 --- a/lib/ansible/module_utils/facts.py +++ b/lib/ansible/module_utils/facts.py @@ -555,8 +555,8 @@ def get_service_mgr_facts(self): if proc_1 is None: rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True) - if proc_1 in ['init', '/sbin/init']: - # many systems return init, so this cannot be trusted + if proc_1 in ['init', '/sbin/init', 'bash']: + # many systems return init, so this cannot be trusted, bash is from docker proc_1 = None # if not init/None it should be an identifiable or custom init, so we are done! From e2d9f4e2f272c6010b0c00257aa695c1606e05ab Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 15:49:06 -0800 Subject: [PATCH 3212/3617] Fix unittests for return of invocation from fail_json and exit_json --- test/units/module_utils/basic/test_exit_json.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py index 66610ec3ed3329..931447f8ab6f5c 100644 --- a/test/units/module_utils/basic/test_exit_json.py +++ b/test/units/module_utils/basic/test_exit_json.py @@ -56,7 +56,7 @@ def test_exit_json_no_args_exits(self): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=False)) + self.assertEquals(return_val, dict(changed=False, invocation={})) def test_exit_json_args_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -67,7 +67,7 @@ def test_exit_json_args_exits(self): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", changed=False)) + self.assertEquals(return_val, dict(msg="message", changed=False, invocation={})) def test_fail_json_exits(self): with 
self.assertRaises(SystemExit) as ctx: @@ -78,13 +78,13 @@ def test_fail_json_exits(self): else: self.assertEquals(ctx.exception.code, 1) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", failed=True)) + self.assertEquals(return_val, dict(msg="message", failed=True, invocation={})) def test_exit_json_proper_changed(self): with self.assertRaises(SystemExit) as ctx: self.module.exit_json(changed=True, msg='success') return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=True, msg='success')) + self.assertEquals(return_val, dict(changed=True, msg='success', invocation={})) @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): @@ -94,19 +94,22 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/', - not_secret='following the leader', changed=False, msg='here') + not_secret='following the leader', changed=False, msg='here', + invocation=dict(password=OMIT, token=None, username='person')), ), (dict(username='person', password='password12345'), dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/', - not_secret='following the leader', changed=False, msg='here') + not_secret='following the leader', changed=False, msg='here', + invocation=dict(password=OMIT, token=None, username='person')), ), (dict(username='person', password='$ecret k3y'), dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, 
url='https://username:********@foo.com/login/', - not_secret='following the leader', changed=False, msg='here') + not_secret='following the leader', changed=False, msg='here', + invocation=dict(password=OMIT, token=None, username='person')), ), ) From 3ec0104128103c4c37c117b5ef4548733245bcf4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 19 Dec 2015 12:49:06 -0500 Subject: [PATCH 3213/3617] Fixing bugs in conditional testing with until and some integration runner tweaks --- lib/ansible/executor/task_executor.py | 8 ++--- lib/ansible/playbook/conditional.py | 36 +++++++++---------- lib/ansible/playbook/task.py | 2 +- .../main.yml | 2 +- .../roles/ansible_test_deps/tasks/main.yml | 1 + .../roles/run_integration/tasks/main.yml | 17 ++++----- 6 files changed, 34 insertions(+), 32 deletions(-) diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index b0a5157a525b01..c8b6fa179bcd46 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -35,7 +35,7 @@ from ansible.utils.encrypt import key_for_hostname from ansible.utils.listify import listify_lookup_plugin_terms from ansible.utils.unicode import to_unicode -from ansible.vars.unsafe_proxy import UnsafeProxy +from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var try: from __main__ import display @@ -406,7 +406,7 @@ def _execute(self, variables=None): # update the local copy of vars with the registered value, if specified, # or any facts which may have been generated by the module execution if self._task.register: - vars_copy[self._task.register] = result + vars_copy[self._task.register] = wrap_var(result.copy()) if self._task.async > 0: # the async_wrapper module returns dumped JSON via its stdout @@ -453,7 +453,7 @@ def _evaluate_failed_when_result(result): if attempt < retries - 1: cond = Conditional(loader=self._loader) - cond.when = self._task.until + cond.when = [ self._task.until ] if cond.evaluate_conditional(templar, 
vars_copy): break @@ -466,7 +466,7 @@ def _evaluate_failed_when_result(result): # do the final update of the local variables here, for both registered # values and any facts which may have been created if self._task.register: - variables[self._task.register] = result + variables[self._task.register] = wrap_var(result) if 'ansible_facts' in result: variables.update(result['ansible_facts']) diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py index fc178e2fa1df83..c8c6a9359ec186 100644 --- a/lib/ansible/playbook/conditional.py +++ b/lib/ansible/playbook/conditional.py @@ -22,7 +22,7 @@ from jinja2.exceptions import UndefinedError from ansible.compat.six import text_type -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleUndefinedVariable from ansible.playbook.attribute import FieldAttribute from ansible.template import Templar @@ -89,16 +89,22 @@ def _check_conditional(self, conditional, templar, all_vars): # make sure the templar is using the variables specifed to this method templar.set_available_variables(variables=all_vars) - conditional = templar.template(conditional) - if not isinstance(conditional, basestring) or conditional == "": - return conditional - - # a Jinja2 evaluation that results in something Python can eval! - presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional - conditional = templar.template(presented, fail_on_undefined=False) - - val = conditional.strip() - if val == presented: + try: + conditional = templar.template(conditional) + if not isinstance(conditional, text_type) or conditional == "": + return conditional + + # a Jinja2 evaluation that results in something Python can eval! 
+ presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional + conditional = templar.template(presented) + val = conditional.strip() + if val == "True": + return True + elif val == "False": + return False + else: + raise AnsibleError("unable to evaluate conditional: %s" % original) + except (AnsibleUndefinedVariable, UndefinedError) as e: # the templating failed, meaning most likely a # variable was undefined. If we happened to be # looking for an undefined variable, return True, @@ -108,11 +114,5 @@ def _check_conditional(self, conditional, templar, all_vars): elif "is defined" in original: return False else: - raise AnsibleError("error while evaluating conditional: %s (%s)" % (original, presented)) - elif val == "True": - return True - elif val == "False": - return False - else: - raise AnsibleError("unable to evaluate conditional: %s" % original) + raise AnsibleError("error while evaluating conditional (%s): %s" % (original, e)) diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py index fb7578647451b0..62b8cbc999bace 100644 --- a/lib/ansible/playbook/task.py +++ b/lib/ansible/playbook/task.py @@ -82,7 +82,7 @@ class Task(Base, Conditional, Taggable, Become): _poll = FieldAttribute(isa='int') _register = FieldAttribute(isa='string') _retries = FieldAttribute(isa='int', default=3) - _until = FieldAttribute(isa='list') + _until = FieldAttribute(isa='string') def __init__(self, block=None, role=None, task_include=None): ''' constructors a task, without the Task.load classmethod, it will be pretty blank ''' diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 4aa17d11c1fae6..27c4ae51b0d9ae 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -74,4 +74,4 @@ - name: Fail shell: 'echo "{{ inventory_hostname }}, Failed" && exit 1' - when: "test_results.rc != 0" + when: 
"'rc' not in test_results or test_results.rc != 0" diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index de08126b82de25..d9611497e91028 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -59,6 +59,7 @@ when: ansible_os_family == 'Debian' - name: update ca certificates + sudo: true yum: name=ca-certificates state=latest when: ansible_os_family == 'RedHat' diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 3eba82854435e0..2d01999dbfd10d 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -6,10 +6,12 @@ - name: Get ansible source dir sudo: false - shell: "cd ~ && pwd" + shell: "cd ~/ansible && pwd" register: results -- shell: ". hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" +- shell: "ls -la && . 
hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" + args: + chdir: "{{ results.stdout }}" async: 3600 poll: 0 register: async_test_results @@ -17,14 +19,13 @@ environment: TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}" CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}" - args: - chdir: "{{ results.stdout }}/ansible" - name: poll for test results - async_status: - jid: "{{async_test_results.ansible_job_id}}" + async_status: jid="{{async_test_results.ansible_job_id}}" register: test_results until: test_results.finished - retries: 360 - delay: 10 + retries: 120 + delay: 30 ignore_errors: true + +- debug: var=test_results From 3da312da9c1a92d5e8f47f3274338e4ef476b5a6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 19 Dec 2015 23:11:25 -0800 Subject: [PATCH 3214/3617] Switch from yum to package when installing sudo so that dnf is handled as well --- .../roles/ansible_test_deps/tasks/main.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml index d9611497e91028..832138527f9f56 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml @@ -4,14 +4,8 @@ when: ansible_os_family == 'Debian' - name: Install sudo - yum: name=sudo state=installed + package: name=sudo state=installed ignore_errors: true - when: ansible_os_family == 'RedHat' - -- name: Install sudo - apt: name=sudo state=installed - ignore_errors: true - when: ansible_os_family == 'Debian' - name: Install RH epel yum: name="epel-release" state=installed From 6ec58bbd5f86bd4f2ca8aa6e7af78ee8ef28ee98 Mon Sep 17 00:00:00 2001 From: Branko Majic Date: Sun, 20 Dec 
2015 14:19:20 +0100 Subject: [PATCH 3215/3617] Adding documentation for the 'dig' lookup (#13126). --- docsite/rst/playbooks_lookups.rst | 106 ++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst index 25560e284d4b2e..3c2222c337b5be 100644 --- a/docsite/rst/playbooks_lookups.rst +++ b/docsite/rst/playbooks_lookups.rst @@ -240,6 +240,112 @@ If you're not using 2.0 yet, you can do something similar with the credstash too debug: msg="Poor man's credstash lookup! {{ lookup('pipe', 'credstash -r us-west-1 get my-other-password') }}" +.. _dns_lookup: + +The DNS Lookup (dig) +```````````````````` +.. versionadded:: 1.9.0 + +.. warning:: This lookup depends on the `dnspython `_ + library. + +The ``dig`` lookup runs queries against DNS servers to retrieve DNS records for +a specific name (*FQDN* - fully qualified domain name). It is possible to lookup any DNS record in this manner. + +There is a couple of different syntaxes that can be used to specify what record +should be retrieved, and for which name. It is also possible to explicitly +specify the DNS server(s) to use for lookups. + +In its simplest form, the ``dig`` lookup plugin can be used to retrieve an IPv4 +address (DNS ``A`` record) associated with *FQDN*: + +.. note:: If you need to obtain the ``AAAA`` record (IPv6 address), you must + specify the record type explicitly. Syntax for specifying the record + type is described below. + +.. note:: The trailing dot in most of the examples listed is purely optional, + but is specified for completeness/correctness sake. + +:: + + - debug: msg="The IPv4 address for example.com. is {{ lookup('dig', 'example.com.')}}" + +In addition to (default) ``A`` record, it is also possible to specify a different +record type that should be queried. 
This can be done by either passing-in +additional parameter of format ``qtype=TYPE`` to the ``dig`` lookup, or by +appending ``/TYPE`` to the *FQDN* being queried. For example:: + + - debug: msg="The TXT record for gmail.com. is {{ lookup('dig', 'gmail.com.', 'qtype=TXT') }}" + - debug: msg="The TXT record for gmail.com. is {{ lookup('dig', 'gmail.com./TXT') }}" + +If multiple values are associated with the requested record, the results will be +returned as a comma-separated list. In such cases you may want to pass option +``wantlist=True`` to the plugin, which will result in the record values being +returned as a list over which you can iterate later on:: + + - debug: msg="One of the MX records for gmail.com. is {{ item }}" + with_items: "{{ lookup('dig', 'gmail.com./MX', wantlist=True) }}" + +In case of reverse DNS lookups (``PTR`` records), you can also use a convenience +syntax of format ``IP_ADDRESS/PTR``. The following three lines would produce the +same output:: + + - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8/PTR') }}" + - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8.in-addr.arpa./PTR') }}" + - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8.in-addr.arpa.', 'qtype=PTR') }}" + +By default, the lookup will rely on system-wide configured DNS servers for +performing the query. It is also possible to explicitly specify DNS servers to +query using the ``@DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N`` notation. This +needs to be passed-in as an additional parameter to the lookup. For example:: + + - debug: msg="Querying 8.8.8.8 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@8.8.8.8') }}" + +In some cases the DNS records may hold a more complex data structure, or it may +be useful to obtain the results in a form of a dictionary for future +processing. The ``dig`` lookup supports parsing of a number of such records, +with the result being returned as a dictionary. 
This way it is possible to +easily access such nested data. This return format can be requested by +passing-in the ``flat=0`` option to the lookup. For example:: + + - debug: msg="XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}" + with_items: "{{ lookup('dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}" + +Take note that due to the way Ansible lookups work, you must pass the +``wantlist=True`` argument to the lookup, otherwise Ansible will report errors. + +Currently the dictionary results are supported for the following records: + +.. note:: *ALL* is not a record per-se, merely the listed fields are available + for any record results you retrieve in the form of a dictionary. + +========== ============================================================================= +Record Fields +---------- ----------------------------------------------------------------------------- +*ALL* owner, ttl, type +A address +AAAA address +CNAME target +DNAME target +DLV algorithm, digest_type, key_tag, digest +DNSKEY flags, algorithm, protocol, key +DS algorithm, digest_type, key_tag, digest +HINFO cpu, os +LOC latitude, longitude, altitude, size, horizontal_precision, vertical_precision +MX preference, exchange +NAPTR order, preference, flags, service, regexp, replacement +NS target +NSEC3PARAM algorithm, flags, iterations, salt +PTR target +RP mbox, txt +SOA mname, rname, serial, refresh, retry, expire, minimum +SPF strings +SRV priority, weight, port, target +SSHFP algorithm, fp_type, fingerprint +TLSA usage, selector, mtype, cert +TXT strings +========== ============================================================================= + .. 
_more_lookups: More Lookups From b90506341ac77c4885efe754ae401b90b0f61a7f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 08:06:26 -0800 Subject: [PATCH 3216/3617] Fixes for tests that assumed yum as package manager for systems that have dnf --- .../roles/ec2_elb_instance_setup/tasks/main.yml | 7 ++++++- .../roles/setup_postgresql_db/tasks/main.yml | 8 ++++---- test/integration/roles/test_apt/tasks/main.yml | 1 - .../test_docker/tasks/docker-setup-rht.yml | 17 ++++++++--------- .../roles/test_unarchive/tasks/main.yml | 4 ++++ test/integration/roles/test_yum/tasks/main.yml | 2 ++ 6 files changed, 24 insertions(+), 15 deletions(-) diff --git a/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml b/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml index 341392b00c76f2..79584893ed84af 100644 --- a/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml +++ b/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml @@ -5,7 +5,12 @@ # install apache on the ec2 instances - name: install apache on new ec2 instances - yum: name=httpd + package: name=httpd + when: ansible_os_family == 'RedHat' + +- name: install apache on new ec2 instances + package: name=apache + when: ansible_os_family == 'Debian' - name: start and enable apache service: name=httpd state=started enabled=yes diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml index fbcc9cab72536f..c25318a2adcab2 100644 --- a/test/integration/roles/setup_postgresql_db/tasks/main.yml +++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml @@ -9,9 +9,9 @@ # Make sure we start fresh - name: remove rpm dependencies for postgresql test - yum: name={{ item }} state=absent + package: name={{ item }} state=absent with_items: postgresql_packages - when: ansible_pkg_mgr == 'yum' + when: ansible_os_family == "RedHat" - name: remove dpkg dependencies for postgresql test apt: name={{ item }} 
state=absent @@ -35,9 +35,9 @@ when: ansible_os_family == "Debian" - name: install rpm dependencies for postgresql test - yum: name={{ item }} state=latest + package: name={{ item }} state=latest with_items: postgresql_packages - when: ansible_pkg_mgr == 'yum' + when: ansible_os_family == "RedHat" - name: install dpkg dependencies for postgresql test apt: name={{ item }} state=latest diff --git a/test/integration/roles/test_apt/tasks/main.yml b/test/integration/roles/test_apt/tasks/main.yml index 8976087371d4fc..552b543d2d3d7b 100644 --- a/test/integration/roles/test_apt/tasks/main.yml +++ b/test/integration/roles/test_apt/tasks/main.yml @@ -1,4 +1,3 @@ -# test code for the yum module # (c) 2014, James Tanner # This file is part of Ansible diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml index 3ba234ecffca5f..c25821c3be0d64 100644 --- a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml +++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml @@ -1,18 +1,17 @@ -- name: Install docker packages (yum) - yum: +- name: Install docker packages (rht family) + package: state: present name: docker-io,docker-registry,python-docker-py,nginx -- name: Install netcat - yum: +- name: Install netcat (Fedora) + package: state: present name: nmap-ncat - # RHEL7 as well... 
- when: ansible_distribution == 'Fedora' + when: ansible_distribution == 'Fedora' or (ansible_os_family == 'RedHat' and ansible_distribution_version|version_compare('>=', 7)) -- name: Install netcat - yum: +- name: Install netcat (RHEL) + package: state: present name: nc - when: ansible_distribution != 'Fedora' + when: ansible_distribution != 'Fedora' and (ansible_os_family == 'RedHat' and ansible_distribution_version|version_compare('<', 7)) diff --git a/test/integration/roles/test_unarchive/tasks/main.yml b/test/integration/roles/test_unarchive/tasks/main.yml index c26d3aeb101d8d..e4f438e5256e0f 100644 --- a/test/integration/roles/test_unarchive/tasks/main.yml +++ b/test/integration/roles/test_unarchive/tasks/main.yml @@ -21,6 +21,10 @@ yum: name=zip state=latest when: ansible_pkg_mgr == 'yum' +- name: Ensure zip is present to create test archive (dnf) + dnf: name=zip state=latest + when: ansible_pkg_mgr == 'dnf' + - name: Ensure zip is present to create test archive (apt) apt: name=zip state=latest when: ansible_pkg_mgr == 'apt' diff --git a/test/integration/roles/test_yum/tasks/main.yml b/test/integration/roles/test_yum/tasks/main.yml index 5df887ae9f9bad..b17af6b465b8e8 100644 --- a/test/integration/roles/test_yum/tasks/main.yml +++ b/test/integration/roles/test_yum/tasks/main.yml @@ -16,6 +16,8 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +# Note: We install the yum package onto Fedora so that this will work on dnf systems +# We want to test that for people who don't want to upgrade their systems. 
- include: 'yum.yml' when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora'] From 5fef2c429763db8d088a20c97320936ee06e7fc8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 09:11:53 -0800 Subject: [PATCH 3217/3617] Try updating the centos7 image to a newer version (trying to resolve issue being unable to connect to some webservers) --- test/utils/ansible-playbook_integration_runner/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml index 27c4ae51b0d9ae..f1bd26b7eadc33 100644 --- a/test/utils/ansible-playbook_integration_runner/main.yml +++ b/test/utils/ansible-playbook_integration_runner/main.yml @@ -19,7 +19,7 @@ platform: "centos-6.5-x86_64" - distribution: "CentOS" version: "7" - image: "ami-96a818fe" + image: "ami-61bbf104" ssh_user: "centos" platform: "centos-7-x86_64" - distribution: "Fedora" From 6ae04c1e4f698629610030a74f5bb5fc501f5a1e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 20 Dec 2015 12:37:24 -0500 Subject: [PATCH 3218/3617] Fix logic in PlayIterator when inserting tasks during rescue/always Because the fail_state is potentially non-zero in these block sections, the prior logic led to included tasks not being inserted at all. 
Related issue: #13605 --- lib/ansible/executor/play_iterator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 795eed2a8c1a49..534f216c30a134 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -397,7 +397,7 @@ def _search_state(state, task): def _insert_tasks_into_state(self, state, task_list): # if we've failed at all, or if the task list is empty, just return the current state - if state.fail_state != self.FAILED_NONE or not task_list: + if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list: return state if state.run_state == self.ITERATING_TASKS: From 8d7892cc7b7a95c4efda003c8b187d1bc4875a5f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 10:13:33 -0800 Subject: [PATCH 3219/3617] Done troubleshooting Revert "Troubleshooting has reduced us to this" This reverts commit 9abef1a1d7e8df5e580e17ef4a54cec280fbc7dc. 
--- test/integration/roles/test_get_url/tasks/main.yml | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 54debc06d10388..cbf3b345f18926 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -96,22 +96,12 @@ register: get_url_result ignore_errors: True -- name: TROUBLESHOOTING - shell: curl https://foo.sni.velox.ch/ > /var/tmp/velox.html - register: trouble - ignore_errors: True - when: "{{ python_has_ssl_context }}" - -- debug: var=trouble - when: "{{ python_has_ssl_context }}" - -- debug: var=get_url_result - when: "{{ python_has_ssl_context }}" - - command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" +# If distros start backporting SNI, can make a new conditional based on whether this works: +# python -c 'from ssl import SSLContext' - debug: var=get_url_result - name: Assert that SNI works with this python version assert: From 3792a586b51ce598ab71bfab004a4bd97f004101 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 11:33:42 -0800 Subject: [PATCH 3220/3617] Since the velox test server seems to be dropping using iptables to drop requests from aws, test via a different website instead --- .../roles/test_get_url/tasks/main.yml | 45 +++++++++++++++---- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index cbf3b345f18926..a0ff3797a87356 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -85,23 +85,51 @@ - "result.changed == true" - "stat_result.stat.exists == true" -# SNI Tests -# SNI is only built into the stdlib from python-2.7.9 onwards +# At the 
moment, AWS can't make an https request to velox.ch... connection +# timed out. So we'll use a different test until/unless the problem is resolved +## SNI Tests +## SNI is only built into the stdlib from python-2.7.9 onwards +#- name: Test that SNI works +# get_url: +# # A test site that returns a page with information on what SNI information +# # the client sent. A failure would have the string: did not send a TLS server name indication extension +# url: 'https://foo.sni.velox.ch/' +# dest: "{{ output_dir }}/sni.html" +# register: get_url_result +# ignore_errors: True +# +#- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" +# register: data_result +# when: "{{ python_has_ssl_context }}" +# +#- debug: var=get_url_result +#- name: Assert that SNI works with this python version +# assert: +# that: +# - 'data_result.rc == 0' +# - '"failed" not in get_url_result' +# when: "{{ python_has_ssl_context }}" +# +## If the client doesn't support SNI then get_url should have failed with a certificate mismatch +#- name: Assert that hostname verification failed because SNI is not supported on this version of python +# assert: +# that: +# - 'get_url_result["failed"]' +# when: "{{ not python_has_ssl_context }}" + +# These tests are just side effects of how the site is hosted. It's not +# specifically a test site. So the tests may break due to the hosting changing - name: Test that SNI works get_url: - # A test site that returns a page with information on what SNI information - # the client sent. A failure would have the string: did not send a TLS server name indication extension - url: 'https://foo.sni.velox.ch/' + url: 'https://www.mnot.net/blog/2014/05/09/if_you_can_read_this_youre_sniing' dest: "{{ output_dir }}/sni.html" register: get_url_result ignore_errors: True -- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html" +- command: "grep '

If You Can Read This, You're SNIing

' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" -# If distros start backporting SNI, can make a new conditional based on whether this works: -# python -c 'from ssl import SSLContext' - debug: var=get_url_result - name: Assert that SNI works with this python version assert: @@ -116,3 +144,4 @@ that: - 'get_url_result["failed"]' when: "{{ not python_has_ssl_context }}" +# End hacky SNI test section From 21ca0ce1ce12eb4e487d479abdc355972d2c2309 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 11:46:49 -0800 Subject: [PATCH 3221/3617] Fix test playbook syntax --- test/integration/roles/test_get_url/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index a0ff3797a87356..630287c98712de 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -126,7 +126,7 @@ register: get_url_result ignore_errors: True -- command: "grep '

If You Can Read This, You're SNIing

' {{ output_dir}}/sni.html" +- command: "grep '

If You Can Read This, You\\'re SNIing

' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" From 6963955cb4a607c8548669136cb266c25d9f9ceb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 20 Dec 2015 11:51:32 -0800 Subject: [PATCH 3222/3617] And change the task a little more since different shlex versions are handling the quotes differently --- test/integration/roles/test_get_url/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml index 630287c98712de..9ed0549ec47124 100644 --- a/test/integration/roles/test_get_url/tasks/main.yml +++ b/test/integration/roles/test_get_url/tasks/main.yml @@ -126,7 +126,7 @@ register: get_url_result ignore_errors: True -- command: "grep '

If You Can Read This, You\\'re SNIing

' {{ output_dir}}/sni.html" +- command: "grep '

If You Can Read This, You.re SNIing

' {{ output_dir}}/sni.html" register: data_result when: "{{ python_has_ssl_context }}" From b85b92ecdd03429fd84d384a495fbb5894da9ab0 Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 14 Dec 2015 14:23:44 +0100 Subject: [PATCH 3223/3617] cloudstack: test_cs_instance: more integration tests cloudstack: extend test_cs_instance addressing recovering cloudstack: test_cs_instance: add tests for using display_name as indentifier. --- .../roles/test_cs_instance/tasks/absent.yml | 20 ++ .../tasks/absent_display_name.yml | 43 +++++ .../roles/test_cs_instance/tasks/cleanup.yml | 6 - .../roles/test_cs_instance/tasks/main.yml | 5 + .../roles/test_cs_instance/tasks/present.yml | 37 +++- .../tasks/present_display_name.yml | 176 ++++++++++++++++++ .../roles/test_cs_instance/tasks/setup.yml | 8 - 7 files changed, 272 insertions(+), 23 deletions(-) create mode 100644 test/integration/roles/test_cs_instance/tasks/absent_display_name.yml create mode 100644 test/integration/roles/test_cs_instance/tasks/present_display_name.yml diff --git a/test/integration/roles/test_cs_instance/tasks/absent.yml b/test/integration/roles/test_cs_instance/tasks/absent.yml index bafb3ec9e7621b..eeab47a61d790c 100644 --- a/test/integration/roles/test_cs_instance/tasks/absent.yml +++ b/test/integration/roles/test_cs_instance/tasks/absent.yml @@ -21,3 +21,23 @@ that: - instance|success - not instance|changed + +- name: test recover to stopped state and update a deleted instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + state: stopped + register: instance +- name: verify test recover to stopped state and update a deleted instance + assert: + that: + - instance|success + - instance|changed + - instance.state == "Stopped" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + +# force expunge, only works with admin permissions +- cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + 
state: expunged + failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml b/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml new file mode 100644 index 00000000000000..35fa6dff34f1e2 --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml @@ -0,0 +1,43 @@ +--- +- name: test destroy instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: absent + register: instance +- name: verify destroy instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.state == "Destroyed" + +- name: test destroy instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: absent + register: instance +- name: verify destroy instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + +- name: test recover to stopped state and update a deleted instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + state: stopped + register: instance +- name: verify test recover to stopped state and update a deleted instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.state == "Stopped" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + +# force expunge, only works with admin permissions +- cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: expunged + failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/cleanup.yml b/test/integration/roles/test_cs_instance/tasks/cleanup.yml index 63192dbd608c8d..e6b6550dfa1358 100644 --- a/test/integration/roles/test_cs_instance/tasks/cleanup.yml +++ b/test/integration/roles/test_cs_instance/tasks/cleanup.yml @@ -28,9 
+28,3 @@ assert: that: - sg|success - -# force expunge, only works with admin permissions -- cs_instance: - name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" - state: expunged - failed_when: false diff --git a/test/integration/roles/test_cs_instance/tasks/main.yml b/test/integration/roles/test_cs_instance/tasks/main.yml index d1a67e178100a8..d6475a4766448f 100644 --- a/test/integration/roles/test_cs_instance/tasks/main.yml +++ b/test/integration/roles/test_cs_instance/tasks/main.yml @@ -4,3 +4,8 @@ - include: tags.yml - include: absent.yml - include: cleanup.yml + +- include: setup.yml +- include: present_display_name.yml +- include: absent_display_name.yml +- include: cleanup.yml diff --git a/test/integration/roles/test_cs_instance/tasks/present.yml b/test/integration/roles/test_cs_instance/tasks/present.yml index 10242a57fd2d54..ad3d391ef9cefa 100644 --- a/test/integration/roles/test_cs_instance/tasks/present.yml +++ b/test/integration/roles/test_cs_instance/tasks/present.yml @@ -1,4 +1,12 @@ --- +- name: setup instance to be absent + cs_instance: name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent + register: instance +- name: verify instance to be absent + assert: + that: + - instance|success + - name: test create instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -21,7 +29,6 @@ - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" - not instance.tags - - name: test create instance idempotence cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -44,7 +51,6 @@ - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" - not instance.tags - - name: test running instance not updated cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -60,7 +66,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == "Running" - - name: test stopping instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -76,7 +81,6 @@ - 
instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == "Stopped" - - name: test stopping instance idempotence cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -89,7 +93,6 @@ - not instance|changed - instance.state == "Stopped" - - name: test updating stopped instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -106,7 +109,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_2 }}" - instance.state == "Stopped" - - name: test starting instance cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -122,7 +124,6 @@ - instance.service_offering == "{{ test_cs_instance_offering_2 }}" - instance.state == "Running" - - name: test starting instance idempotence cs_instance: name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" @@ -133,6 +134,9 @@ that: - instance|success - not instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" - instance.state == "Running" - name: test force update running instance @@ -147,7 +151,7 @@ - instance|success - instance|changed - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" - - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" - instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == "Running" @@ -163,6 +167,21 @@ - instance|success - not instance|changed - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" - - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" - instance.service_offering == "{{ test_cs_instance_offering_1 }}" - instance.state == 
"Running" + +- name: test restore instance + cs_instance: + name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + state: restored + register: instance +- name: verify restore instance + assert: + that: + - instance|success + - instance|changed + - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" diff --git a/test/integration/roles/test_cs_instance/tasks/present_display_name.yml b/test/integration/roles/test_cs_instance/tasks/present_display_name.yml new file mode 100644 index 00000000000000..c1882149d9d808 --- /dev/null +++ b/test/integration/roles/test_cs_instance/tasks/present_display_name.yml @@ -0,0 +1,176 @@ +--- +- name: setup instance with display_name to be absent + cs_instance: display_name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent + register: instance +- name: verify instance with display_name to be absent + assert: + that: + - instance|success + +- name: test create instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + affinity_group: "{{ cs_resource_prefix }}-ag" + security_group: "{{ cs_resource_prefix }}-sg" + ssh_key: "{{ cs_resource_prefix }}-sshkey" + tags: [] + register: instance +- name: verify create instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" + - not instance.tags + +- name: test create instance with display_name idempotence + cs_instance: + display_name: "{{ 
cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + affinity_group: "{{ cs_resource_prefix }}-ag" + security_group: "{{ cs_resource_prefix }}-sg" + ssh_key: "{{ cs_resource_prefix }}-sshkey" + tags: [] + register: instance +- name: verify create instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey" + - not instance.tags + +- name: test running instance with display_name not updated + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_2 }}" + register: instance +- name: verify running instance with display_name not updated + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + +- name: test stopping instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: stopped + register: instance +- name: verify stopping instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Stopped" + +- name: test stopping instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: stopped + register: instance +- name: verify stopping instance idempotence + assert: + that: + - instance|success + - not 
instance|changed + - instance.state == "Stopped" + +- name: test updating stopped instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_2 }}" + register: instance +- name: verify updating stopped instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" + - instance.state == "Stopped" + +- name: test starting instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: started + register: instance +- name: verify starting instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" + - instance.state == "Running" + +- name: test starting instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + state: started + register: instance +- name: verify starting instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_2 }}" + - instance.state == "Running" + +- name: test force update running instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + force: true + register: instance +- name: verify force update running instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - 
instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + +- name: test force update running instance with display_name idempotence + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + service_offering: "{{ test_cs_instance_offering_1 }}" + force: true + register: instance +- name: verify force update running instance with display_name idempotence + assert: + that: + - instance|success + - not instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" + - instance.state == "Running" + +- name: test restore instance with display_name + cs_instance: + display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + template: "{{ test_cs_instance_template }}" + state: restored + register: instance +- name: verify restore instance with display_name + assert: + that: + - instance|success + - instance|changed + - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}" + - instance.service_offering == "{{ test_cs_instance_offering_1 }}" diff --git a/test/integration/roles/test_cs_instance/tasks/setup.yml b/test/integration/roles/test_cs_instance/tasks/setup.yml index 32f3ff13e248e6..0039ce8f1be1e8 100644 --- a/test/integration/roles/test_cs_instance/tasks/setup.yml +++ b/test/integration/roles/test_cs_instance/tasks/setup.yml @@ -22,11 +22,3 @@ assert: that: - sg|success - -- name: setup instance to be absent - cs_instance: name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent - register: instance -- name: verify instance to be absent - assert: - that: - - instance|success From 3a57d9472c6788ce6fbb700108fbc776527fc3df Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 20 Dec 2015 17:55:39 -0500 Subject: [PATCH 3224/3617] Save output of integration test results to files we can archive --- .../roles/run_integration/tasks/main.yml | 12 +++++++++++- 1 
file changed, 11 insertions(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 2d01999dbfd10d..f67f088246ce18 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -28,4 +28,14 @@ delay: 30 ignore_errors: true -- debug: var=test_results +- name: save stdout test results for each host + local_action: copy + args: + dest: "{{sync_dir}}/{{inventory_hostname}}.stdout_results.txt" + content: "{{test_results.stdout}}" + +- name: save stderr test results for each host + local_action: copy + args: + dest: "{{sync_dir}}/{{inventory_hostname}}.stderr_results.txt" + content: "{{test_results.stderr}}" From 54455a06e55756b31493fd25b1871146c8fe6ab2 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sun, 20 Dec 2015 21:32:37 -0500 Subject: [PATCH 3225/3617] Disable docker test for Fedora, due to broken packaging --- test/integration/destructive.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml index 626124d14f1fd8..3e8cca385e64eb 100644 --- a/test/integration/destructive.yml +++ b/test/integration/destructive.yml @@ -17,5 +17,5 @@ - { role: test_mysql_db, tags: test_mysql_db} - { role: test_mysql_user, tags: test_mysql_user} - { role: test_mysql_variables, tags: test_mysql_variables} - - { role: test_docker, tags: test_docker} + - { role: test_docker, tags: test_docker, when: ansible_distribution != "Fedora" } - { role: test_zypper, tags: test_zypper} From a4674906c60da6035345c2bbe89983b5a6e3b69d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Mon, 21 Dec 2015 13:01:58 -0500 Subject: [PATCH 3226/3617] Merge role params into variables separately from other variables Fixes #13617 --- 
lib/ansible/playbook/role/__init__.py | 6 ++++++ lib/ansible/vars/__init__.py | 1 + 2 files changed, 7 insertions(+) diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py index f308954f52818e..ce82573dc03186 100644 --- a/lib/ansible/playbook/role/__init__.py +++ b/lib/ansible/playbook/role/__init__.py @@ -265,6 +265,12 @@ def get_inherited_vars(self, dep_chain=[], include_params=True): inherited_vars = combine_vars(inherited_vars, parent._role_params) return inherited_vars + def get_role_params(self): + params = {} + for dep in self.get_all_dependencies(): + params = combine_vars(params, dep._role_params) + return params + def get_vars(self, dep_chain=[], include_params=True): all_vars = self.get_inherited_vars(dep_chain, include_params=include_params) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 1184ec5049243d..699333a589668e 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -308,6 +308,7 @@ def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=Tru if not C.DEFAULT_PRIVATE_ROLE_VARS: for role in play.get_roles(): + all_vars = combine_vars(all_vars, role.get_role_params()) all_vars = combine_vars(all_vars, role.get_vars(include_params=False)) if task: From 593d80c63d408012550850eb06d85387588cee3b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 21 Dec 2015 13:14:51 -0500 Subject: [PATCH 3227/3617] role search path clarified --- docsite/rst/playbooks_roles.rst | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst index c6c01db5d484b0..2e1173acda99a2 100644 --- a/docsite/rst/playbooks_roles.rst +++ b/docsite/rst/playbooks_roles.rst @@ -191,11 +191,8 @@ This designates the following behaviors, for each role 'x': - If roles/x/handlers/main.yml exists, handlers listed therein will be added to the play - If roles/x/vars/main.yml exists, variables listed therein 
will be added to the play - If roles/x/meta/main.yml exists, any role dependencies listed therein will be added to the list of roles (1.3 and later) -- Any copy tasks can reference files in roles/x/files/ without having to path them relatively or absolutely -- Any script tasks can reference scripts in roles/x/files/ without having to path them relatively or absolutely -- Any template tasks can reference files in roles/x/templates/ without having to path them relatively or absolutely -- Any include tasks can reference files in roles/x/tasks/ without having to path them relatively or absolutely - +- Any copy, script, template or include tasks (in the role) can reference files in roles/x/files/ without having to path them relatively or absolutely + In Ansible 1.4 and later you can configure a roles_path to search for roles. Use this to check all of your common roles out to one location, and share them easily between multiple playbook projects. See :doc:`intro_configuration` for details about how to set this up in ansible.cfg. From 75e94e0cba538c9ed532374b219c45e91fd89db8 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 21 Dec 2015 13:06:48 -0500 Subject: [PATCH 3228/3617] allow for non standard hostnames * Changed parse_addresses to throw exceptions instead of passing None * Switched callers to trap and pass through the original values. 
* Added very verbose notice * Look at deprecating this and possibly validate at plugin instead fixes #13608 --- lib/ansible/inventory/__init__.py | 21 ++++++++++++--------- lib/ansible/inventory/ini.py | 11 +++++++---- lib/ansible/parsing/utils/addresses.py | 22 +++++++++++----------- lib/ansible/plugins/action/add_host.py | 10 +++++++--- test/units/parsing/test_addresses.py | 14 ++++++++++++-- 5 files changed, 49 insertions(+), 29 deletions(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 95e193f381a86e..095118e50eb0bd 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -109,7 +109,12 @@ def parse_inventory(self, host_list): pass elif isinstance(host_list, list): for h in host_list: - (host, port) = parse_address(h, allow_ranges=False) + try: + (host, port) = parse_address(h, allow_ranges=False) + except AnsibleError as e: + display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_string(e)) + host = h + port = None all.add_host(Host(host, port)) elif self._loader.path_exists(host_list): #TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory pllugins' @@ -228,15 +233,13 @@ def split_host_pattern(cls, pattern): # If it doesn't, it could still be a single pattern. This accounts for # non-separator uses of colons: IPv6 addresses and [x:y] host ranges. else: - (base, port) = parse_address(pattern, allow_ranges=True) - if base: + try: + (base, port) = parse_address(pattern, allow_ranges=True) patterns = [pattern] - - # The only other case we accept is a ':'-separated list of patterns. - # This mishandles IPv6 addresses, and is retained only for backwards - # compatibility. - - else: + except: + # The only other case we accept is a ':'-separated list of patterns. + # This mishandles IPv6 addresses, and is retained only for backwards + # compatibility. 
patterns = re.findall( r'''(?: # We want to match something comprising: [^\s:\[\]] # (anything other than whitespace or ':[]' diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py index 537fde1ef9e3d2..9224ef2d23db81 100644 --- a/lib/ansible/inventory/ini.py +++ b/lib/ansible/inventory/ini.py @@ -23,7 +23,7 @@ import re from ansible import constants as C -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleParserError from ansible.inventory.host import Host from ansible.inventory.group import Group from ansible.inventory.expand_hosts import detect_range @@ -264,9 +264,12 @@ def _expand_hostpattern(self, hostpattern): # Can the given hostpattern be parsed as a host with an optional port # specification? - (pattern, port) = parse_address(hostpattern, allow_ranges=True) - if not pattern: - self._raise_error("Can't parse '%s' as host[:port]" % hostpattern) + try: + (pattern, port) = parse_address(hostpattern, allow_ranges=True) + except: + # not a recognizable host pattern + pattern = hostpattern + port = None # Once we have separated the pattern, we expand it into list of one or # more hostnames, depending on whether it contains any [x:y] ranges. diff --git a/lib/ansible/parsing/utils/addresses.py b/lib/ansible/parsing/utils/addresses.py index 387f05c627f178..ebfd850ac6a886 100644 --- a/lib/ansible/parsing/utils/addresses.py +++ b/lib/ansible/parsing/utils/addresses.py @@ -20,6 +20,7 @@ __metaclass__ = type import re +from ansible.errors import AnsibleParserError, AnsibleError # Components that match a numeric or alphanumeric begin:end or begin:end:step # range expression inside square brackets. @@ -162,6 +163,7 @@ $ '''.format(label=label), re.X|re.I|re.UNICODE ), + } def parse_address(address, allow_ranges=False): @@ -183,8 +185,8 @@ def parse_address(address, allow_ranges=False): # First, we extract the port number if one is specified. 
port = None - for type in ['bracketed_hostport', 'hostport']: - m = patterns[type].match(address) + for matching in ['bracketed_hostport', 'hostport']: + m = patterns[matching].match(address) if m: (address, port) = m.groups() port = int(port) @@ -194,22 +196,20 @@ def parse_address(address, allow_ranges=False): # numeric ranges, or a hostname with alphanumeric ranges. host = None - for type in ['ipv4', 'ipv6', 'hostname']: - m = patterns[type].match(address) + for matching in ['ipv4', 'ipv6', 'hostname']: + m = patterns[matching].match(address) if m: host = address continue # If it isn't any of the above, we don't understand it. - if not host: - return (None, None) - - # If we get to this point, we know that any included ranges are valid. If - # the caller is prepared to handle them, all is well. Otherwise we treat - # it as a parse failure. + raise AnsibleError("Not a valid network hostname: %s" % address) + # If we get to this point, we know that any included ranges are valid. + # If the caller is prepared to handle them, all is well. + # Otherwise we treat it as a parse failure. 
if not allow_ranges and '[' in host: - return (None, None) + raise AnsibleParserError("Detected range in host but was asked to ignore ranges") return (host, port) diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py index 4bf43f14009a64..b3aec20437e74b 100644 --- a/lib/ansible/plugins/action/add_host.py +++ b/lib/ansible/plugins/action/add_host.py @@ -53,9 +53,13 @@ def run(self, tmp=None, task_vars=None): new_name = self._task.args.get('name', self._task.args.get('hostname', None)) display.vv("creating host via 'add_host': hostname=%s" % new_name) - name, port = parse_address(new_name, allow_ranges=False) - if not name: - raise AnsibleError("Invalid inventory hostname: %s" % new_name) + try: + name, port = parse_address(new_name, allow_ranges=False) + except: + # not a parsable hostname, but might still be usable + name = new_name + port = None + if port: self._task.args['ansible_ssh_port'] = port diff --git a/test/units/parsing/test_addresses.py b/test/units/parsing/test_addresses.py index 870cbb0a14ae21..a688d0253bdd27 100644 --- a/test/units/parsing/test_addresses.py +++ b/test/units/parsing/test_addresses.py @@ -71,7 +71,12 @@ def test_without_ranges(self): for t in self.tests: test = self.tests[t] - (host, port) = parse_address(t) + try: + (host, port) = parse_address(t) + except: + host = None + port = None + assert host == test[0] assert port == test[1] @@ -79,6 +84,11 @@ def test_with_ranges(self): for t in self.range_tests: test = self.range_tests[t] - (host, port) = parse_address(t, allow_ranges=True) + try: + (host, port) = parse_address(t, allow_ranges=True) + except: + host = None + port = None + assert host == test[0] assert port == test[1] From 08b580decce79deac3c7c2d828d6a8ef9dd6e70c Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 14:09:02 -0500 Subject: [PATCH 3229/3617] Parallelize make command for integration test runner Also adds a new var, used by the prepare_tests role, to prevent 
it from deleting the temp test directory at the start of each play to avoid any potential race conditions --- test/integration/roles/prepare_tests/tasks/main.yml | 1 + .../roles/run_integration/tasks/main.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/test/integration/roles/prepare_tests/tasks/main.yml b/test/integration/roles/prepare_tests/tasks/main.yml index 3641880baa1536..7983ea52361ab1 100644 --- a/test/integration/roles/prepare_tests/tasks/main.yml +++ b/test/integration/roles/prepare_tests/tasks/main.yml @@ -22,6 +22,7 @@ always_run: True tags: - prepare + when: clean_working_dir|default("yes")|bool - name: create the test directory file: name={{output_dir}} state=directory diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index f67f088246ce18..8a306a8ada4d63 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -9,7 +9,7 @@ shell: "cd ~/ansible && pwd" register: results -- shell: "ls -la && . hacking/env-setup && cd test/integration && make {{ run_integration_make_target }}" +- shell: "ls -la && . 
hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j4 {{ run_integration_make_target }}" args: chdir: "{{ results.stdout }}" async: 3600 From 6d6822e66e43658c01b68bab2ed897e0ef31c784 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 14:37:17 -0500 Subject: [PATCH 3230/3617] Kick up the integration runner test image size --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index d4740d957089e6..55619776d907b5 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -2,7 +2,7 @@ ec2: group_id: 'sg-07bb906d' # jenkins-slave_new count: 1 - instance_type: 'm3.medium' + instance_type: 'm3.large' image: '{{ item.image }}' wait: true region: 'us-east-1' From 45afa642c3a69d209fefd7debfb38df9d8b757fd Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 15:48:58 -0500 Subject: [PATCH 3231/3617] Integration test runner tweaks --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- .../roles/run_integration/tasks/main.yml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index 55619776d907b5..8a48f0ce6e2683 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -2,7 +2,7 @@ ec2: group_id: 'sg-07bb906d' # jenkins-slave_new count: 1 - instance_type: 'm3.large' + instance_type: 'm3.xlarge' image: '{{ item.image }}' wait: true region: 'us-east-1' diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 
8a306a8ada4d63..6b37d85c2e7556 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -3,13 +3,14 @@ synchronize: src: "{{ sync_dir }}/" dest: "~/ansible" + no_log: true - name: Get ansible source dir sudo: false shell: "cd ~/ansible && pwd" register: results -- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j4 {{ run_integration_make_target }}" +- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j2 {{ run_integration_make_target }}" args: chdir: "{{ results.stdout }}" async: 3600 @@ -27,6 +28,7 @@ retries: 120 delay: 30 ignore_errors: true + no_log: true - name: save stdout test results for each host local_action: copy From 8119ea37afe5e94a1d98cec9fe7ae760b10a9adc Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 15:55:16 -0500 Subject: [PATCH 3232/3617] Dropping instance size back down since we're not doing parallel builds --- test/utils/ansible-playbook_integration_runner/ec2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml index 8a48f0ce6e2683..55619776d907b5 100644 --- a/test/utils/ansible-playbook_integration_runner/ec2.yml +++ b/test/utils/ansible-playbook_integration_runner/ec2.yml @@ -2,7 +2,7 @@ ec2: group_id: 'sg-07bb906d' # jenkins-slave_new count: 1 - instance_type: 'm3.xlarge' + instance_type: 'm3.large' image: '{{ item.image }}' wait: true region: 'us-east-1' From d22bbbf52c08e03b63d6045768f3000531f875e9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 21 Dec 2015 16:11:53 -0500 Subject: [PATCH 3233/3617] Actually disable parallel makes for integration runner --- .../roles/run_integration/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml index 6b37d85c2e7556..a833c96558dd0f 100644 --- a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml +++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml @@ -10,7 +10,7 @@ shell: "cd ~/ansible && pwd" register: results -- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make -j2 {{ run_integration_make_target }}" +- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make {{ run_integration_make_target }}" args: chdir: "{{ results.stdout }}" async: 3600 From 0c013f592a31c06baac7aadf27d23598f6abe931 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 21 Dec 2015 13:52:41 -0800 Subject: [PATCH 3234/3617] Transform the command we pass to subprocess into a byte string in _low_level-exec_command --- lib/ansible/plugins/action/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e9b18651d66b72..e88a55a15cc8a1 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -487,7 +487,8 @@ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' - + # We may need to revisit this later. 
+ cmd = to_bytes(cmd, errors='strict') if executable is not None: cmd = executable + ' -c ' + cmd From bbdfaf052209242fbd262860aeda81e59d694243 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 22 Dec 2015 00:24:35 -0500 Subject: [PATCH 3235/3617] move hostvars.vars to vars this fixes duplication under hostvars and exposes all vars in the vars dict which makes dynamic reference possible on 'non hostvars' --- lib/ansible/vars/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py index 699333a589668e..4135ff1768743a 100644 --- a/lib/ansible/vars/__init__.py +++ b/lib/ansible/vars/__init__.py @@ -259,8 +259,6 @@ def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=Tru except KeyError: pass - all_vars['vars'] = all_vars.copy() - if play: all_vars = combine_vars(all_vars, play.get_vars()) @@ -343,6 +341,8 @@ def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=Tru all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars) #VARIABLE_CACHE[cache_entry] = all_vars + if task or play: + all_vars['vars'] = all_vars.copy() debug("done with get_vars()") return all_vars From c60749c9222c8139042a0f4280d6622b209de550 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 22 Dec 2015 09:14:12 -0600 Subject: [PATCH 3236/3617] Also convert ints to bool for type=bool --- lib/ansible/module_utils/basic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 62b8cadfd61c2b..8a135b300f1d3c 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1274,7 +1274,7 @@ def _check_type_bool(self, value): if isinstance(value, bool): return value - if isinstance(value, basestring): + if isinstance(value, basestring) or isinstance(value, int): return self.boolean(value) raise TypeError('%s cannot be 
converted to a bool' % type(value)) From b310d0ce76c05bb7a7a47aa7b7537b9adc916171 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 07:22:44 -0800 Subject: [PATCH 3237/3617] Update the developing doc to modern method of specifying bool argspec values --- docsite/rst/developing_modules.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index fde4b5704b6222..39bfd9e3d9cfc1 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -247,7 +247,7 @@ And instantiating the module class like:: argument_spec = dict( state = dict(default='present', choices=['present', 'absent']), name = dict(required=True), - enabled = dict(required=True, choices=BOOLEANS), + enabled = dict(required=True, type='bool'), something = dict(aliases=['whatever']) ) ) @@ -335,7 +335,7 @@ and guidelines: * If you have a company module that returns facts specific to your installations, a good name for this module is `site_facts`. -* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "choices=BOOLEANS" and a module.boolean(value) casting function. +* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'" and a module.boolean(value) casting function. * Include a minimum of dependencies if possible. If there are dependencies, document them at the top of the module file, and have the module raise JSON error messages when the import fails. 
From b33f72636a3b7f3a256185afde1aae3d9703235e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 07:25:50 -0800 Subject: [PATCH 3238/3617] Also remove the bool casting function info (transparent to module writer now) --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 39bfd9e3d9cfc1..141f81bd08b0d8 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -335,7 +335,7 @@ and guidelines: * If you have a company module that returns facts specific to your installations, a good name for this module is `site_facts`. -* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'" and a module.boolean(value) casting function. +* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'". * Include a minimum of dependencies if possible. If there are dependencies, document them at the top of the module file, and have the module raise JSON error messages when the import fails. From c4da5840b5e38aea1740e68f7100256c93dfbb17 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 08:22:02 -0800 Subject: [PATCH 3239/3617] Convert to bytes later so that make_become_command can jsut operate on text type. 
--- lib/ansible/plugins/action/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e88a55a15cc8a1..765ba663164099 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -487,8 +487,6 @@ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' - # We may need to revisit this later. - cmd = to_bytes(cmd, errors='strict') if executable is not None: cmd = executable + ' -c ' + cmd @@ -505,7 +503,7 @@ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, cmd = self._play_context.make_become_cmd(cmd, executable=executable) display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) - rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) + rc, stdout, stderr = self._connection.exec_command(to_bytes(cmd, errors='strict'), in_data=in_data, sudoable=sudoable) # stdout and stderr may be either a file-like or a bytes object. # Convert either one to a text type From b22d998d1d9acbda6f458ea99d7e5266d69e035c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Tue, 22 Dec 2015 16:30:29 +0100 Subject: [PATCH 3240/3617] Fix make tests-py3 on devel. Fix for https://github.com/ansible/ansible/issues/13638. 
--- test/units/plugins/action/test_action.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index 0e47b6a53818c4..dcd04375959325 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -42,14 +42,14 @@ def test_sudo_only_if_user_differs(self): play_context.become = True play_context.become_user = play_context.remote_user = 'root' - play_context.make_become_cmd = Mock(return_value='CMD') + play_context.make_become_cmd = Mock(return_value=b'CMD') - action_base._low_level_execute_command('ECHO', sudoable=True) + action_base._low_level_execute_command(b'ECHO', sudoable=True) play_context.make_become_cmd.assert_not_called() play_context.remote_user = 'apo' - action_base._low_level_execute_command('ECHO', sudoable=True) - play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None) + action_base._low_level_execute_command(b'ECHO', sudoable=True) + play_context.make_become_cmd.assert_called_once_with(b'ECHO', executable=None) play_context.make_become_cmd.reset_mock() @@ -57,7 +57,7 @@ def test_sudo_only_if_user_differs(self): C.BECOME_ALLOW_SAME_USER = True try: play_context.remote_user = 'root' - action_base._low_level_execute_command('ECHO SAME', sudoable=True) - play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None) + action_base._low_level_execute_command(b'ECHO SAME', sudoable=True) + play_context.make_become_cmd.assert_called_once_with(b'ECHO SAME', executable=None) finally: C.BECOME_ALLOW_SAME_USER = become_allow_same_user From 010839aedc5d903b7ef2fac1b564642cd036e95e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 22 Dec 2015 17:15:58 -0500 Subject: [PATCH 3241/3617] fix no_log disclosure when using aliases --- lib/ansible/module_utils/basic.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git 
a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 4aee3b4169da35..91ea874d859873 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -516,6 +516,7 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, self._debug = False self.aliases = {} + self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug'] if add_file_common_args: for k, v in FILE_COMMON_ARGUMENTS.items(): @@ -524,6 +525,14 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, self.params = self._load_params() + # append to legal_inputs and then possibly check against them + try: + self.aliases = self._handle_aliases() + except Exception, e: + # use exceptions here cause its not safe to call vail json until no_log is processed + print('{"failed": true, "msg": "Module alias error: %s"}' % str(e)) + sys.exit(1) + # Save parameter values that should never be logged self.no_log_values = set() # Use the argspec to determine which args are no_log @@ -538,10 +547,6 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, # reset to LANG=C if it's an invalid/unavailable locale self._check_locale() - self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug'] - - # append to legal_inputs and then possibly check against them - self.aliases = self._handle_aliases() self._check_arguments(check_invalid_arguments) @@ -1064,6 +1069,7 @@ def _check_locale(self): self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e) def _handle_aliases(self): + # this uses exceptions as it happens before we can safely call fail_json aliases_results = {} #alias:canon for (k,v) in self.argument_spec.items(): self._legal_inputs.append(k) @@ -1072,11 +1078,11 @@ def _handle_aliases(self): required = v.get('required', False) if default is not None and required: # not alias specific but this is a good place to check this - 
self.fail_json(msg="internal error: required and default are mutually exclusive for %s" % k) + raise Exception("internal error: required and default are mutually exclusive for %s" % k) if aliases is None: continue if type(aliases) != list: - self.fail_json(msg='internal error: aliases must be a list') + raise Exception('internal error: aliases must be a list') for alias in aliases: self._legal_inputs.append(alias) aliases_results[alias] = k From 202b92179d247e508fe4190edc28614b136a5b89 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 22 Dec 2015 22:09:45 -0500 Subject: [PATCH 3242/3617] corrected role path search order the unfraking was matching roles in current dir as it always returns a full path, pushed to the bottom as match of last resort fixes #13645 --- lib/ansible/playbook/role/definition.py | 70 ++++++++++++------------- 1 file changed, 34 insertions(+), 36 deletions(-) diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py index 7e8f47e9be8640..0af49cec91c6a7 100644 --- a/lib/ansible/playbook/role/definition.py +++ b/lib/ansible/playbook/role/definition.py @@ -135,46 +135,44 @@ def _load_role_path(self, role_name): append it to the default role path ''' - role_path = unfrackpath(role_name) + # we always start the search for roles in the base directory of the playbook + role_search_paths = [ + os.path.join(self._loader.get_basedir(), u'roles'), + self._loader.get_basedir(), + ] + + # also search in the configured roles path + if C.DEFAULT_ROLES_PATH: + configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep) + role_search_paths.extend(configured_paths) + + # finally, append the roles basedir, if it was set, so we can + # search relative to that directory for dependent roles + if self._role_basedir: + role_search_paths.append(self._role_basedir) + + # create a templar class to template the dependency names, in + # case they contain variables + if self._variable_manager is not None: + all_vars = 
self._variable_manager.get_vars(loader=self._loader, play=self._play) + else: + all_vars = dict() + + templar = Templar(loader=self._loader, variables=all_vars) + role_name = templar.template(role_name) + + # now iterate through the possible paths and return the first one we find + for path in role_search_paths: + path = templar.template(path) + role_path = unfrackpath(os.path.join(path, role_name)) + if self._loader.path_exists(role_path): + return (role_name, role_path) + # if not found elsewhere try to extract path from name + role_path = unfrackpath(role_name) if self._loader.path_exists(role_path): role_name = os.path.basename(role_name) return (role_name, role_path) - else: - # we always start the search for roles in the base directory of the playbook - role_search_paths = [ - os.path.join(self._loader.get_basedir(), u'roles'), - u'./roles', - self._loader.get_basedir(), - u'./' - ] - - # also search in the configured roles path - if C.DEFAULT_ROLES_PATH: - configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep) - role_search_paths.extend(configured_paths) - - # finally, append the roles basedir, if it was set, so we can - # search relative to that directory for dependent roles - if self._role_basedir: - role_search_paths.append(self._role_basedir) - - # create a templar class to template the dependency names, in - # case they contain variables - if self._variable_manager is not None: - all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play) - else: - all_vars = dict() - - templar = Templar(loader=self._loader, variables=all_vars) - role_name = templar.template(role_name) - - # now iterate through the possible paths and return the first one we find - for path in role_search_paths: - path = templar.template(path) - role_path = unfrackpath(os.path.join(path, role_name)) - if self._loader.path_exists(role_path): - return (role_name, role_path) raise AnsibleError("the role '%s' was not found in %s" % (role_name, 
":".join(role_search_paths)), obj=self._ds) From 957b376f9eb959f4f3627a622f7776a26442bf9c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 22 Dec 2015 22:45:25 -0500 Subject: [PATCH 3243/3617] better module error handling * now module errors clearly state msg=MODULE FAILURE * module's stdout and stderr go into module_stdout and module_stderr keys which only appear during parsing failure * invocation module_args are deleted from results provided by action plugin as errors can keep us from overwriting and then disclosing info that was meant to be kept hidden due to no_log * fixed invocation module_args set by basic.py as it was creating different keys as the invocation in action plugin base. * results now merge --- lib/ansible/module_utils/basic.py | 4 ++-- lib/ansible/plugins/action/__init__.py | 5 +++-- lib/ansible/plugins/action/normal.py | 5 ++++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 91ea874d859873..0391035e883086 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -1530,7 +1530,7 @@ def exit_json(self, **kwargs): if not 'changed' in kwargs: kwargs['changed'] = False if 'invocation' not in kwargs: - kwargs['invocation'] = self.params + kwargs['invocation'] = {'module_args': self.params} kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) @@ -1542,7 +1542,7 @@ def fail_json(self, **kwargs): assert 'msg' in kwargs, "implementation error -- msg to explain the error is required" kwargs['failed'] = True if 'invocation' not in kwargs: - kwargs['invocation'] = self.params + kwargs['invocation'] = {'module_args': self.params} kwargs = remove_values(kwargs, self.no_log_values) self.do_cleanup_files() print(self.jsonify(kwargs)) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 765ba663164099..5383f8afd4345c 100644 --- 
a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -460,9 +460,10 @@ def _execute_module(self, module_name=None, module_args=None, tmp=None, task_var if 'stderr' in res and res['stderr'].startswith(u'Traceback'): data['exception'] = res['stderr'] else: - data['msg'] = res.get('stdout', u'') + data['msg'] = "MODULE FAILURE" + data['module_stdout'] = res.get('stdout', u'') if 'stderr' in res: - data['msg'] += res['stderr'] + data['module_stderr'] = res['stderr'] # pre-split stdout into lines, if stdout is in the data and there # isn't already a stdout_lines value there diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py index f9b55e1ff57aa8..932ad8309c30fa 100644 --- a/lib/ansible/plugins/action/normal.py +++ b/lib/ansible/plugins/action/normal.py @@ -18,6 +18,7 @@ __metaclass__ = type from ansible.plugins.action import ActionBase +from ansible.utils.vars import merge_hash class ActionModule(ActionBase): @@ -27,7 +28,9 @@ def run(self, tmp=None, task_vars=None): task_vars = dict() results = super(ActionModule, self).run(tmp, task_vars) - results.update(self._execute_module(tmp=tmp, task_vars=task_vars)) + # remove as modules might hide due to nolog + del results['invocation']['module_args'] + results = merge_hash(results, self._execute_module(tmp=tmp, task_vars=task_vars)) # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. From 809c9af68cac56180b336d6ebe29d70b9d10ac14 Mon Sep 17 00:00:00 2001 From: Matt Roberts Date: Wed, 23 Dec 2015 08:18:46 +0000 Subject: [PATCH 3244/3617] Update playbooks_intro.rst If you follow the documentation through in order you shouldn't have read about modules yet. 
--- docsite/rst/playbooks_intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst index 28c809f013266f..55cd3359be6c73 100644 --- a/docsite/rst/playbooks_intro.rst +++ b/docsite/rst/playbooks_intro.rst @@ -41,7 +41,7 @@ Each playbook is composed of one or more 'plays' in a list. The goal of a play is to map a group of hosts to some well defined roles, represented by things ansible calls tasks. At a basic level, a task is nothing more than a call -to an ansible module, which you should have learned about in earlier chapters. +to an ansible module (see :doc:`Modules`). By composing a playbook of multiple 'plays', it is possible to orchestrate multi-machine deployments, running certain steps on all From 42b9a206ada579000a64cdcb7a0c82ecfd99c451 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Wed, 23 Dec 2015 11:44:30 +0100 Subject: [PATCH 3245/3617] Fix last commit, make it python3 compatible (and py24) --- lib/ansible/module_utils/basic.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 91ea874d859873..f9dc964e67626c 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -528,7 +528,8 @@ def __init__(self, argument_spec, bypass_checks=False, no_log=False, # append to legal_inputs and then possibly check against them try: self.aliases = self._handle_aliases() - except Exception, e: + except Exception: + e = get_exception() # use exceptions here cause its not safe to call vail json until no_log is processed print('{"failed": true, "msg": "Module alias error: %s"}' % str(e)) sys.exit(1) From b201cf2ee13a9e4e1c5dc222043e3f1c84940044 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 23 Dec 2015 10:29:59 -0500 Subject: [PATCH 3246/3617] switched from pythonic None to generic null --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index 141f81bd08b0d8..d3781b2f7fd0e2 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -481,7 +481,7 @@ Module checklist * The shebang should always be #!/usr/bin/python, this allows ansible_python_interpreter to work * Documentation: Make sure it exists * `required` should always be present, be it true or false - * If `required` is false you need to document `default`, even if the default is 'None' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code. + * If `required` is false you need to document `default`, even if the default is 'null' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code. * `default` is not needed for `required: true` * Remove unnecessary doc like `aliases: []` or `choices: []` * The version is not a float number and value the current development version From d89d7951e6fb84cdb04cc35e0aa962d59fe6f553 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 23 Dec 2015 11:45:07 -0500 Subject: [PATCH 3247/3617] fixed tests to follow new invocation structure also added maxdiff setting to see issues clearly when they happen --- .../module_utils/basic/test_exit_json.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py index 931447f8ab6f5c..27bbb0f9e560f6 100644 --- a/test/units/module_utils/basic/test_exit_json.py +++ b/test/units/module_utils/basic/test_exit_json.py @@ -31,8 +31,11 @@ from ansible.module_utils.basic import heuristic_log_sanitize from ansible.module_utils.basic import return_values, remove_values +empty_invocation = {u'module_args': {}} + @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets 
(yet)") class TestAnsibleModuleExitJson(unittest.TestCase): + def setUp(self): self.COMPLEX_ARGS = basic.MODULE_COMPLEX_ARGS basic.MODULE_COMPLEX_ARGS = '{}' @@ -56,7 +59,7 @@ def test_exit_json_no_args_exits(self): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=False, invocation={})) + self.assertEquals(return_val, dict(changed=False, invocation=empty_invocation)) def test_exit_json_args_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -67,7 +70,7 @@ def test_exit_json_args_exits(self): else: self.assertEquals(ctx.exception.code, 0) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", changed=False, invocation={})) + self.assertEquals(return_val, dict(msg="message", changed=False, invocation=empty_invocation)) def test_fail_json_exits(self): with self.assertRaises(SystemExit) as ctx: @@ -78,13 +81,13 @@ def test_fail_json_exits(self): else: self.assertEquals(ctx.exception.code, 1) return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(msg="message", failed=True, invocation={})) + self.assertEquals(return_val, dict(msg="message", failed=True, invocation=empty_invocation)) def test_exit_json_proper_changed(self): with self.assertRaises(SystemExit) as ctx: self.module.exit_json(changed=True, msg='success') return_val = json.loads(self.fake_stream.getvalue()) - self.assertEquals(return_val, dict(changed=True, msg='success', invocation={})) + self.assertEquals(return_val, dict(changed=True, msg='success', invocation=empty_invocation)) @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)") class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): @@ -95,21 +98,21 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase): not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, 
url='https://username:password12345@foo.com/login/', not_secret='following the leader', changed=False, msg='here', - invocation=dict(password=OMIT, token=None, username='person')), + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), ), (dict(username='person', password='password12345'), dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/', not_secret='following the leader', changed=False, msg='here', - invocation=dict(password=OMIT, token=None, username='person')), + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), ), (dict(username='person', password='$ecret k3y'), dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/', not_secret='following the leader', msg='here'), dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/', not_secret='following the leader', changed=False, msg='here', - invocation=dict(password=OMIT, token=None, username='person')), + invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))), ), ) @@ -122,6 +125,7 @@ def tearDown(self): sys.stdout = self.old_stdout def test_exit_json_removes_values(self): + self.maxDiff = None for args, return_val, expected in self.dataset: sys.stdout = StringIO() basic.MODULE_COMPLEX_ARGS = json.dumps(args) @@ -137,6 +141,7 @@ def test_exit_json_removes_values(self): self.assertEquals(json.loads(sys.stdout.getvalue()), expected) def test_fail_json_removes_values(self): + self.maxDiff = None for args, return_val, expected in self.dataset: expected = copy.deepcopy(expected) del expected['changed'] From fd7e01696f659e1a147887087c87e2bad9742209 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 23 Dec 2015 17:16:21 -0500 Subject: [PATCH 3248/3617] updated submodule refs to pick up module changes --- lib/ansible/modules/core | 2 +- 
lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index fcb3397df7944f..002028748f0809 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit fcb3397df7944ff15ea698b5717c06e8fc7d43ba +Subproject commit 002028748f080961ade801c30e194bfd4ba043ce diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index c6829752d85239..19e496c69c22fc 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit c6829752d852398c255704cd5d7faa54342e143e +Subproject commit 19e496c69c22fc7ec1e3c8306b363a812b85d386 From deac4d00b22f9e0288f5e3c4633e07a7f937d47c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 24 Dec 2015 11:32:40 -0800 Subject: [PATCH 3249/3617] bigip changes as requested by bcoca and abadger: * Fix to error if validate_cert is True and python doesn't support it. * Only globally disable certificate checking if really needed. Use bigip verify parameter if available instead. * Remove public disable certificate function to make it less likely people will attempt to reuse that --- lib/ansible/module_utils/f5.py | 36 ++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/lib/ansible/module_utils/f5.py b/lib/ansible/module_utils/f5.py index e04e6b2f1ec063..ba336377e7d022 100644 --- a/lib/ansible/module_utils/f5.py +++ b/lib/ansible/module_utils/f5.py @@ -51,19 +51,35 @@ def f5_argument_spec(): def f5_parse_arguments(module): if not bigsuds_found: module.fail_json(msg="the python bigsuds module is required") - if not module.params['validate_certs']: - disable_ssl_cert_validation() + + if module.params['validate_certs']: + import ssl + if not hasattr(ssl, 'SSLContext'): + module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. 
Either update python or set validate_certs=False on the task') + return (module.params['server'],module.params['user'],module.params['password'],module.params['state'],module.params['partition'],module.params['validate_certs']) -def bigip_api(bigip, user, password): - api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) - return api +def bigip_api(bigip, user, password, validate_certs): + try: + # bigsuds >= 1.0.3 + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs) + except TypeError: + # bigsuds < 1.0.3, no verify param + if validate_certs: + # Note: verified we have SSLContext when we parsed params + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + else: + import ssl + if hasattr(ssl, 'SSLContext'): + # Really, you should never do this. It disables certificate + # verification *globally*. But since older bigip libraries + # don't give us a way to toggle verification we need to + # disable it at the global level. + # From https://www.python.org/dev/peps/pep-0476/#id29 + ssl._create_default_https_context = ssl._create_unverified_context + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) -def disable_ssl_cert_validation(): - # You probably only want to do this for testing and never in production. - # From https://www.python.org/dev/peps/pep-0476/#id29 - import ssl - ssl._create_default_https_context = ssl._create_unverified_context + return api # Fully Qualified name (with the partition) def fq_name(partition,name): From cd9e18d0e52c1915132614e6e2946a26968e3091 Mon Sep 17 00:00:00 2001 From: Stephen Medina Date: Fri, 25 Dec 2015 08:56:08 -0800 Subject: [PATCH 3250/3617] clarify idempotence explanation Small typo; wasn't sure what to replace it with. 
--- docsite/rst/intro_adhoc.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst index 9e104d5836fe29..61ba33523a6811 100644 --- a/docsite/rst/intro_adhoc.rst +++ b/docsite/rst/intro_adhoc.rst @@ -112,7 +112,7 @@ For example, using double rather than single quotes in the above example would evaluate the variable on the box you were on. So far we've been demoing simple command execution, but most Ansible modules usually do not work like -simple scripts. They make the remote system look like you state, and run the commands necessary to +simple scripts. They make the remote system look like a state, and run the commands necessary to get it there. This is commonly referred to as 'idempotence', and is a core design goal of Ansible. However, we also recognize that running arbitrary commands is equally important, so Ansible easily supports both. From d70a97b562da1b06d21a86fd1c7619bfa2b6a2e6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 25 Dec 2015 12:17:22 -0800 Subject: [PATCH 3251/3617] Update submodule refs --- lib/ansible/modules/extras | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index 19e496c69c22fc..f6a7b6dd1f7be9 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit 19e496c69c22fc7ec1e3c8306b363a812b85d386 +Subproject commit f6a7b6dd1f7be93ba640c50bf26adeeabb5af46f From 0b92abaf67de53349bb4d2733f49750d9a4d8277 Mon Sep 17 00:00:00 2001 From: Etherdaemon Date: Sun, 27 Dec 2015 21:31:59 +1000 Subject: [PATCH 3252/3617] Proposed fix for ansible/ansible-modules-extras#1348 due to datetime.datetime type not being matched --- lib/ansible/module_utils/basic.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 6fd382aa4901e2..89d595a0bf33ac 100644 --- 
a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -65,6 +65,7 @@ import pwd import platform import errno +import datetime from itertools import repeat, chain try: @@ -423,10 +424,13 @@ def remove_values(value, no_log_strings): for omit_me in no_log_strings: if omit_me in stringy_value: return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER' + elif isinstance(value, datetime.datetime): + value = value.isoformat() else: raise TypeError('Value of unknown type: %s, %s' % (type(value), value)) return value + def heuristic_log_sanitize(data, no_log_values=None): ''' Remove strings that look like passwords from log messages ''' # Currently filters: From c489b271d152820ab11b73d11877f8805318cd7a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 27 Dec 2015 14:17:20 -0500 Subject: [PATCH 3253/3617] updated release cycle to 4 months instead of 2 --- docsite/rst/intro_installation.rst | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst index e986ffd70f6a8f..a5ed83a3027260 100644 --- a/docsite/rst/intro_installation.rst +++ b/docsite/rst/intro_installation.rst @@ -27,12 +27,11 @@ What Version To Pick? ````````````````````` Because it runs so easily from source and does not require any installation of software on remote -machines, many users will actually track the development version. +machines, many users will actually track the development version. -Ansible's release cycles are usually about two months long. Due to this -short release cycle, minor bugs will generally be fixed in the next release versus maintaining -backports on the stable branch. Major bugs will still have maintenance releases when needed, though -these are infrequent. +Ansible's release cycles are usually about four months long. Due to this short release cycle, +minor bugs will generally be fixed in the next release versus maintaining backports on the stable branch. 
+Major bugs will still have maintenance releases when needed, though these are infrequent. If you are wishing to run the latest released version of Ansible and you are running Red Hat Enterprise Linux (TM), CentOS, Fedora, Debian, or Ubuntu, we recommend using the OS package manager. From 20005660313b5abc4188704fc3a37a4c25f83e62 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 28 Dec 2015 10:24:28 -0500 Subject: [PATCH 3254/3617] minor fix to become docs --- docsite/rst/become.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst index 64628515c6cb9a..7597643f8839be 100644 --- a/docsite/rst/become.rst +++ b/docsite/rst/become.rst @@ -1,5 +1,5 @@ -Ansible Privilege Escalation -++++++++++++++++++++++++++++ +Become (Privilege Escalation) ++++++++++++++++++++++++++++++ Ansible can use existing privilege escalation systems to allow a user to execute tasks as another. @@ -7,17 +7,17 @@ Ansible can use existing privilege escalation systems to allow a user to execute Become `````` -Before 1.9 Ansible mostly allowed the use of sudo and a limited use of su to allow a login/remote user to become a different user -and execute tasks, create resources with the 2nd user's permissions. As of 1.9 'become' supersedes the old sudo/su, while still -being backwards compatible. This new system also makes it easier to add other privilege escalation tools like pbrun (Powerbroker), -pfexec and others. +Before 1.9 Ansible mostly allowed the use of `sudo` and a limited use of `su` to allow a login/remote user to become a different user +and execute tasks, create resources with the 2nd user's permissions. As of 1.9 `become` supersedes the old sudo/su, while still +being backwards compatible. This new system also makes it easier to add other privilege escalation tools like `pbrun` (Powerbroker), +`pfexec` and others. 
New directives -------------- become - equivalent to adding 'sudo:' or 'su:' to a play or task, set to 'true'/'yes' to activate privilege escalation + equivalent to adding `sudo:` or `su:` to a play or task, set to 'true'/'yes' to activate privilege escalation become_user equivalent to adding 'sudo_user:' or 'su_user:' to a play or task, set to user with desired privileges From 56454d6a9135fb18e5d0545b9162b940cbcb8a78 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 28 Dec 2015 12:25:27 -0500 Subject: [PATCH 3255/3617] added newer vars to 'reset_vars' these vars pass back info to the task about the connection moved to their own block at the start of the file for readability and added the newer standard vars --- lib/ansible/playbook/play_context.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py index 81223500adf879..6b19f4c1723144 100644 --- a/lib/ansible/playbook/play_context.py +++ b/lib/ansible/playbook/play_context.py @@ -125,6 +125,18 @@ 'remote_user', ) +RESET_VARS = ( + 'ansible_connection', + 'ansible_ssh_host', + 'ansible_ssh_pass', + 'ansible_ssh_port', + 'ansible_ssh_user', + 'ansible_ssh_private_key_file', + 'ansible_ssh_pipelining', + 'ansible_user', + 'ansible_host', + 'ansible_port', +) class PlayContext(Base): @@ -505,7 +517,8 @@ def update_vars(self, variables): # TODO: should we be setting the more generic values here rather than # the more specific _ssh_ ones? 
- for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file', 'ansible_ssh_pipelining']: + for special_var in RESET_VARS: + if special_var not in variables: for prop, varnames in MAGIC_VARIABLE_MAPPING.items(): if special_var in varnames: From 2d11cfab92f9d26448461b4bc81f466d1910a15e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 29 Dec 2015 11:40:18 -0500 Subject: [PATCH 3256/3617] Squashed commit of the following: commit 24efa310b58c431b4d888a6315d1285da918f670 Author: James Cammarata Date: Tue Dec 29 11:23:52 2015 -0500 Adding an additional test for copy exclusion Adds a negative test for the situation when an exclusion doesn't exist in the target to be copied. commit 643ba054877cf042177d65e6e2958178bdd2fe88 Merge: e6ee59f 66a8f7e Author: James Cammarata Date: Tue Dec 29 10:59:18 2015 -0500 Merge branch 'speedup' of https://github.com/chrismeyersfsu/ansible into chrismeyersfsu-speedup commit 66a8f7e873ca90f7848e47b04d9b62aed23a45df Author: Chris Meyers Date: Mon Dec 28 09:47:00 2015 -0500 better api and tests added * _copy_results = deepcopy for better performance * _copy_results_exclude to deepcopy but exclude certain fields. Pop fields that do not need to be deep copied. Re-assign popped fields after deep copy so we don't modify the original, to be copied, object. 
* _copy_results_exclude unit tests commit 93490960ff4e75f38a7cc6f6d49f10f949f1a7da Author: Chris Meyers Date: Fri Dec 25 23:17:26 2015 -0600 remove unneeded deepcopy fields --- lib/ansible/plugins/callback/__init__.py | 19 ++++- test/units/plugins/callback/test_callback.py | 82 ++++++++++++++++++++ 2 files changed, 97 insertions(+), 4 deletions(-) create mode 100644 test/units/plugins/callback/test_callback.py diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index 7371fe0a51e8d5..cc2a9ad0e75c7c 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -59,9 +59,20 @@ def __init__(self, display=None): version = getattr(self, 'CALLBACK_VERSION', '1.0') self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version)) - def _copy_result(self, result): - ''' helper for callbacks, so they don't all have to include deepcopy ''' - return deepcopy(result) + ''' helper for callbacks, so they don't all have to include deepcopy ''' + _copy_result = deepcopy + + def _copy_result_exclude(self, result, exclude): + values = [] + for e in exclude: + values.append(getattr(result, e)) + setattr(result, e, None) + + result_copy = deepcopy(result) + for i,e in enumerate(exclude): + setattr(result, e, values[i]) + + return result_copy def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): if result.get('_ansible_no_log', False): @@ -130,7 +141,7 @@ def _get_item(self, result): def _process_items(self, result): for res in result._result['results']: - newres = self._copy_result(result) + newres = self._copy_result_exclude(result, ['_result']) res['item'] = self._get_item(res) newres._result = res if 'failed' in res and res['failed']: diff --git a/test/units/plugins/callback/test_callback.py b/test/units/plugins/callback/test_callback.py new file mode 100644 index 00000000000000..54964ac9df2ed0 --- +++ 
b/test/units/plugins/callback/test_callback.py @@ -0,0 +1,82 @@ +# (c) 2012-2014, Chris Meyers +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Make coding more python3-ish +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from six import PY3 +from copy import deepcopy + +from ansible.compat.tests import unittest +from ansible.compat.tests.mock import patch, mock_open + +from ansible.plugins.callback import CallbackBase +import ansible.plugins.callback as callish + +class TestCopyResultExclude(unittest.TestCase): + def setUp(self): + class DummyClass(): + def __init__(self): + self.bar = [ 1, 2, 3 ] + self.a = { + "b": 2, + "c": 3, + } + self.b = { + "c": 3, + "d": 4, + } + self.foo = DummyClass() + self.cb = CallbackBase() + + def tearDown(self): + pass + + def test_copy_logic(self): + res = self.cb._copy_result_exclude(self.foo, ()) + self.assertEqual(self.foo.bar, res.bar) + + def test_copy_deep(self): + res = self.cb._copy_result_exclude(self.foo, ()) + self.assertNotEqual(id(self.foo.bar), id(res.bar)) + + def test_no_exclude(self): + res = self.cb._copy_result_exclude(self.foo, ()) + self.assertEqual(self.foo.bar, res.bar) + self.assertEqual(self.foo.a, res.a) + self.assertEqual(self.foo.b, res.b) + + def test_exclude(self): + res = self.cb._copy_result_exclude(self.foo, ['bar', 'b']) + self.assertIsNone(res.bar) + 
self.assertIsNone(res.b) + self.assertEqual(self.foo.a, res.a) + + def test_result_unmodified(self): + bar_id = id(self.foo.bar) + a_id = id(self.foo.a) + res = self.cb._copy_result_exclude(self.foo, ['bar', 'a']) + + self.assertEqual(self.foo.bar, [ 1, 2, 3 ]) + self.assertEqual(bar_id, id(self.foo.bar)) + + self.assertEqual(self.foo.a, dict(b=2, c=3)) + self.assertEqual(a_id, id(self.foo.a)) + + self.assertRaises(AttributeError, self.cb._copy_result_exclude, self.foo, ['a', 'c', 'bar']) + From d3deb24ead59d5fdbecad3c946848537f95772ad Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 29 Dec 2015 15:41:00 -0500 Subject: [PATCH 3257/3617] output color is now configurable --- examples/ansible.cfg | 11 ++++++ lib/ansible/cli/galaxy.py | 25 +++++++------- lib/ansible/constants.py | 11 ++++++ lib/ansible/executor/task_executor.py | 2 +- lib/ansible/playbook/__init__.py | 3 +- lib/ansible/plugins/callback/default.py | 46 ++++++++++++------------- lib/ansible/plugins/callback/minimal.py | 17 +++++---- lib/ansible/plugins/callback/oneline.py | 14 ++++---- lib/ansible/utils/color.py | 3 +- lib/ansible/utils/display.py | 14 ++++---- 10 files changed, 86 insertions(+), 60 deletions(-) diff --git a/examples/ansible.cfg b/examples/ansible.cfg index ec3ddf20641301..b357738b39c5f8 100644 --- a/examples/ansible.cfg +++ b/examples/ansible.cfg @@ -262,3 +262,14 @@ # the default behaviour that copies the existing context or uses the user default # needs to be changed to use the file system dependent context. 
#special_context_filesystems=nfs,vboxsf,fuse,ramfs + +[colors] +#verbose = blue +#warn = bright purple +#error = red +#debug = dark gray +#deprecate = purple +#skip = cyan +#unreachable = red +#ok = green +#changed = yellow diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 34afa03c9f7c2f..476a7d0f897f33 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -514,7 +514,7 @@ def execute_search(self): tags=self.options.tags, author=self.options.author, page_size=page_size) if response['count'] == 0: - display.display("No roles match your search.", color="yellow") + display.display("No roles match your search.", color=C.COLOR_ERROR) return True data = '' @@ -570,10 +570,10 @@ def execute_import(self): colors = { 'INFO': 'normal', - 'WARNING': 'yellow', - 'ERROR': 'red', - 'SUCCESS': 'green', - 'FAILED': 'red' + 'WARNING': C.COLOR_WARN, + 'ERROR': C.COLOR_ERROR, + 'SUCCESS': C.COLOR_OK, + 'FAILED': C.COLOR_ERROR, } if len(self.args) < 2: @@ -592,11 +592,10 @@ def execute_import(self): # found multiple roles associated with github_user/github_repo display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." 
% (github_user,github_repo), color='yellow') - display.display("The following Galaxy roles are being updated:" + u'\n', color='yellow') + display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED) for t in task: - display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color='yellow') - display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), - color='yellow') + display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED) + display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED) return 0 # found a single role as expected display.display("Successfully submitted import request %d" % task[0]['id']) @@ -633,17 +632,17 @@ def execute_setup(self): # None found display.display("No integrations found.") return 0 - display.display(u'\n' + "ID Source Repo", color="green") - display.display("---------- ---------- ----------", color="green") + display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK) + display.display("---------- ---------- ----------", color=C.COLOR_OK) for secret in secrets: display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'], - secret['github_repo']),color="green") + secret['github_repo']),color=C.COLOR_OK) return 0 if self.options.remove_id: # Remove a secret self.api.remove_secret(self.options.remove_id) - display.display("Secret removed. Integrations using this secret will not longer work.", color="green") + display.display("Secret removed. 
Integrations using this secret will not longer work.", color=C.COLOR_OK) return 0 if len(self.args) < 4: diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py index 5df9602246ae4a..9b84825d6bcf41 100644 --- a/lib/ansible/constants.py +++ b/lib/ansible/constants.py @@ -268,6 +268,17 @@ def load_config_file(): DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_" STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True ) +# colors +COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue') +COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple') +COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red') +COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray') +COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple') +COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan') +COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red') +COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green') +COLOR_CHANGED = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_CHANGED', 'yellow') + # non-configurable things MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script'] MODULE_NO_JSON = ['command', 'shell', 'raw'] diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py index c8b6fa179bcd46..4a2d30a2cd29d5 100644 --- a/lib/ansible/executor/task_executor.py +++ b/lib/ansible/executor/task_executor.py @@ -393,7 +393,7 @@ def _execute(self, variables=None): result = None for attempt in range(retries): if attempt > 0: - display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result), color="dark gray") + display.display("FAILED - RETRYING: %s (%d retries left). 
Result was: %s" % (self._task, retries-attempt, result), color=C.COLOR_DEBUG) result['attempts'] = attempt + 1 display.debug("running the handler") diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py index 0ae443f84360a2..947224d61fcbc2 100644 --- a/lib/ansible/playbook/__init__.py +++ b/lib/ansible/playbook/__init__.py @@ -25,6 +25,7 @@ from ansible.playbook.play import Play from ansible.playbook.playbook_include import PlaybookInclude from ansible.plugins import get_all_plugin_loaders +from ansible import constants as C try: from __main__ import display @@ -87,7 +88,7 @@ def _load_playbook_data(self, file_name, variable_manager): if pb is not None: self._entries.extend(pb._entries) else: - display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color='cyan') + display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color=C.COLOR_SKIP) else: entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader) self._entries.append(entry_obj) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index e515945bba516b..421104ee83741d 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -44,7 +44,7 @@ def v2_runner_on_failed(self, result, ignore_errors=False): else: msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] @@ -53,12 +53,12 @@ def v2_runner_on_failed(self, result, ignore_errors=False): self._process_items(result) else: if delegated_vars: - self._display.display("fatal: [%s -> %s]: FAILED! 
=> %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) else: - self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR) if result._task.ignore_errors: - self._display.display("...ignoring", color='cyan') + self._display.display("...ignoring", color=C.COLOR_SKIP) def v2_runner_on_ok(self, result): @@ -71,13 +71,13 @@ def v2_runner_on_ok(self, result): msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' + color = C.COLOR_CHANGED else: if delegated_vars: msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "ok: [%s]" % result._host.get_name() - color = 'green' + color = C.COLOR_OK if result._task.loop and 'results' in result._result: self._process_items(result) @@ -97,17 +97,17 @@ def v2_runner_on_skipped(self, result): msg = "skipping: [%s]" % result._host.get_name() if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color='cyan') + self._display.display(msg, color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - self._display.display("fatal: [%s -> %s]: UNREACHABLE! 
=> %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) else: - self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red') + self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR) def v2_playbook_on_no_hosts_matched(self): - self._display.display("skipping: no hosts matched", color='cyan') + self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP) def v2_playbook_on_no_hosts_remaining(self): self._display.banner("NO MORE HOSTS LEFT") @@ -117,7 +117,7 @@ def v2_playbook_on_task_start(self, task, is_conditional): if self._display.verbosity > 2: path = task.get_path() if path: - self._display.display("task path: %s" % path, color='dark gray') + self._display.display("task path: %s" % path, color=C.COLOR_DEBUG) def v2_playbook_on_cleanup_task_start(self, task): self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip()) @@ -155,13 +155,13 @@ def v2_playbook_item_on_ok(self, result): msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "changed: [%s]" % result._host.get_name() - color = 'yellow' + color = C.COLOR_CHANGED else: if delegated_vars: msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) else: msg = "ok: [%s]" % result._host.get_name() - color = 'green' + color = C.COLOR_OK msg += " => (item=%s)" % (result._result['item'],) @@ -179,15 +179,15 @@ def v2_playbook_item_on_failed(self, result): else: msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'] - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] if delegated_vars: - self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red') + self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR) else: - self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red') + self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR) self._handle_warnings(result._result) @@ -195,12 +195,12 @@ def v2_playbook_item_on_skipped(self, result): msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item']) if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result: msg += " => %s" % self._dump_results(result._result) - self._display.display(msg, color='cyan') + self._display.display(msg, color=C.COLOR_SKIP) def v2_playbook_on_include(self, included_file): msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) - color = 'cyan' - self._display.display(msg, color='cyan') + color = C.COLOR_SKIP + self._display.display(msg, color=C.COLOR_SKIP) def v2_playbook_on_stats(self, stats): self._display.banner("PLAY RECAP") @@ -211,10 +211,10 @@ def v2_playbook_on_stats(self, stats): self._display.display(u"%s : %s %s %s %s" % ( hostcolor(h, t), - colorize(u'ok', 
t['ok'], 'green'), - colorize(u'changed', t['changed'], 'yellow'), - colorize(u'unreachable', t['unreachable'], 'red'), - colorize(u'failed', t['failures'], 'red')), + colorize(u'ok', t['ok'], C.COLOR_OK), + colorize(u'changed', t['changed'], C.COLOR_CHANGED), + colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), + colorize(u'failed', t['failures'], C.COLOR_ERROR)), screen_only=True ) diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py index 71f9f5dfeef01b..9fa257af747744 100644 --- a/lib/ansible/plugins/callback/minimal.py +++ b/lib/ansible/plugins/callback/minimal.py @@ -53,29 +53,32 @@ def v2_runner_on_failed(self, result, ignore_errors=False): else: msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'] - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color='red') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR) else: - self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red') + self._display.display("%s | FAILED! 
=> %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR) def v2_runner_on_ok(self, result): self._clean_results(result._result, result._task.action) if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color='green') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color=C.COLOR_OK) else: - self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='green') + if 'changed' in result._result and result._result['changed']: + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_CHANGED) + else: + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_OK) self._handle_warnings(result._result) def v2_runner_on_skipped(self, result): - self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan') + self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): - self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='yellow') + self._display.display("%s | UNREACHABLE! 
=> %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE) def v2_on_file_diff(self, result): if 'diff' in result._result and result._result['diff']: diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py index a99b680c05ce20..0f6283fd441bf0 100644 --- a/lib/ansible/plugins/callback/oneline.py +++ b/lib/ansible/plugins/callback/oneline.py @@ -52,24 +52,24 @@ def v2_runner_on_failed(self, result, ignore_errors=False): msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','') if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color=C.COLOR_ERROR) else: - self._display.display(msg, color='red') + self._display.display(msg, color=C.COLOR_ERROR) # finally, remove the exception from the result so it's not shown every time del result._result['exception'] - self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red') + self._display.display("%s | FAILED! 
=> %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_ERROR) def v2_runner_on_ok(self, result): if result._task.action in C.MODULE_NO_JSON: - self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green') + self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color=C.COLOR_OK) else: - self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green') + self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_OK) def v2_runner_on_unreachable(self, result): - self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow') + self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color=C.COLOR_UNREACHABLE) def v2_runner_on_skipped(self, result): - self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan') + self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP) diff --git a/lib/ansible/utils/color.py b/lib/ansible/utils/color.py index 55060ace0408d7..81a05d749e1712 100644 --- a/lib/ansible/utils/color.py +++ b/lib/ansible/utils/color.py @@ -62,7 +62,8 @@ 'purple': u'0;35', 'bright red': u'1;31', 'yellow': u'0;33', 'bright purple': u'1;35', 'dark gray': u'1;30', 'bright yellow': u'1;33', - 'normal': u'0' + 'magenta': u'0;35', 'bright magenta': u'1;35', + 'normal': u'0' , } def stringc(text, color): diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py index 3d51f17de4765e..8700a5101865f1 100644 --- a/lib/ansible/utils/display.py +++ b/lib/ansible/utils/display.py @@ -145,7 +145,7 @@ def display(self, msg, color=None, stderr=False, screen_only=False, log_only=Fal # characters that are invalid in the user's locale msg2 = to_unicode(msg2, 
self._output_encoding(stderr=stderr)) - if color == 'red': + if color == C.COLOR_ERROR: logger.error(msg2) else: logger.info(msg2) @@ -168,7 +168,7 @@ def vvvvvv(self, msg, host=None): def debug(self, msg): if C.DEFAULT_DEBUG: debug_lock.acquire() - self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color='dark gray') + self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG) debug_lock.release() def verbose(self, msg, host=None, caplevel=2): @@ -176,9 +176,9 @@ def verbose(self, msg, host=None, caplevel=2): #msg = utils.sanitize_output(msg) if self.verbosity > caplevel: if host is None: - self.display(msg, color='blue') + self.display(msg, color=C.COLOR_VERBOSE) else: - self.display("<%s> %s" % (host, msg), color='blue', screen_only=True) + self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, screen_only=True) def deprecated(self, msg, version=None, removed=False): ''' used to print out a deprecation message.''' @@ -199,7 +199,7 @@ def deprecated(self, msg, version=None, removed=False): new_msg = "\n".join(wrapped) + "\n" if new_msg not in self._deprecations: - self.display(new_msg.strip(), color='purple', stderr=True) + self.display(new_msg.strip(), color=C.COLOR_DEPRECATE, stderr=True) self._deprecations[new_msg] = 1 def warning(self, msg): @@ -207,7 +207,7 @@ def warning(self, msg): wrapped = textwrap.wrap(new_msg, self.columns) new_msg = "\n".join(wrapped) + "\n" if new_msg not in self._warns: - self.display(new_msg, color='bright purple', stderr=True) + self.display(new_msg, color=C.COLOR_WARN, stderr=True) self._warns[new_msg] = 1 def system_warning(self, msg): @@ -258,7 +258,7 @@ def error(self, msg, wrap_text=True): else: new_msg = msg if new_msg not in self._errors: - self.display(new_msg, color='red', stderr=True) + self.display(new_msg, color=C.COLOR_ERROR, stderr=True) self._errors[new_msg] = 1 @staticmethod From 5accc9858739d2184235bf8722b83ff7bcc97056 Mon Sep 17 00:00:00 2001 From: mgarstecki Date: 
Wed, 30 Dec 2015 11:57:12 +0100 Subject: [PATCH 3258/3617] Correction of a double negation The sentence seemed to imply that return codes from modules are significant, while they are not. The second part of the sentence confirms this, as it advises to use standard return codes only for future proofing. --- docsite/rst/developing_modules.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst index d3781b2f7fd0e2..5d664d56313579 100644 --- a/docsite/rst/developing_modules.rst +++ b/docsite/rst/developing_modules.rst @@ -347,7 +347,7 @@ and guidelines: * In the event of failure, a key of 'failed' should be included, along with a string explanation in 'msg'. Modules that raise tracebacks (stacktraces) are generally considered 'poor' modules, though Ansible can deal with these returns and will automatically convert anything unparseable into a failed result. If you are using the AnsibleModule common Python code, the 'failed' element will be included for you automatically when you call 'fail_json'. -* Return codes from modules are not actually not significant, but continue on with 0=success and non-zero=failure for reasons of future proofing. +* Return codes from modules are actually not significant, but continue on with 0=success and non-zero=failure for reasons of future proofing. * As results from many hosts will be aggregated at once, modules should return only relevant output. Returning the entire contents of a log file is generally bad form. From 946b82bef71d3b2d4ecf07ec937b650634bc84a0 Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Wed, 30 Dec 2015 18:21:34 +0100 Subject: [PATCH 3259/3617] shred ansible-vault tmp_file. Also when editor is interrupted. 
--- lib/ansible/parsing/vault/__init__.py | 35 ++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index d8cf66feca42c3..b7304d156fecac 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -219,7 +219,27 @@ class VaultEditor: def __init__(self, password): self.vault = VaultLib(password) - + + def _shred_file(self, tmp_path): + """securely destroy a decrypted file.""" + def generate_data(length): + import string, random + chars = string.ascii_lowercase + string.ascii_uppercase + string.digits + return ''.join(random.SystemRandom().choice(chars) for _ in range(length)) + + if not os.path.isfile(tmp_path): + # file is already gone + return + + ld = os.path.getsize(tmp_path) + passes = 3 + with open(tmp_path, "w") as fh: + for _ in range(int(passes)): + data = generate_data(ld) + fh.write(data) + fh.seek(0, 0) + os.remove(tmp_path) + def _edit_file_helper(self, filename, existing_data=None, force_save=False): # Create a tempfile @@ -229,12 +249,18 @@ def _edit_file_helper(self, filename, existing_data=None, force_save=False): self.write_data(existing_data, tmp_path) # drop the user into an editor on the tmp file - call(self._editor_shell_command(tmp_path)) + try: + call(self._editor_shell_command(tmp_path)) + except: + # whatever happens, destroy the decrypted file + self._shred_file(tmp_path) + raise + tmpdata = self.read_data(tmp_path) # Do nothing if the content has not changed if existing_data == tmpdata and not force_save: - os.remove(tmp_path) + self._shred_file(tmp_path) return # encrypt new data and write out to tmp @@ -329,7 +355,7 @@ def write_data(self, data, filename): sys.stdout.write(bytes) else: if os.path.isfile(filename): - os.remove(filename) + self._shred_file(filename) with open(filename, "wb") as fh: fh.write(bytes) @@ -338,6 +364,7 @@ def shuffle_files(self, src, dest): # overwrite dest 
with src if os.path.isfile(dest): prev = os.stat(dest) + # old file 'dest' was encrypted, no need to _shred_file os.remove(dest) shutil.move(src, dest) From e39e8ba308364f16e3b74db96b15415ab97b5f52 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 30 Dec 2015 13:49:39 -0500 Subject: [PATCH 3260/3617] Fix logic mistake in unarchive action plugin --- lib/ansible/plugins/action/unarchive.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py index cd89b936fedc77..b6c43a3c595024 100644 --- a/lib/ansible/plugins/action/unarchive.py +++ b/lib/ansible/plugins/action/unarchive.py @@ -69,13 +69,13 @@ def run(self, tmp=None, task_vars=None): source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source) remote_checksum = self._remote_checksum(dest, all_vars=task_vars) - if remote_checksum != '3': + if remote_checksum == '4': result['failed'] = True - result['msg'] = "dest '%s' must be an existing dir" % dest + result['msg'] = "python isn't present on the system. Unable to compute checksum" return result - elif remote_checksum == '4': + elif remote_checksum != '3': result['failed'] = True - result['msg'] = "python isn't present on the system. 
Unable to compute checksum" + result['msg'] = "dest '%s' must be an existing dir" % dest return result if copy: From 5c34be15b1c800a513a88005c6e6b05f360dfef1 Mon Sep 17 00:00:00 2001 From: Thilo Uttendorfer Date: Thu, 31 Dec 2015 02:31:38 +0100 Subject: [PATCH 3261/3617] Fix unsupported format character --- lib/ansible/utils/module_docs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py index 4a90c3caca1a61..14a5d030565d37 100755 --- a/lib/ansible/utils/module_docs.py +++ b/lib/ansible/utils/module_docs.py @@ -67,7 +67,7 @@ def get_docstring(filename, verbose=False): theid = t.id except AttributeError as e: # skip errors can happen when trying to use the normal code - display.warning("Failed to assign id for %t on %s, skipping" % (t, filename)) + display.warning("Failed to assign id for %s on %s, skipping" % (t, filename)) continue if 'DOCUMENTATION' in theid: From c4d2dbfcdbf8743760d658f1bcbec23e912514a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yannig=20Perr=C3=A9?= Date: Fri, 1 Jan 2016 15:55:51 +0100 Subject: [PATCH 3262/3617] Replace to_string by to_unicode. 
Fix https://github.com/ansible/ansible/issues/13707 --- lib/ansible/inventory/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py index 095118e50eb0bd..885005960f504d 100644 --- a/lib/ansible/inventory/__init__.py +++ b/lib/ansible/inventory/__init__.py @@ -112,7 +112,7 @@ def parse_inventory(self, host_list): try: (host, port) = parse_address(h, allow_ranges=False) except AnsibleError as e: - display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_string(e)) + display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e)) host = h port = None all.add_host(Host(host, port)) From 6f2f7a79b34910a75e6eafde5a7872b3e7bcb770 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 1 Jan 2016 21:52:41 -0500 Subject: [PATCH 3263/3617] add support for diff in file settings this allows modules to report on what specifically changed when using common file functions --- lib/ansible/module_utils/basic.py | 61 ++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 13 deletions(-) diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py index 6fd382aa4901e2..1366bfceb40953 100644 --- a/lib/ansible/module_utils/basic.py +++ b/lib/ansible/module_utils/basic.py @@ -751,7 +751,7 @@ def set_default_selinux_context(self, path, changed): context = self.selinux_default_context(path) return self.set_context_if_different(path, context, False) - def set_context_if_different(self, path, context, changed): + def set_context_if_different(self, path, context, changed, diff=None): if not HAVE_SELINUX or not self.selinux_enabled(): return changed @@ -772,6 +772,14 @@ def set_context_if_different(self, path, context, changed): new_context[i] = cur_context[i] if cur_context != new_context: + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['secontext'] = cur_context + if 'after' not 
in diff: + diff['after'] = {} + diff['after']['secontext'] = new_context + try: if self.check_mode: return True @@ -785,7 +793,7 @@ def set_context_if_different(self, path, context, changed): changed = True return changed - def set_owner_if_different(self, path, owner, changed): + def set_owner_if_different(self, path, owner, changed, diff=None): path = os.path.expanduser(path) if owner is None: return changed @@ -798,6 +806,15 @@ def set_owner_if_different(self, path, owner, changed): except KeyError: self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner) if orig_uid != uid: + + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['owner'] = orig_uid + if 'after' not in diff: + diff['after'] = {} + diff['after']['owner'] = uid + if self.check_mode: return True try: @@ -807,7 +824,7 @@ def set_owner_if_different(self, path, owner, changed): changed = True return changed - def set_group_if_different(self, path, group, changed): + def set_group_if_different(self, path, group, changed, diff=None): path = os.path.expanduser(path) if group is None: return changed @@ -820,6 +837,15 @@ def set_group_if_different(self, path, group, changed): except KeyError: self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group) if orig_gid != gid: + + if diff is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['group'] = orig_gid + if 'after' not in diff: + diff['after'] = {} + diff['after']['group'] = gid + if self.check_mode: return True try: @@ -829,7 +855,7 @@ def set_group_if_different(self, path, group, changed): changed = True return changed - def set_mode_if_different(self, path, mode, changed): + def set_mode_if_different(self, path, mode, changed, diff=None): path = os.path.expanduser(path) path_stat = os.lstat(path) @@ -851,6 +877,15 @@ def set_mode_if_different(self, path, mode, changed): prev_mode = stat.S_IMODE(path_stat.st_mode) if prev_mode != mode: + + if diff 
is not None: + if 'before' not in diff: + diff['before'] = {} + diff['before']['mode'] = prev_mode + if 'after' not in diff: + diff['after'] = {} + diff['after']['mode'] = mode + if self.check_mode: return True # FIXME: comparison against string above will cause this to be executed @@ -984,27 +1019,27 @@ def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms): or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm] return reduce(or_reduce, perms, 0) - def set_fs_attributes_if_different(self, file_args, changed): + def set_fs_attributes_if_different(self, file_args, changed, diff=None): # set modes owners and context as needed changed = self.set_context_if_different( - file_args['path'], file_args['secontext'], changed + file_args['path'], file_args['secontext'], changed, diff ) changed = self.set_owner_if_different( - file_args['path'], file_args['owner'], changed + file_args['path'], file_args['owner'], changed, diff ) changed = self.set_group_if_different( - file_args['path'], file_args['group'], changed + file_args['path'], file_args['group'], changed, diff ) changed = self.set_mode_if_different( - file_args['path'], file_args['mode'], changed + file_args['path'], file_args['mode'], changed, diff ) return changed - def set_directory_attributes_if_different(self, file_args, changed): - return self.set_fs_attributes_if_different(file_args, changed) + def set_directory_attributes_if_different(self, file_args, changed, diff=None): + return self.set_fs_attributes_if_different(file_args, changed, diff) - def set_file_attributes_if_different(self, file_args, changed): - return self.set_fs_attributes_if_different(file_args, changed) + def set_file_attributes_if_different(self, file_args, changed, diff=None): + return self.set_fs_attributes_if_different(file_args, changed, diff) def add_path_info(self, kwargs): ''' From 210cf06d9ac8e62b15d6f34e9c63c1b98986a1d5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Sat, 2 Jan 2016 00:31:09 -0500 
Subject: [PATCH 3264/3617] Tweak how strategies evaluate failed hosts via the iterator and bug fixes * Added additional methods to the iterator code to assess host failures while also taking into account the block rescue/always states * Fixed bugs in the free strategy, where results were not always being processed after being collected * Added some prettier printing to the state output from iterator Fixes #13699 --- lib/ansible/executor/play_iterator.py | 46 ++++++++++++++++++++++++-- lib/ansible/plugins/strategy/free.py | 12 ++----- lib/ansible/plugins/strategy/linear.py | 5 +-- 3 files changed, 49 insertions(+), 14 deletions(-) diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py index 534f216c30a134..147e46e5aa792e 100644 --- a/lib/ansible/executor/play_iterator.py +++ b/lib/ansible/executor/play_iterator.py @@ -57,14 +57,32 @@ def __init__(self, blocks): self.always_child_state = None def __repr__(self): - return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s" % ( + def _run_state_to_string(n): + states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"] + try: + return states[n] + except IndexError: + return "UNKNOWN STATE" + + def _failed_state_to_string(n): + states = {1:"FAILED_SETUP", 2:"FAILED_TASKS", 4:"FAILED_RESCUE", 8:"FAILED_ALWAYS"} + if n == 0: + return "FAILED_NONE" + else: + ret = [] + for i in (1, 2, 4, 8): + if n & i: + ret.append(states[i]) + return "|".join(ret) + + return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? 
%s" % ( self.cur_block, self.cur_regular_task, self.cur_rescue_task, self.cur_always_task, self.cur_role, - self.run_state, - self.fail_state, + _run_state_to_string(self.run_state), + _failed_state_to_string(self.fail_state), self.pending_setup, self.tasks_child_state, self.rescue_child_state, @@ -347,6 +365,28 @@ def mark_host_failed(self, host): def get_failed_hosts(self): return dict((host, True) for (host, state) in iteritems(self._host_states) if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE) + def _check_failed_state(self, state): + if state is None: + return False + elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state): + return True + elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state): + return True + elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state): + return True + elif state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE: + if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0: + return False + elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0: + return False + else: + return True + return False + + def is_failed(self, host): + s = self.get_host_state(host) + return self._check_failed_state(s) + def get_original_task(self, host, task): ''' Finds the task in the task list which matches the UUID of the given task. 
diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py index f4fc1226a1f542..976d33abba042a 100644 --- a/lib/ansible/plugins/strategy/free.py +++ b/lib/ansible/plugins/strategy/free.py @@ -78,7 +78,7 @@ def run(self, iterator, play_context): (state, task) = iterator.get_next_task_for_host(host, peek=True) display.debug("free host state: %s" % state) display.debug("free host task: %s" % task) - if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task: + if not iterator.is_failed(host) and host_name not in self._tqm._unreachable_hosts and task: # set the flag so the outer loop knows we've still found # some work which needs to be done @@ -135,7 +135,7 @@ def run(self, iterator, play_context): if last_host == starting_host: break - results = self._process_pending_results(iterator) + results = self._wait_on_pending_results(iterator) host_results.extend(results) try: @@ -176,13 +176,7 @@ def run(self, iterator, play_context): display.debug("done adding collected blocks to iterator") # pause briefly so we don't spin lock - time.sleep(0.05) - - try: - results = self._wait_on_pending_results(iterator) - host_results.extend(results) - except Exception as e: - pass + time.sleep(0.001) # run the base class run() method, which executes the cleanup function # and runs any outstanding handlers which have been triggered diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py index 7bb227dbaea857..bfa2c37ce437cd 100644 --- a/lib/ansible/plugins/strategy/linear.py +++ b/lib/ansible/plugins/strategy/linear.py @@ -54,7 +54,8 @@ def _get_next_task_lockstep(self, hosts, iterator): host_tasks = {} display.debug("building list of next tasks for hosts") for host in hosts: - host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True) + if not iterator.is_failed(host): + host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True) display.debug("done 
building task lists") num_setups = 0 @@ -98,7 +99,7 @@ def _advance_selected_hosts(hosts, cur_block, cur_state): rvals = [] display.debug("starting to advance hosts") for host in hosts: - host_state_task = host_tasks[host.name] + host_state_task = host_tasks.get(host.name) if host_state_task is None: continue (s, t) = host_state_task From 7193d27acc7719b25b70eb4709964d0c93796162 Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Mon, 4 Jan 2016 17:19:35 +0100 Subject: [PATCH 3265/3617] add os.fsync() so that the shredding data (hopefully) hits the drive --- lib/ansible/parsing/vault/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index b7304d156fecac..1eca0cd57146cc 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -235,9 +235,10 @@ def generate_data(length): passes = 3 with open(tmp_path, "w") as fh: for _ in range(int(passes)): + fh.seek(0, 0) data = generate_data(ld) fh.write(data) - fh.seek(0, 0) + os.fsync(fh) os.remove(tmp_path) def _edit_file_helper(self, filename, existing_data=None, force_save=False): From 8599c566701582024c6eaeeb5cf52d249f48a49e Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Mon, 4 Jan 2016 17:46:40 +0100 Subject: [PATCH 3266/3617] Do not set 'changed' to True when using group_by Since group_by is not changing in any way to the remote system, there is no change. This also make things more consistent with the set_fact plugin. 
--- lib/ansible/plugins/action/group_by.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py index a891d3c70d5aa0..99f9db2a88c54a 100644 --- a/lib/ansible/plugins/action/group_by.py +++ b/lib/ansible/plugins/action/group_by.py @@ -40,6 +40,6 @@ def run(self, tmp=None, task_vars=None): group_name = self._task.args.get('key') group_name = group_name.replace(' ','-') - result['changed'] = True + result['changed'] = False result['add_group'] = group_name return result From 1e911375e850e79295d053f3e3c45c9d9d247159 Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Mon, 4 Jan 2016 18:13:59 +0100 Subject: [PATCH 3267/3617] add docs, remove unnecessary int() cast --- lib/ansible/parsing/vault/__init__.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 1eca0cd57146cc..28e819860aeac8 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -221,7 +221,22 @@ def __init__(self, password): self.vault = VaultLib(password) def _shred_file(self, tmp_path): - """securely destroy a decrypted file.""" + """Securely destroy a decrypted file + + Inspired by unix `shred', try to destroy the secrets "so that they can be + recovered only with great difficulty with specialised hardware, if at all". + + See https://github.com/ansible/ansible/pull/13700 . + + Note that: + - For flash: overwriting would have no effect (due to wear leveling). But the + added disk wear is considered insignificant. + - For other storage systems: the filesystem lies to the vfs (kernel), the disk + driver lies to the filesystem and the disk lies to the driver. But it's better + than nothing. + - most tmp dirs are now tmpfs (ramdisks), for which this is a non-issue. 
+ """ + def generate_data(length): import string, random chars = string.ascii_lowercase + string.ascii_uppercase + string.digits @@ -234,7 +249,7 @@ def generate_data(length): ld = os.path.getsize(tmp_path) passes = 3 with open(tmp_path, "w") as fh: - for _ in range(int(passes)): + for _ in range(passes): fh.seek(0, 0) data = generate_data(ld) fh.write(data) From de529c17340074b1d96937cf4d688da0a7e3bd31 Mon Sep 17 00:00:00 2001 From: "Fuentes, Christopher" Date: Mon, 4 Jan 2016 13:52:06 -0500 Subject: [PATCH 3268/3617] minor grammar error was making me pull hair out --- docsite/rst/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst index 90b9a1cb09e5ea..e51a1751feed04 100644 --- a/docsite/rst/faq.rst +++ b/docsite/rst/faq.rst @@ -38,7 +38,7 @@ You can also dictate the connection type to be used, if you want:: foo.example.com bar.example.com -You may also wish to keep these in group variables instead, or file in them in a group_vars/ file. +You may also wish to keep these in group variables instead, or file them in a group_vars/ file. See the rest of the documentation for more information about how to organize variables. .. 
_use_ssh: From 151e09d129d63ce485d42d3f6cf0915bb8bd8cee Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Tue, 5 Jan 2016 01:34:45 +0100 Subject: [PATCH 3269/3617] use unix shred if possible, otherwise fast custom impl; do not shred encrypted file --- lib/ansible/parsing/vault/__init__.py | 90 ++++++++++++++++++--------- 1 file changed, 62 insertions(+), 28 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index 28e819860aeac8..bcd038c8b8d34b 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -219,41 +219,67 @@ class VaultEditor: def __init__(self, password): self.vault = VaultLib(password) + + def _shred_file_custom(self, tmp_path): + """"Destroy a file, when shred (core-utils) is not available - def _shred_file(self, tmp_path): - """Securely destroy a decrypted file + Unix `shred' destroys files "so that they can be recovered only with great difficulty with + specialised hardware, if at all". It is based on the method from the paper + "Secure Deletion of Data from Magnetic and Solid-State Memory", + Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996). - Inspired by unix `shred', try to destroy the secrets "so that they can be - recovered only with great difficulty with specialised hardware, if at all". + We do not go to that length to re-implement shred in Python; instead, overwriting with a block + of random data should suffice. See https://github.com/ansible/ansible/pull/13700 . - - Note that: - - For flash: overwriting would have no effect (due to wear leveling). But the - added disk wear is considered insignificant. - - For other storage systems: the filesystem lies to the vfs (kernel), the disk - driver lies to the filesystem and the disk lies to the driver. But it's better - than nothing. - - most tmp dirs are now tmpfs (ramdisks), for which this is a non-issue. 
""" - def generate_data(length): - import string, random - chars = string.ascii_lowercase + string.ascii_uppercase + string.digits - return ''.join(random.SystemRandom().choice(chars) for _ in range(length)) + file_len = os.path.getsize(tmp_path) - if not os.path.isfile(tmp_path): - # file is already gone - return - - ld = os.path.getsize(tmp_path) passes = 3 - with open(tmp_path, "w") as fh: + with open(tmp_path, "wb") as fh: for _ in range(passes): fh.seek(0, 0) - data = generate_data(ld) - fh.write(data) + # get a random chunk of data + data = os.urandom(min(1024*1024*2, file_len)) + bytes_todo = file_len + while bytes_todo > 0: + chunk = data[:bytes_todo] + fh.write(chunk) + bytes_todo -= len(chunk) + + assert(fh.tell() == file_len) os.fsync(fh) + + + def _shred_file(self, tmp_path): + """Securely destroy a decrypted file + + Note standard limitations of GNU shred apply (For flash, overwriting would have no effect + due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never + guarantee data hits the disk; etc). Furthermore, if your tmp dirs is on tmpfs (ramdisks), + it is a non-issue. + + Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is + a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on + a custom shredding method. + """ + + if not os.path.isfile(tmp_path): + # file is already gone + return + + try: + r = call(['shred', tmp_path]) + except OSError as e: + # shred is not available on this system, or some other error occured. + self._shred_file_custom(tmp_path) + r = 0 + + if r != 0: + # we could not successfully execute unix shred; therefore, do custom shred. 
+ self._shred_file_custom(tmp_path) + os.remove(tmp_path) def _edit_file_helper(self, filename, existing_data=None, force_save=False): @@ -262,7 +288,7 @@ def _edit_file_helper(self, filename, existing_data=None, force_save=False): _, tmp_path = tempfile.mkstemp() if existing_data: - self.write_data(existing_data, tmp_path) + self.write_data(existing_data, tmp_path, shred=False) # drop the user into an editor on the tmp file try: @@ -300,7 +326,7 @@ def decrypt_file(self, filename, output_file=None): ciphertext = self.read_data(filename) plaintext = self.vault.decrypt(ciphertext) - self.write_data(plaintext, output_file or filename) + self.write_data(plaintext, output_file or filename, shred=False) def create_file(self, filename): """ create a new encrypted file """ @@ -365,13 +391,21 @@ def read_data(self, filename): return data - def write_data(self, data, filename): + def write_data(self, data, filename, shred=True): + """write data to given path + + if shred==True, make sure that the original data is first shredded so + that is cannot be recovered + """ bytes = to_bytes(data, errors='strict') if filename == '-': sys.stdout.write(bytes) else: if os.path.isfile(filename): - self._shred_file(filename) + if shred: + self._shred_file(filename) + else: + os.remove(filename) with open(filename, "wb") as fh: fh.write(bytes) From 0d7c3284595c34f53c903995b8dff5fc65303c89 Mon Sep 17 00:00:00 2001 From: John Mitchell Date: Mon, 4 Jan 2016 19:52:37 -0500 Subject: [PATCH 3270/3617] fixed css minification make target for docsite --- docsite/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/Makefile b/docsite/Makefile index 92129f78514672..15347f84bf9cfd 100644 --- a/docsite/Makefile +++ b/docsite/Makefile @@ -43,4 +43,4 @@ modules: $(FORMATTER) ../hacking/templates/rst.j2 PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../lib/ansible/modules -o rst/ staticmin: - cat _themes/srtd/static/css/theme.css | sed 
-e 's/^[ \t]*//g; s/[ \t]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css + cat _themes/srtd/static/css/theme.css | sed -e 's/^[ ]*//g; s/[ ]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css From 692ef6dcc90cf696b4bc25bedb979150adf6e7b9 Mon Sep 17 00:00:00 2001 From: John Mitchell Date: Mon, 4 Jan 2016 19:58:51 -0500 Subject: [PATCH 3271/3617] made docsite ads configurable by marketing --- docsite/_themes/srtd/layout.html | 22 ++++++++++++---------- docsite/_themes/srtd/static/css/theme.css | 21 ++------------------- 2 files changed, 14 insertions(+), 29 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 16f0d8d2663d44..1408be8165d90d 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -166,7 +166,7 @@

- +
@@ -189,15 +189,17 @@
- - -
- -
-
- -
-
+ + {% include "breadcrumbs.html" %}
diff --git a/docsite/_themes/srtd/static/css/theme.css b/docsite/_themes/srtd/static/css/theme.css index 4f7cbc8caafefd..246e513b799218 100644 --- a/docsite/_themes/srtd/static/css/theme.css +++ b/docsite/_themes/srtd/static/css/theme.css @@ -4723,33 +4723,16 @@ span[id*='MathJax-Span'] { padding: 0.4045em 1.618em; } - .DocSiteBanner { - width: 100%; display: flex; display: -webkit-flex; + justify-content: center; + -webkit-justify-content: center; flex-wrap: wrap; -webkit-flex-wrap: wrap; - justify-content: space-between; - -webkit-justify-content: space-between; - background-color: #ff5850; margin-bottom: 25px; } .DocSiteBanner-imgWrapper { max-width: 100%; } - -@media screen and (max-width: 1403px) { - .DocSiteBanner { - width: 100%; - display: flex; - display: -webkit-flex; - flex-wrap: wrap; - -webkit-flex-wrap: wrap; - justify-content: center; - -webkit-justify-content: center; - background-color: #fff; - margin-bottom: 25px; - } -} From 1c3b16c2ddf42c687738687cbc1a708cd05d2112 Mon Sep 17 00:00:00 2001 From: John Mitchell Date: Mon, 4 Jan 2016 20:02:01 -0500 Subject: [PATCH 3272/3617] update copyright date --- docsite/_themes/srtd/footer.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docsite/_themes/srtd/footer.html b/docsite/_themes/srtd/footer.html index b70cfde7ad80dd..30b02a8978b067 100644 --- a/docsite/_themes/srtd/footer.html +++ b/docsite/_themes/srtd/footer.html @@ -13,7 +13,7 @@

- © Copyright 2015 Ansible, Inc.. + © Copyright 2016 Ansible, Inc.. {%- if last_updated %} {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %} From 559ba467c09b112ecd7dc8681888b6631fcacba3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 11:11:50 -0800 Subject: [PATCH 3273/3617] Revert "Convert to bytes later so that make_become_command can jsut operate on text type." This reverts commit c4da5840b5e38aea1740e68f7100256c93dfbb17. Going to do this in the connection plugins --- lib/ansible/plugins/action/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index 5383f8afd4345c..e54898b6db3c57 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -488,6 +488,8 @@ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' + # We may need to revisit this later. + cmd = to_bytes(cmd, errors='strict') if executable is not None: cmd = executable + ' -c ' + cmd @@ -504,7 +506,7 @@ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, cmd = self._play_context.make_become_cmd(cmd, executable=executable) display.debug("_low_level_execute_command(): executing: %s" % (cmd,)) - rc, stdout, stderr = self._connection.exec_command(to_bytes(cmd, errors='strict'), in_data=in_data, sudoable=sudoable) + rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable) # stdout and stderr may be either a file-like or a bytes object. # Convert either one to a text type From 1ed3a018eb27dd06b08dbad57a162c2865abb635 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 11:12:14 -0800 Subject: [PATCH 3274/3617] Revert "Fix make tests-py3 on devel. 
Fix for https://github.com/ansible/ansible/issues/13638." This reverts commit e70061334aa99bee466295980f4cd4146096dc29. Going to do this in the connection plugins --- test/units/plugins/action/test_action.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py index dcd04375959325..0e47b6a53818c4 100644 --- a/test/units/plugins/action/test_action.py +++ b/test/units/plugins/action/test_action.py @@ -42,14 +42,14 @@ def test_sudo_only_if_user_differs(self): play_context.become = True play_context.become_user = play_context.remote_user = 'root' - play_context.make_become_cmd = Mock(return_value=b'CMD') + play_context.make_become_cmd = Mock(return_value='CMD') - action_base._low_level_execute_command(b'ECHO', sudoable=True) + action_base._low_level_execute_command('ECHO', sudoable=True) play_context.make_become_cmd.assert_not_called() play_context.remote_user = 'apo' - action_base._low_level_execute_command(b'ECHO', sudoable=True) - play_context.make_become_cmd.assert_called_once_with(b'ECHO', executable=None) + action_base._low_level_execute_command('ECHO', sudoable=True) + play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None) play_context.make_become_cmd.reset_mock() @@ -57,7 +57,7 @@ def test_sudo_only_if_user_differs(self): C.BECOME_ALLOW_SAME_USER = True try: play_context.remote_user = 'root' - action_base._low_level_execute_command(b'ECHO SAME', sudoable=True) - play_context.make_become_cmd.assert_called_once_with(b'ECHO SAME', executable=None) + action_base._low_level_execute_command('ECHO SAME', sudoable=True) + play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None) finally: C.BECOME_ALLOW_SAME_USER = become_allow_same_user From 8d57ffd16bd1025f7b04127fec760c13aca6d6dd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 22 Dec 2015 11:12:41 -0800 Subject: [PATCH 3275/3617] Revert "Transform the 
command we pass to subprocess into a byte string in _low_level-exec_command" This reverts commit 0c013f592a31c06baac7aadf27d23598f6abe931. Going to do this in the connection plugin --- lib/ansible/plugins/action/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py index e54898b6db3c57..3f4fff588e91ed 100644 --- a/lib/ansible/plugins/action/__init__.py +++ b/lib/ansible/plugins/action/__init__.py @@ -488,8 +488,7 @@ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, verbatim, then this won't work. May have to use some sort of replacement strategy (python3 could use surrogateescape) ''' - # We may need to revisit this later. - cmd = to_bytes(cmd, errors='strict') + if executable is not None: cmd = executable + ' -c ' + cmd From 9e32099b5e0535c2daf656e9d619e9a2efe9d3b6 Mon Sep 17 00:00:00 2001 From: Bruno Almeida do Lago Date: Tue, 5 Jan 2016 16:48:49 +1300 Subject: [PATCH 3276/3617] Added OpenStack dynamic inventory example Added an example illustrating how to use the OpenStack dynamic inventory script to the "Dynamic Inventory" section. --- docsite/rst/intro_dynamic_inventory.rst | 71 +++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst index 5f491ebc2eff3e..85feaa143bdf73 100644 --- a/docsite/rst/intro_dynamic_inventory.rst +++ b/docsite/rst/intro_dynamic_inventory.rst @@ -206,6 +206,77 @@ explicitly clear the cache, you can run the ec2.py script with the ``--refresh-c # ./ec2.py --refresh-cache +.. _openstack_example: + +Example: OpenStack External Inventory Script +```````````````````````````````````````````` + +If you use an OpenStack based cloud, instead of manually maintaining your own inventory file, you can use the openstack.py dynamic inventory to pull information about your compute instances directly from OpenStack. 
+ +You can download the latest version of the OpenStack inventory script at: https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py + +You can use the inventory script explicitly (by passing the `-i openstack.py` argument to Ansible) or implicitly (by placing the script at `/etc/ansible/hosts`). + +Explicit use of inventory script +++++++++++++++++++++++++++++++++ + +Download the latest version of the OpenStack dynamic inventory script and make it executable:: + + wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py + chmod +x openstack.py + +Source an OpenStack RC file:: + + source openstack.rc + +.. note:: + + An OpenStack RC file contains the environment variables required by the client tools to establish a connection with the cloud provider, such as the authentication URL, user name, password and region name. For more information on how to download, create or source an OpenStack RC file, please refer to http://docs.openstack.org/cli-reference/content/cli_openrc.html. + +You can confirm the file has been successfully sourced by running a simple command, such as `nova list` and ensuring it returns no errors. + +.. note:: + + The OpenStack command line clients are required to run the `nova list` command. For more information on how to install them, please refer to http://docs.openstack.org/cli-reference/content/install_clients.html. + +You can test the OpenStack dynamic inventory script manually to confirm it is working as expected:: + + ./openstack.py --list + +After a few moments you should see some JSON output with information about your compute instances. 
+ +Once you confirm the dynamic inventory script is working as expected, you can tell Ansible to use the `openstack.py` script as an inventory file, as illustrated below:: + +    ansible -i openstack.py all -m ping + +Implicit use of inventory script +++++++++++++++++++++++++++++++++ + +Download the latest version of the OpenStack dynamic inventory script, make it executable and copy it to `/etc/ansible/hosts`:: + + wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py + chmod +x openstack.py + sudo cp openstack.py /etc/ansible/hosts + +Download the sample configuration file, modify it to suit your needs and copy it to /etc/ansible/openstack.yml:: + + wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.yml + vi openstack.yml + sudo cp openstack.yml /etc/ansible/ + +You can test the OpenStack dynamic inventory script manually to confirm it is working as expected:: + + /etc/ansible/hosts --list + +After a few moments you should see some JSON output with information about your compute instances. + +Refresh the cache ++++++++++++++++++ + +Note that the OpenStack dynamic inventory script will cache results to avoid repeated API calls. To explicitly clear the cache, you can run the openstack.py (or hosts) script with the --refresh parameter:: + + ./openstack.py --refresh + .. _other_inventory_scripts: Other inventory scripts From c0a8cd950b909983cdc763f80495595d68597089 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 4 Jan 2016 19:23:12 -0800 Subject: [PATCH 3277/3617] Fix problems with non-ascii values passed as part of the command to connection plugins @drybjed discovered this with non-ascii environment variables and command line arguments to script and raw module. 
--- lib/ansible/plugins/connection/__init__.py | 1 + lib/ansible/plugins/connection/chroot.py | 2 + lib/ansible/plugins/connection/docker.py | 7 ++- lib/ansible/plugins/connection/jail.py | 6 ++- lib/ansible/plugins/connection/libvirt_lxc.py | 6 ++- lib/ansible/plugins/connection/local.py | 11 ++++- lib/ansible/plugins/connection/ssh.py | 17 +++++-- lib/ansible/plugins/connection/zone.py | 8 ++-- test/integration/unicode-test-script | 7 +++ test/integration/unicode.yml | 45 +++++++++++++++++++ 10 files changed, 97 insertions(+), 13 deletions(-) create mode 100755 test/integration/unicode-test-script diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py index 06616bac4cad19..ff00bc023809ff 100644 --- a/lib/ansible/plugins/connection/__init__.py +++ b/lib/ansible/plugins/connection/__init__.py @@ -91,6 +91,7 @@ def __init__(self, play_context, new_stdin, *args, **kwargs): @property def connected(self): + '''Read-only property holding whether the connection to the remote host is active or closed.''' return self._connected def _become_method_supported(self): diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py index c86ea1fc355680..ba41ffb5d8825c 100644 --- a/lib/ansible/plugins/connection/chroot.py +++ b/lib/ansible/plugins/connection/chroot.py @@ -30,6 +30,7 @@ from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase from ansible.module_utils.basic import is_executable +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -90,6 +91,7 @@ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] display.vvv("EXEC %s" % (local_cmd), host=self.chroot) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/docker.py 
b/lib/ansible/plugins/connection/docker.py index 4e08f56a095eff..ce556a1431bbcb 100644 --- a/lib/ansible/plugins/connection/docker.py +++ b/lib/ansible/plugins/connection/docker.py @@ -36,6 +36,7 @@ import ansible.constants as C from ansible.errors import AnsibleError, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -125,7 +126,8 @@ def exec_command(self, cmd, in_data=None, sudoable=False): # -i is needed to keep stdin open which allows pipelining to work local_cmd = [self.docker_cmd, "exec", '-i', self._play_context.remote_addr, executable, '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self._play_context.remote_addr) + display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -159,6 +161,7 @@ def put_file(self, in_path, out_path): if self.can_copy_bothways: # only docker >= 1.8.1 can do this natively args = [ self.docker_cmd, "cp", in_path, "%s:%s" % (self._play_context.remote_addr, out_path) ] + args = map(to_bytes, args) p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: @@ -170,6 +173,7 @@ def put_file(self, in_path, out_path): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' args = [self.docker_cmd, "exec", "-i", self._play_context.remote_addr, executable, "-c", "dd of={0} bs={1}".format(out_path, BUFSIZE)] + args = map(to_bytes, args) with open(in_path, 'rb') as in_file: try: p = subprocess.Popen(args, stdin=in_file, @@ -192,6 +196,7 @@ def fetch_file(self, in_path, out_path): out_dir = os.path.dirname(out_path) args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir] + args = map(to_bytes, args) p = subprocess.Popen(args, 
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/jail.py b/lib/ansible/plugins/connection/jail.py index e665692543ad80..8f88b6ad28fb58 100644 --- a/lib/ansible/plugins/connection/jail.py +++ b/lib/ansible/plugins/connection/jail.py @@ -30,6 +30,7 @@ from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -83,7 +84,7 @@ def list_jails(self): return stdout.split() def get_jail_path(self): - p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'], + p = subprocess.Popen([self.jls_cmd, '-j', to_bytes(self.jail), '-q', 'path'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -109,7 +110,8 @@ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self.jail) + display.vvv("EXEC %s" % (local_cmd,), host=self.jail) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/libvirt_lxc.py b/lib/ansible/plugins/connection/libvirt_lxc.py index dc82d984040160..3bfff8b1c35ed2 100644 --- a/lib/ansible/plugins/connection/libvirt_lxc.py +++ b/lib/ansible/plugins/connection/libvirt_lxc.py @@ -30,6 +30,7 @@ from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -65,7 +66,7 @@ def _search_executable(self, executable): return cmd def _check_domain(self, domain): - p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', domain], + p = 
subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', to_bytes(domain)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p.communicate() if p.returncode: @@ -89,7 +90,8 @@ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh' local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd] - display.vvv("EXEC %s" % (local_cmd), host=self.lxc) + display.vvv("EXEC %s" % (local_cmd,), host=self.lxc) + local_cmd = map(to_bytes, local_cmd) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py index e69281d0f3b0c0..29b1e9a5ca2919 100644 --- a/lib/ansible/plugins/connection/local.py +++ b/lib/ansible/plugins/connection/local.py @@ -25,10 +25,13 @@ import fcntl import getpass +from ansible.compat.six import text_type, binary_type + import ansible.constants as C from ansible.errors import AnsibleError, AnsibleFileNotFound from ansible.plugins.connection import ConnectionBase +from ansible.utils.unicode import to_bytes try: from __main__ import display @@ -69,9 +72,15 @@ def exec_command(self, cmd, in_data=None, sudoable=True): raise AnsibleError("Internal Error: this module does not support optimized module pipelining") executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None - display.vvv("{0} EXEC {1}".format(self._play_context.remote_addr, cmd)) + display.vvv(u"{0} EXEC {1}".format(self._play_context.remote_addr, cmd)) # FIXME: cwd= needs to be set to the basedir of the playbook display.debug("opening command with Popen()") + + if isinstance(cmd, (text_type, binary_type)): + cmd = to_bytes(cmd) + else: + cmd = map(to_bytes, cmd) + p = subprocess.Popen( cmd, shell=isinstance(cmd, basestring), diff --git a/lib/ansible/plugins/connection/ssh.py 
b/lib/ansible/plugins/connection/ssh.py index a2abcf20aee903..074f6aaa8ae5ef 100644 --- a/lib/ansible/plugins/connection/ssh.py +++ b/lib/ansible/plugins/connection/ssh.py @@ -33,6 +33,7 @@ from ansible.plugins.connection import ConnectionBase from ansible.utils.path import unfrackpath, makedirs_safe from ansible.utils.unicode import to_bytes, to_unicode +from ansible.compat.six import text_type, binary_type try: from __main__ import display @@ -320,7 +321,7 @@ def _run(self, cmd, in_data, sudoable=True): ''' display_cmd = map(pipes.quote, cmd) - display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host) + display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host) # Start the given command. If we don't need to pipeline data, we can try # to use a pseudo-tty (ssh will have been invoked with -tt). If we are @@ -328,6 +329,12 @@ def _run(self, cmd, in_data, sudoable=True): # old pipes. p = None + + if isinstance(cmd, (text_type, binary_type)): + cmd = to_bytes(cmd) + else: + cmd = map(to_bytes, cmd) + if not in_data: try: # Make sure stdin is a proper pty to avoid tcgetattr errors @@ -365,7 +372,7 @@ def _run(self, cmd, in_data, sudoable=True): # only when using ssh. Otherwise we can send initial data straightaway. state = states.index('ready_to_send') - if 'ssh' in cmd: + if b'ssh' in cmd: if self._play_context.prompt: # We're requesting escalation with a password, so we have to # wait for a password prompt. @@ -538,7 +545,7 @@ def _run(self, cmd, in_data, sudoable=True): stdin.close() if C.HOST_KEY_CHECKING: - if cmd[0] == "sshpass" and p.returncode == 6: + if cmd[0] == b"sshpass" and p.returncode == 6: raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. 
Please add this host\'s fingerprint to your known_hosts file to manage this host.') controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr @@ -600,7 +607,7 @@ def exec_command(self, *args, **kwargs): raise AnsibleConnectionFailure("Failed to connect to the host via ssh.") except (AnsibleConnectionFailure, Exception) as e: if attempt == remaining_tries - 1: - raise e + raise else: pause = 2 ** attempt - 1 if pause > 30: @@ -674,6 +681,8 @@ def close(self): # temporarily disabled as we are forced to currently close connections after every task because of winrm # if self._connected and self._persistent: # cmd = self._build_command('ssh', '-O', 'stop', self.host) + # + # cmd = map(to_bytes, cmd) # p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # stdout, stderr = p.communicate() diff --git a/lib/ansible/plugins/connection/zone.py b/lib/ansible/plugins/connection/zone.py index 75d7db545d61f6..b65c80b73fb701 100644 --- a/lib/ansible/plugins/connection/zone.py +++ b/lib/ansible/plugins/connection/zone.py @@ -31,6 +31,7 @@ from ansible import constants as C from ansible.errors import AnsibleError from ansible.plugins.connection import ConnectionBase +from ansible.utils import to_bytes try: from __main__ import display @@ -56,8 +57,8 @@ def __init__(self, play_context, new_stdin, *args, **kwargs): if os.geteuid() != 0: raise AnsibleError("zone connection requires running as root") - self.zoneadm_cmd = self._search_executable('zoneadm') - self.zlogin_cmd = self._search_executable('zlogin') + self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm')) + self.zlogin_cmd = to_bytes(self._search_executable('zlogin')) if self.zone not in self.list_zones(): raise AnsibleError("incorrect zone name %s" % self.zone) @@ -86,7 +87,7 @@ def list_zones(self): def get_zone_path(self): #solaris10vm# zoneadm -z cswbuild list -p 
#-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared - process = subprocess.Popen([self.zoneadm_cmd, '-z', self.zone, 'list', '-p'], + process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -113,6 +114,7 @@ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): # this through /bin/sh -c here. Instead it goes through the shell # that zlogin selects. local_cmd = [self.zlogin_cmd, self.zone, cmd] + local_cmd = map(to_bytes, local_cmd) display.vvv("EXEC %s" % (local_cmd), host=self.zone) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, diff --git a/test/integration/unicode-test-script b/test/integration/unicode-test-script new file mode 100755 index 00000000000000..340f2a9f5b23cf --- /dev/null +++ b/test/integration/unicode-test-script @@ -0,0 +1,7 @@ +#!/bin/sh + +echo "Non-ascii arguments:" +echo $@ + +echo "Non-ascii Env var:" +echo $option diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml index 6e8e073a79dfc9..f38bf8f5e86ddc 100644 --- a/test/integration/unicode.yml +++ b/test/integration/unicode.yml @@ -49,6 +49,51 @@ that: - "'¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' in output.stdout_lines" + - name: Run raw with non-ascii options + raw: "/bin/echo Zażółć gęślą jaźń" + register: results + + - name: Check that raw output the right thing + assert: + that: + - "'Zażółć gęślą jaźń' in results.stdout_lines" + + - name: Run a script with non-ascii options and environment + script: unicode-test-script --option "Zażółć gęślą jaźń" + environment: + option: Zażółć + register: results + + - name: Check that script output includes the nonascii arguments and environment values + assert: + that: + - "'--option Zażółć gęślą jaźń' in results.stdout_lines" + - "'Zażółć' in results.stdout_lines" + + - name: Ping with non-ascii environment variable and option + 
ping: + data: "Zażółć gęślą jaźń" + environment: + option: Zażółć + register: results + + - name: Check that ping with non-ascii data was correct + assert: + that: + - "'Zażółć gęślą jaźń' == results.ping" + + - name: Command that echos a non-ascii env var + command: "echo $option" + environment: + option: Zażółć + register: results + + - name: Check that a non-ascii env var was passed to the command module + assert: + that: + - "'Zażółć' in results.stdout_lines" + + - name: 'A play for hosts in group: ĪīĬĭ' hosts: 'ĪīĬĭ' gather_facts: true From 6470f7de2cf4cfc37fa5fef66c7e37514b6139d3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 5 Jan 2016 07:53:22 -0800 Subject: [PATCH 3278/3617] Update submodule refs --- lib/ansible/modules/core | 2 +- lib/ansible/modules/extras | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core index 002028748f0809..33014c6db1ce75 160000 --- a/lib/ansible/modules/core +++ b/lib/ansible/modules/core @@ -1 +1 @@ -Subproject commit 002028748f080961ade801c30e194bfd4ba043ce +Subproject commit 33014c6db1ce757d0ffa065e6c9924ac4db1cacc diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras index f6a7b6dd1f7be9..82a4cf84be8224 160000 --- a/lib/ansible/modules/extras +++ b/lib/ansible/modules/extras @@ -1 +1 @@ -Subproject commit f6a7b6dd1f7be93ba640c50bf26adeeabb5af46f +Subproject commit 82a4cf84be82244d0cf7d043c8cbb4f176f086db From 11ce08b9dde32c7e4b51a6fffc22f301c81181be Mon Sep 17 00:00:00 2001 From: Eric Feliksik Date: Tue, 5 Jan 2016 18:04:38 +0100 Subject: [PATCH 3279/3617] cleaner implementation and random chunk length. 
--- lib/ansible/parsing/vault/__init__.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py index bcd038c8b8d34b..1d4eeef465309d 100644 --- a/lib/ansible/parsing/vault/__init__.py +++ b/lib/ansible/parsing/vault/__init__.py @@ -22,6 +22,7 @@ import shutil import sys import tempfile +import random from io import BytesIO from subprocess import call from ansible.errors import AnsibleError @@ -235,20 +236,21 @@ def _shred_file_custom(self, tmp_path): """ file_len = os.path.getsize(tmp_path) + max_chunk_len = min(1024*1024*2, file_len) passes = 3 with open(tmp_path, "wb") as fh: for _ in range(passes): fh.seek(0, 0) - # get a random chunk of data - data = os.urandom(min(1024*1024*2, file_len)) - bytes_todo = file_len - while bytes_todo > 0: - chunk = data[:bytes_todo] - fh.write(chunk) - bytes_todo -= len(chunk) - - assert(fh.tell() == file_len) + # get a random chunk of data, each pass with other length + chunk_len = random.randint(max_chunk_len/2, max_chunk_len) + data = os.urandom(chunk_len) + + for _ in range(0, file_len // chunk_len): + fh.write(data) + fh.write(data[:file_len % chunk_len]) + + assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy os.fsync(fh) @@ -273,13 +275,12 @@ def _shred_file(self, tmp_path): r = call(['shred', tmp_path]) except OSError as e: # shred is not available on this system, or some other error occured. - self._shred_file_custom(tmp_path) - r = 0 + r = 1 if r != 0: # we could not successfully execute unix shred; therefore, do custom shred. 
self._shred_file_custom(tmp_path) - + os.remove(tmp_path) def _edit_file_helper(self, filename, existing_data=None, force_save=False): From 9972c27a9bc1dd2c9051368e082e2b366a04acbe Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 Jan 2016 18:44:09 -0500 Subject: [PATCH 3280/3617] now handles 'non file diffs' this allows modules to pass back a 'diff' dict and it will still show using the file interface --- lib/ansible/plugins/callback/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py index cc2a9ad0e75c7c..faf04b1180fae9 100644 --- a/lib/ansible/plugins/callback/__init__.py +++ b/lib/ansible/plugins/callback/__init__.py @@ -116,6 +116,10 @@ def _get_diff(self, difflist): if 'src_larger' in diff: ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger']) if 'before' in diff and 'after' in diff: + # format complex structures into 'files' + for x in ['before', 'after']: + if isinstance(diff[x], dict): + diff[x] = json.dumps(diff[x], sort_keys=True, indent=4) if 'before_header' in diff: before_header = "before: %s" % diff['before_header'] else: From f3c45adfb8670701d0b19e86787a5213bb5afb5f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 Jan 2016 19:58:06 -0500 Subject: [PATCH 3281/3617] simplified diff handling in callback no need for the copy or other complexity --- lib/ansible/plugins/callback/default.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index e515945bba516b..276ac435f4b8ec 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -137,11 +137,8 @@ def v2_playbook_on_play_start(self, play): def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: for res in result._result['results']: - newres = self._copy_result(result) - res['item'] = 
self._get_item(res) - newres._result = res - - self.v2_on_file_diff(newres) + if 'diff' in res: + self._display.display(self._get_diff(res['diff'])) elif 'diff' in result._result and result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) From a65543bbafbd328e7848a99d2a570f71c43a53a0 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Tue, 5 Jan 2016 14:52:06 -0600 Subject: [PATCH 3282/3617] adding password no_log and cleaning up argument spec --- lib/ansible/module_utils/vca.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ansible/module_utils/vca.py b/lib/ansible/module_utils/vca.py index ef89d5455696c0..9737cca8b47636 100644 --- a/lib/ansible/module_utils/vca.py +++ b/lib/ansible/module_utils/vca.py @@ -35,8 +35,8 @@ def __init__(self, msg, **kwargs): def vca_argument_spec(): return dict( - username=dict(), - password=dict(), + username=dict(type='str', aliases=['user'], required=True), + password=dict(type='str', aliases=['pass','passwd'], required=True, no_log=True), org=dict(), service_id=dict(), instance_id=dict(), From dc47c25e589f1c2b1f44867076624f0e0564b7c6 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 5 Jan 2016 22:01:01 -0500 Subject: [PATCH 3283/3617] Minor tweak to ensure diff is not empty in callback for file diffs --- lib/ansible/plugins/callback/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py index 6ca728e65f8c71..dfad6579343f0f 100644 --- a/lib/ansible/plugins/callback/default.py +++ b/lib/ansible/plugins/callback/default.py @@ -137,7 +137,7 @@ def v2_playbook_on_play_start(self, play): def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: for res in result._result['results']: - if 'diff' in res: + if 'diff' in res and res['diff']: self._display.display(self._get_diff(res['diff'])) elif 'diff' in result._result and 
result._result['diff']: self._display.display(self._get_diff(result._result['diff'])) From 7c8374e0f8e153368bb6a22caf7b7ada07f8d797 Mon Sep 17 00:00:00 2001 From: Abhijit Menon-Sen Date: Wed, 6 Jan 2016 20:44:19 +0530 Subject: [PATCH 3284/3617] Strip string terms before templating The earlier code did call terms.strip(), but ignored the return value instead of passing that in to templar.template(). Clearly an oversight. --- lib/ansible/utils/listify.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py index 7fe83a8fa0cc15..d834737ab58af1 100644 --- a/lib/ansible/utils/listify.py +++ b/lib/ansible/utils/listify.py @@ -31,9 +31,8 @@ def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=False, convert_bare=True): if isinstance(terms, string_types): - stripped = terms.strip() # TODO: warn/deprecation on bare vars in with_ so we can eventually remove fail on undefined override - terms = templar.template(terms, convert_bare=convert_bare, fail_on_undefined=fail_on_undefined) + terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined) else: terms = templar.template(terms, fail_on_undefined=fail_on_undefined) From 11b55be5bbb90b2bc917b2637d6fcdbe1a15092d Mon Sep 17 00:00:00 2001 From: muffl0n Date: Thu, 20 Aug 2015 10:31:48 +0200 Subject: [PATCH 3285/3617] Show version without supplying a dummy action fixes #12004 parsing x2 does not seem to break anything --- lib/ansible/cli/galaxy.py | 7 +++++-- lib/ansible/cli/vault.py | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py index 476a7d0f897f33..a022d17859ca0e 100644 --- a/lib/ansible/cli/galaxy.py +++ b/lib/ansible/cli/galaxy.py @@ -50,7 +50,7 @@ class GalaxyCLI(CLI): SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) 
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup") - + def __init__(self, args): self.api = None self.galaxy = None @@ -64,6 +64,9 @@ def parse(self): epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) + # Workaround for #12004: show version without supplying a dummy action + self.parser.parse_args() + self.set_action() # options specific to actions @@ -141,7 +144,7 @@ def parse(self): return True def run(self): - + super(GalaxyCLI, self).run() # if not offline, get connect to galaxy api diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py index 9908f17e578ac9..50a6fdebdc8554 100644 --- a/lib/ansible/cli/vault.py +++ b/lib/ansible/cli/vault.py @@ -53,6 +53,9 @@ def parse(self): epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) ) + # Workaround for #12004: show version without supplying a dummy action + self.parser.parse_args() + self.set_action() # options specific to self.actions From ab2f47327a82148441140c9b98a02a6e28877153 Mon Sep 17 00:00:00 2001 From: Sandra Wills Date: Wed, 6 Jan 2016 13:59:25 -0500 Subject: [PATCH 3286/3617] removed the "wy-side-nav-search" element this is so we can use the new swiftype search and it's search input --- docsite/_themes/srtd/layout.html | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index 41b6b75c1d209c..a10b7656aabf2f 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -150,11 +150,6 @@

- -