diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 00000000000000..812fc3b139483c --- /dev/null +++ b/.coveragerc @@ -0,0 +1,4 @@ +[report] +omit = + */python?.?/* + */site-packages/nose/* diff --git a/.gitignore b/.gitignore index 5fe1d994e3c43e..2392614453bfa6 100644 --- a/.gitignore +++ b/.gitignore @@ -19,6 +19,12 @@ rpm-build # Mac OS X stuff... .DS_Store # manpage build stuff... +docs/man/man1/ansible.1 +docs/man/man1/ansible-doc.1 +docs/man/man1/ansible-galaxy.1 +docs/man/man1/ansible-playbook.1 +docs/man/man1/ansible-pull.1 +docs/man/man1/ansible-vault.1 docs/man/man3/* # Sublime stuff *.sublime-project @@ -41,7 +47,10 @@ deb-build *.swo credentials.yml # test output +*.retry +*.out .coverage +.tox results.xml coverage.xml /test/units/cover-html diff --git a/.gitmodules b/.gitmodules index 3f14953ec8f250..a0e903430a5ff9 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,16 +1,6 @@ [submodule "lib/ansible/modules/core"] path = lib/ansible/modules/core - url = https://github.com/ansible/ansible-modules-core.git - branch = devel + url = https://github.com/ansible/ansible-modules-core [submodule "lib/ansible/modules/extras"] path = lib/ansible/modules/extras - url = https://github.com/ansible/ansible-modules-extras.git - branch = devel -[submodule "v2/ansible/modules/core"] - path = v2/ansible/modules/core - url = https://github.com/ansible/ansible-modules-core.git - branch = devel -[submodule "v2/ansible/modules/extras"] - path = v2/ansible/modules/extras - url = https://github.com/ansible/ansible-modules-extras.git - branch = devel + url = https://github.com/ansible/ansible-modules-extras diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000000000..603132f722c223 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,33 @@ +sudo: false +language: python +matrix: + include: + - env: TOXENV=py24 INTEGRATION=no + - env: TOXENV=py26 INTEGRATION=yes + python: 2.6 + - env: TOXENV=py27 INTEGRATION=yes + python: 2.7 + - env: TOXENV=py34 INTEGRATION=no + python: 3.4 + - env: TOXENV=py35 INTEGRATION=no + python: 3.5 +addons: + apt: + sources: + - deadsnakes + packages: + - python2.4 +install: + - pip install tox PyYAML Jinja2 sphinx +script: +# urllib2's defaults are not secure enough for us +- ./test/code-smell/replace-urlopen.sh . 
+- ./test/code-smell/use-compat-six.sh lib +- ./test/code-smell/boilerplate.sh +- ./test/code-smell/required-and-default-attributes.sh +- if test x"$TOXENV" != x'py24' ; then tox ; fi +- if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi + #- make -C docsite all +- if test x"$INTEGRATION" = x'yes' ; then source ./hacking/env-setup && cd test/integration/ && make parsing && make test_var_precedence && make unicode ; fi +after_success: + - coveralls diff --git a/CHANGELOG.md b/CHANGELOG.md index b5adaa6e5320c7..c379fb31ccc2ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,35 +1,603 @@ Ansible Changes By Release ========================== -## 1.9 "Dancing In the Street" - ACTIVE DEVELOPMENT +## 2.1 TBD - ACTIVE DEVELOPMENT + +###Major Changes: + +* added facility for modules to send back 'diff' for display when ansible is called with --diff, file, puppet and other module already implement this + +####New Modules: +* aws: ec2_vol_facts +* aws: ec2_vpc_dhcp_options.py +* aws: ec2_vpc_net_facts +* cloudstack: cs_volume +* yumrepo + +####New Filters: +* extract + +## 2.0 "Over the Hills and Far Away" + +###Major Changes: + +* Releases are now named after Led Zeppelin songs, 1.9 will be the last Van Halen named release. +* The new block/rescue/always directives allow for making task blocks and exception-like semantics +* New strategy plugins (e.g. `free`) allow control over the flow of task execution per play. The default (`linear`) will be the same as before. +* Improved error handling, with more detailed parser messages. General exception handling and display has been revamped. +* Task includes are now evaluated during execution, allowing more dynamic includes and options. Play includes are unchanged both still use the `include` directive. +* "with\_" loops can now be used with task includes since they are dynamic. +* Callback, connection, cache and lookup plugin APIs have changed. Existing plugins might require modification to work with the new versions. +* Callbacks are now shipped in the active directory and don't need to be copied, just whitelisted in ansible.cfg. +* Many API changes. Those integrating directly with Ansible's API will encounter breaking changes, but the new API is much easier to use and test. +* Settings are now more inheritable; what you set at play, block or role will be automatically inherited by the contained tasks. + This allows for new features to automatically be settable at all levels, previously we had to manually code this. +* Vars are now settable at play, block, role and task level with the `vars` directive and scoped to the tasks contained. +* Template code now retains types for bools and numbers instead of turning them into strings. + If you need the old behaviour, quote the value and it will get passed around as a string +* Empty variables and variables set to null in yaml will no longer be converted to empty strings. They will retain the value of `None`. + To go back to the old behaviour, you can override the `null_representation` setting to an empty string in your config file or + by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable. +* Added `meta: refresh_inventory` to force rereading the inventory in a play. + This re-executes inventory scripts, but does not force them to ignore any cache they might use. 
+* New delegate_facts directive, a boolean that allows you to apply facts to the delegated host (true/yes) instead of the inventory_hostname (no/false) which is the default and previous behaviour. +* local connections now work with 'su' as a privilege escalation method +* Ansible 2.0 has deprecated the “ssh” from ansible_ssh_user, ansible_ssh_host, and ansible_ssh_port to become ansible_user, ansible_host, and ansible_port. +* New ssh configuration variables (`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a + per-group or per-host ssh ProxyCommand or set any other ssh options. + `ansible_ssh_extra_args` is used to set options that are accepted only by ssh (not sftp or scp, which have their own analogous settings). +* ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features. +* Backslashes used when specifying parameters in jinja2 expressions in YAML dicts sometimes needed to be escaped twice. + This has been fixed so that escaping once works. Here's an example of how playbooks need to be modified: + + ``` + # Syntax in 1.9.x + - debug: + msg: "{{ 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') }}" + # Syntax in 2.0.x + - debug: + msg: "{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}" + + # Output: + "msg": "test1 1\\3" + ``` + +* When a string with a trailing newline was specified in the playbook via yaml +dict format, the trailing newline was stripped. When specified in key=value +format the trailing newlines were kept. In v2, both methods of specifying the +string will keep the trailing newlines. If you relied on the trailing +newline being stripped you can change your playbook like this: + + ``` + # Syntax in 1.9.2 + vars: + message: > + Testing + some things + tasks: + - debug: + msg: "{{ message }}" + + # Syntax in 2.0.x + vars: + old_message: > + Testing + some things + message: "{{ old_messsage[:-1] }}" + - debug: + msg: "{{ message }}" + # Output + "msg": "Testing some things" + ``` + +* When specifying complex args as a variable, the variable must use the full jinja2 +variable syntax ('{{var_name}}') - bare variable names there are no longer accepted. +In fact, even specifying args with variables has been deprecated, and will not be +allowed in future versions: + + ``` + --- + - hosts: localhost + connection: local + gather_facts: false + vars: + my_dirs: + - { path: /tmp/3a, state: directory, mode: 0755 } + - { path: /tmp/3b, state: directory, mode: 0700 } + tasks: + - file: + args: "{{item}}" + with_items: my_dirs + ``` + +###Plugins + +* Rewritten dnf module that should be faster and less prone to encountering bugs in cornercases +* WinRM connection plugin passes all vars named `ansible_winrm_*` to the underlying pywinrm client. This allows, for instance, `ansible_winrm_server_cert_validation=ignore` to be used with newer versions of pywinrm to disable certificate validation on Python 2.7.9+. +* WinRM connection plugin put_file is significantly faster and no longer has file size limitations. 
+ +####Deprecated Modules (new ones in parens): + +* ec2_ami_search (ec2_ami_find) +* quantum_network (os_network) +* glance_image +* nova_compute (os_server) +* quantum_floating_ip (os_floating_ip) +* quantum_router (os_router) +* quantum_router_gateway (os_router) +* quantum_router_interface (os_router) + +####New Modules: + +* amazon: ec2_ami_copy +* amazon: ec2_ami_find +* amazon: ec2_elb_facts +* amazon: ec2_eni +* amazon: ec2_eni_facts +* amazon: ec2_remote_facts +* amazon: ec2_vpc_igw +* amazon: ec2_vpc_net +* amazon: ec2_vpc_net_facts +* amazon: ec2_vpc_route_table +* amazon: ec2_vpc_route_table_facts +* amazon: ec2_vpc_subnet +* amazon: ec2_vpc_subnet_facts +* amazon: ec2_win_password +* amazon: ecs_cluster +* amazon: ecs_task +* amazon: ecs_taskdefinition +* amazon: elasticache_subnet_group_facts +* amazon: iam +* amazon: iam_cert +* amazon: iam_policy +* amazon: route53_facts +* amazon: route53_health_check +* amazon: route53_zone +* amazon: sts_assume_role +* amazon: s3_bucket +* amazon: s3_lifecycle +* amazon: s3_logging +* amazon: sqs_queue +* amazon: sns_topic +* amazon: sts_assume_role +* apk +* bigip_gtm_wide_ip +* bundler +* centurylink: clc_aa_policy +* centurylink: clc_alert_policy +* centurylink: clc_blueprint_package +* centurylink: clc_firewall_policy +* centurylink: clc_group +* centurylink: clc_loadbalancer +* centurylink: clc_modify_server +* centurylink: clc_publicip +* centurylink: clc_server +* centurylink: clc_server_snapshot +* circonus_annotation +* consul +* consul_acl +* consul_kv +* consul_session +* cloudtrail +* cloudstack: cs_account +* cloudstack: cs_affinitygroup +* cloudstack: cs_domain +* cloudstack: cs_facts +* cloudstack: cs_firewall +* cloudstack: cs_iso +* cloudstack: cs_instance +* cloudstack: cs_instancegroup +* cloudstack: cs_ip_address +* cloudstack: cs_loadbalancer_rule +* cloudstack: cs_loadbalancer_rule_member +* cloudstack: cs_network +* cloudstack: cs_portforward +* cloudstack: cs_project +* cloudstack: cs_sshkeypair +* cloudstack: cs_securitygroup +* cloudstack: cs_securitygroup_rule +* cloudstack: cs_staticnat +* cloudstack: cs_template +* cloudstack: cs_user +* cloudstack: cs_vmsnapshot +* cronvar +* datadog_monitor +* deploy_helper +* docker: docker_login +* dpkg_selections +* elasticsearch_plugin +* expect +* find +* google: gce_tag +* hall +* ipify_facts +* iptables +* libvirt: virt_net +* libvirt: virt_pool +* maven_artifact +* openstack: os_auth +* openstack: os_client_config +* openstack: os_image +* openstack: os_image_facts +* openstack: os_floating_ip +* openstack: os_ironic +* openstack: os_ironic_node +* openstack: os_keypair +* openstack: os_network +* openstack: os_network_facts +* openstack: os_nova_flavor +* openstack: os_object +* openstack: os_port +* openstack: os_project +* openstack: os_router +* openstack: os_security_group +* openstack: os_security_group_rule +* openstack: os_server +* openstack: os_server_actions +* openstack: os_server_facts +* openstack: os_server_volume +* openstack: os_subnet +* openstack: os_subnet_facts +* openstack: os_user +* openstack: os_user_group +* openstack: os_volume +* openvswitch_db. 
+* osx_defaults +* pagerduty_alert +* pam_limits +* pear +* profitbricks: profitbricks +* profitbricks: profitbricks_datacenter +* profitbricks: profitbricks_nic +* profitbricks: profitbricks_volume +* profitbricks: profitbricks_volume_attachments +* profitbricks: profitbricks_snapshot +* proxmox: proxmox +* proxmox: proxmox_template +* puppet +* pushover +* pushbullet +* rax: rax_clb_ssl +* rax: rax_mon_alarm +* rax: rax_mon_check +* rax: rax_mon_entity +* rax: rax_mon_notification +* rax: rax_mon_notification_plan +* rabbitmq_binding +* rabbitmq_exchange +* rabbitmq_queue +* selinux_permissive +* sendgrid +* sensu_check +* sensu_subscription +* seport +* slackpkg +* solaris_zone +* taiga_issue +* vertica_configuration +* vertica_facts +* vertica_role +* vertica_schema +* vertica_user +* vmware: vca_fw +* vmware: vca_nat +* vmware: vmware_cluster +* vmware: vmware_datacenter +* vmware: vmware_dns_config +* vmware: vmware_dvs_host +* vmware: vmware_dvs_portgroup +* vmware: vmware_dvswitch +* vmware: vmware_host +* vmware: vmware_migrate_vmk +* vmware: vmware_portgroup +* vmware: vmware_target_canonical_facts +* vmware: vmware_vm_facts +* vmware: vmware_vm_vss_dvs_migrate +* vmware: vmware_vmkernel +* vmware: vmware_vmkernel_ip_config +* vmware: vmware_vsan_cluster +* vmware: vmware_vswitch +* vmware: vsphere_copy +* webfaction_app +* webfaction_db +* webfaction_domain +* webfaction_mailbox +* webfaction_site +* win_acl +* win_dotnet_ngen +* win_environment +* win_firewall_rule +* win_iis_virtualdirectory +* win_iis_webapplication +* win_iis_webapppool +* win_iis_webbinding +* win_iis_website +* win_lineinfile +* win_nssm +* win_package +* win_regedit +* win_scheduled_task +* win_unzip +* win_updates +* win_webpicmd +* xenserver_facts +* zabbix_host +* zabbix_hostmacro +* zabbix_screen +* znode + +####New Inventory scripts: + +* cloudstack +* fleetctl +* openvz +* nagios_ndo +* nsot +* proxmox +* rudder +* serf + +####New Lookups: + +* credstash +* hashi_vault +* ini +* shelvefile + +####New Filters: + +* combine + +####New Connection: + +* docker: for talking to docker containers on the ansible controller machine without using ssh. + +####New Callbacks: + +* logentries: plugin to send play data to logentries service +* skippy: same as default but does not display skip messages + +###Minor changes: + +* Many more tests. The new API makes things more testable and we took advantage of it. +* big_ip modules now support turning off ssl certificate validation (use only for self-signed certificates). +* Consolidated code from modules using urllib2 to normalize features, TLS and SNI support. +* synchronize module's dest_port parameter now takes precedence over the ansible_ssh_port inventory setting. +* Play output is now dynamically sized to terminal with a minimum of 80 coluumns (old default). +* vars_prompt and pause are now skipped with a warning if the play is called noninteractively (i.e. pull from cron). +* Support for OpenBSD's 'doas' privilege escalation method. +* Most vault operations can now be done over multilple files. +* ansible-vault encrypt/decrypt read from stdin if no other input file is given, and can write to a given ``--output file`` (including stdout, '-'). + This lets you avoid ever writing sensitive plaintext to disk. +* ansible-vault rekey accepts the --new-vault-password-file option. +* ansible-vault now preserves file permissions on edit and rekey and defaults to restrictive permissions for other options. 
+* Configuration items defined as paths (local only) now all support shell style interpolations. +* Many fixes and new options added to modules, too many to list here. +* Now you can see task file and line number when using verbosity of 3 or above. +* The ``[x-y]`` host range syntax is no longer supported. Note that ``[0:1]`` matches two hosts, i.e. the range is inclusive of its endpoints. +* We now recommend the use of `pattern1,pattern2` to combine host matching patterns. + * The use of ':' as a separator conflicts with IPv6 addresses and host ranges. It will be deprecated in the future. + * The undocumented use of ';' as a separator is now deprecated. +* modules and callbacks have been extended to support no_log to avoid data disclosure. +* new managed_syslog option has been added to control output to syslog on managed machines, no_log supersedes this settings. +* Lookup, vars and action plugin pathing has been normalized, all now follow the same sequence to find relative files. +* We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set + explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value. +* environment variables passed to remote shells now default to 'controller' settings, with fallback to en_US.UTF8 which was the previous default. +* add_hosts is much stricter about host name and will prevent invalid names from being added. +* ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour. +* random cows are more random +* when: now gets the registered var after the first iteration, making it possible to break out of item loops +* Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. Use the default filter if you want to approximate the old behaviour: + + ``` + - debug: msg="The error message was: {{error_code |default('') }}" + ``` + +## 1.9.4 "Dancing In the Street" - Oct 9, 2015 + +* Fixes a bug where yum state=latest would error if there were no updates to install. +* Fixes a bug where yum state=latest did not work with wildcard package names. +* Fixes a bug in lineinfile relating to escape sequences. +* Fixes a bug where vars_prompt was not keeping passwords private by default. +* Fix ansible-galaxy and the hipchat callback plugin to check that the host it + is contacting matches its TLS Certificate. + +## 1.9.3 "Dancing In the Street" - Sep 3, 2015 + +* Fixes a bug related to keyczar messing up encodings internally, resulting in decrypted + messages coming out as empty strings. +* AES Keys generated for use in accelerated mode are now 256-bit by default instead of 128. +* Fix url fetching for SNI with python-2.7.9 or greater. SNI does not work + with python < 2.7.9. The best workaround is probably to use the command + module with curl or wget. +* Fix url fetching to allow tls-1.1 and tls-1.2 if the system's openssl library + supports those protocols +* Fix ec2_ami_search module to check TLS Certificates +* Fix the following extras modules to check TLS Certificates: + * campfire + * layman + * librarto_annotate + * twilio + * typetalk +* Fix docker module's parsing of docker-py version for dev checkouts +* Fix docker module to work with docker server api 1.19 +* Change yum module's state=latest feature to update all packages specified in + a single transaction. 
This is the same type of fix as was made for yum's + state=installed in 1.9.2 and both solves the same problems and with the same caveats. +* Fixed a bug where stdout from a module might be blank when there were were non-printable + ASCII characters contained within it + +## 1.9.2 "Dancing In the Street" - Jun 26, 2015 + +* Security fixes to check that hostnames match certificates with https urls (CVE-2015-3908) + - get_url and uri modules + - url and etcd lookup plugins +* Security fixes to the zone (Solaris containers), jail (bsd containers), + and chroot connection plugins. These plugins can be used to connect to + their respective container types in leiu of the standard ssh connection. + Prior to this fix being applied these connection plugins didn't properly + handle symlinks within the containers which could lead to files intended to + be written to or read from the container being written to or read from the + host system instead. (CVE pending) +* Fixed a bug in the service module where init scripts were being incorrectly used instead of upstart/systemd. +* Fixed a bug where sudo/su settings were not inherited from ansible.cfg correctly. +* Fixed a bug in the rds module where a traceback may occur due to an unbound variable. +* Fixed a bug where certain remote file systems where the SELinux context was not being properly set. +* Re-enabled several windows modules which had been partially merged (via action plugins): + - win_copy.ps1 + - win_copy.py + - win_file.ps1 + - win_file.py + - win_template.py +* Fix bug using with_sequence and a count that is zero. Also allows counting backwards isntead of forwards +* Fix get_url module bug preventing use of custom ports with https urls +* Fix bug disabling repositories in the yum module. +* Fix giving yum module a url to install a package from on RHEL/CENTOS5 +* Fix bug in dnf module preventing it from working when yum-utils was not already installed + +## 1.9.1 "Dancing In the Street" - Apr 27, 2015 + +* Fixed a bug related to Kerberos auth when using winrm with a domain account. +* Fixing several bugs in the s3 module. +* Fixed a bug with upstart service detection in the service module. +* Fixed several bugs with the user module when used on OSX. +* Fixed unicode handling in some module situations (assert and shell/command execution). +* Fixed a bug in redhat_subscription when using the activationkey parameter. +* Fixed a traceback in the gce module on EL6 distros when multiple pycrypto installations are available. +* Added support for PostgreSQL 9.4 in rds_param_group +* Several other minor fixes. + +## 1.9 "Dancing In the Street" - Mar 25, 2015 -in progress, details pending +Major changes: -* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. -* Safety changes: several modules have force parameters that defaulted to true. +* Added kerberos support to winrm connection plugin. +* Tags rehaul: added 'all', 'always', 'untagged' and 'tagged' special tags and normalized + tag resolution. Added tag information to --list-tasks and new --list-tags option. +* Privilege Escalation generalization, new 'Become' system and variables now will + handle existing and new methods. Sudo and su have been kept for backwards compatibility. + New methods pbrun and pfexec in 'alpha' state, planned adding 'runas' for winrm connection plugin. +* Improved ssh connection error reporting, now you get back the specific message from ssh. 
+* Added facility to document task module return values for registered vars, both for + ansible-doc and the docsite. Documented copy, stats and acl modules, the rest must be + updated individually (we will start doing so incrementally). +* Optimize the plugin loader to cache available plugins much more efficiently. + For some use cases this can lead to dramatic improvements in startup time. +* Overhaul of the checksum system, now supports more systems and more cases more reliably and uniformly. +* Fix skipped tasks to not display their parameters if no_log is specified. +* Many fixes to unicode support, standarized functions to make it easier to add to input/output boundaries. +* Added travis integration to github for basic tests, this should speed up ticket triage and merging. +* environment: directive now can also be applied to play and is inhertited by tasks, which can still override it. +* expanded facts and OS/distribution support for existing facts and improved performance with pypy. +* new 'wantlist' option to lookups allows for selecting a list typed variable vs a comma delimited string as the return. +* the shared module code for file backups now uses a timestamp resolution of seconds (previouslly minutes). +* allow for empty inventories, this is now a warning and not an error (for those using localhost and cloud modules). +* sped up YAML parsing in ansible by up to 25% by switching to CParser loader. + +New Modules: + +* cryptab: manages linux encrypted block devices +* gce_img: for utilizing GCE image resources +* gluster_volume: manage glusterfs volumes +* haproxy: for the load balancer of same name +* known_hosts: manages the ssh known_hosts file +* lxc_container: manage lxc containers +* patch: allows for patching files on target systems +* pkg5: installing and uninstalling packages on Solaris +* pkg5_publisher: manages Solaris pkg5 repository configuration +* postgresql_ext: manage postgresql extensions +* snmp_facts: gather facts via snmp +* svc: manages daemontools based services +* uptimerobot: manage monitoring with this service + +New Filters: + +* ternary: allows for trueval/falseval assignment dependent on conditional +* cartesian: returns the Cartesian product of 2 lists +* to_uuid: given a string it will return an ansible domain specific UUID +* checksum: uses the ansible internal checksum to return a hash from a string +* hash: get a hash from a string (md5, sha1, etc) +* password_hash: get a hash form as string that can be used as a password in the user module (and others) +* A whole set of ip/network manipulation filters: ipaddr,ipwrap,ipv4,ipv6ipsubnet,nthhost,hwaddr,macaddr + +Other Notable Changes: + +* New lookup plugins: + * dig: does dns resolution and returns IPs. + * url: allows pulling data from a url. + +* New callback plugins: + * syslog_json: allows logging play output to a syslog network server using json format + +* Many new enhancements to the amazon web service modules: + * ec2 now applies all specified security groups when creating a new instance. 
Previously it was only applying one + * ec2_vol gained the ability to specify the EBS volume type + * ec2_vol can now detach volumes by specifying instance=None + * Fix ec2_group to purge specific grants rather than whole rules + * Added tenancy support for the ec2 module + * rds module has gained the ability to manage tags and set charset and public accessibility + * ec2_snapshot module gained the capability to remove snapshots + * Add alias support for route53 + * Add private_zones support to route53 + * ec2_asg: Add wait_for_instances parameter that waits until an instance is ready before ending the ansible task +* Many new docker improvements: + * restart_policy parameters to configure when the container automatically restarts + * If the docker client or server doesn't support an option, the task will now fail instead of silently ignoring the option + * Add insecure_registry parameter for connecting to registries via http + * New parameter to set a container's domain name + * Undeprecated docker_image module until there's replacement functionality + * Allow setting the container's pid namespace + * Add a pull parameter that chooses when ansible will look for more recent images in the registry + * docker module states have been greatly enhanced. The reworked and new states are: + * present now creates but does not start containers + * restarted always restarts a container + * reloaded restarts a container if ansible detects that the configuration is different than what is specified + * reloaded accounts for exposed ports, env vars, and volumes + * Can now connect to the docker server using TLS +* Several source control modules had force parameters that defaulted to true. These have been changed to default to false so as not to accidentally lose - work. Playbooks that depended on the former behaviour simply to add + work. Playbooks that depended on the former behaviour simply need to add force=True to the task that needs it. Affected modules: - * bzr: When local modifications exist in a checkout, the bzr module used to - default to temoving the modifications on any operation. Now the module + default to removing the modifications on any operation. Now the module will not remove the modifications unless force=yes is specified. Operations that depend on a clean working tree may fail unless force=yes is added. * git: When local modifications exist in a checkout, the git module will now - fail unless force is explictly specified. Specifying force will allow the - module to revert and overwrite local modifications to make git actions + fail unless force is explicitly specified. Specifying force=yes will allow + the module to revert and overwrite local modifications to make git actions succeed. * hg: When local modifications exist in a checkout, the hg module used to default to removing the modifications on any operation. Now the module will not remove the modifications unless force=yes is specified. * subversion: When updating a checkout with local modifications, you now need - to add force so the module will revert the modifications before updating. - -* Optimize the plugin loader to cache available plugins much more efficiently. - For some use cases this can lead to dramatic improvements in startup time. - -* Fix skipped tasks to not display their parameters if no_log is specified. + to add force=yes so the module will revert the modifications before updating. 
+* New inventory scripts: + * vbox: virtualbox + * consul: use consul as an inventory source +* gce gained the ip_forward parameter to forward ip packets +* disk_auto_delete parameter to gce that will remove the boot disk after an instance is destroyed +* gce can now spawn instances with no external ip +* gce_pd gained the ability to choose a disk type +* gce_net gained target_tags parameter for creating firewall rules +* rax module has new parameters for making use of a boot volume +* Add scheduler_hints to the nova_compute module for optional parameters +* vsphere_guest now supports deploying guests from a template +* Many fixes for hardlink and softlink handling in file-related modules +* Implement user, group, mode, and selinux parameters for the unarchive module +* authorized_keys can now use url as a key source +* authorized_keys has a new exclusive parameter that determines if keys that weren't specified in the task +* The selinux module now sets the current running state to permissive if state='disabled' +* Can now set accounts to expire via the user module +* Overhaul of the service module to make code simpler and behave better for systems running several popular init systems +* yum module now has a parameter to refresh its cache of package metadata +* apt module gained a build_dep parameter to install a package's build dependencies +* Add parameters to the postgres modules to specify a unix socket to connect to the db +* The mount module now supports bind mounts +* Add a clone parameter to git module that allows you to get information about a remote repo even if it doesn't exist locally. +* Add a refspec argument to the git module that allows pulling commits that aren't part of a branch +* Many documentation additions and fixes. ## 1.8.4 "You Really Got Me" - Feb 19, 2015 @@ -37,7 +605,7 @@ in progress, details pending ## 1.8.3 "You Really Got Me" - Feb 17, 2015 -* Fixing a security bug related to the default permissions set on a tempoary file created when using "ansible-vault view ". +* Fixing a security bug related to the default permissions set on a temporary file created when using "ansible-vault view ". * Many bug fixes, for both core code and core modules. ## 1.8.2 "You Really Got Me" - Dec 04, 2014 @@ -138,7 +706,7 @@ And various other bug fixes and improvements ... - Fixes a bug in vault where the password file option was not being used correctly internally. - Improved multi-line parsing when using YAML literal blocks (using > or |). - Fixed a bug with the file module and the creation of relative symlinks. -- Fixed a bug where checkmode was not being honored during the templating of files. +- Fixed a bug where checkmode was not being honoured during the templating of files. - Other various bug fixes. ## 1.7.1 "Summer Nights" - Aug 14, 2014 @@ -181,7 +749,7 @@ New Modules: Other notable changes: * Security fixes - - Prevent the use of lookups when using legaxy "{{ }}" syntax around variables and with_* loops. + - Prevent the use of lookups when using legacy "{{ }}" syntax around variables and with_* loops. - Remove relative paths in TAR-archived file names used by ansible-galaxy. * Inventory speed improvements for very large inventories. * Vault password files can now be executable, to support scripts that fetch the vault password. @@ -325,7 +893,7 @@ Other notable changes: ## 1.5.4 "Love Walks In" - April 1, 2014 - Security fix for safe_eval, which further hardens the checking of the evaluation function. 
-- Changing order of variable precendence for system facts, to ensure that inventory variables take precedence over any facts that may be set on a host. +- Changing order of variable precedence for system facts, to ensure that inventory variables take precedence over any facts that may be set on a host. ## 1.5.3 "Love Walks In" - March 13, 2014 @@ -360,7 +928,7 @@ Major features/changes: * ec2 module now accepts 'exact_count' and 'count_tag' as a way to enforce a running number of nodes by tags. * all ec2 modules that work with Eucalyptus also now support a 'validate_certs' option, which can be set to 'off' for installations using self-signed certs. * Start of new integration test infrastructure (WIP, more details TBD) -* if repoquery is unavailble, the yum module will automatically attempt to install yum-utils +* if repoquery is unavailable, the yum module will automatically attempt to install yum-utils * ansible-vault: a framework for encrypting your playbooks and variable files * added support for privilege escalation via 'su' into bin/ansible and bin/ansible-playbook and associated keywords 'su', 'su_user', 'su_pass' for tasks/plays @@ -823,7 +1391,7 @@ Bugfixes and Misc Changes: * misc fixes to the Riak module * make template module slightly more efficient * base64encode / decode filters are now available to templates -* libvirt module can now work with multiple different libvirt connecton URIs +* libvirt module can now work with multiple different libvirt connection URIs * fix for postgresql password escaping * unicode fix for shlex.split in some cases * apt module upgrade logic improved @@ -858,7 +1426,7 @@ the variable is still registered for the host, with the attribute skipped: True. * service pattern argument now correctly read for BSD services * fetch location can now be controlled more directly via the 'flat' parameter. * added basename and dirname as Jinja2 filters available to all templates -* pip works better when sudoing from unpriveledged users +* pip works better when sudoing from unprivileged users * fix for user creation with groups specification reporting 'changed' incorrectly in some cases * fix for some unicode encoding errors in outputing some data in verbose mode * improved FreeBSD, NetBSD and Solaris facts @@ -1028,7 +1596,7 @@ New playbook/language features: * task includes can now be of infinite depth * when_set and when_unset can take more than one var (when_set: $a and $b and $c) * added the with_sequence lookup plugin -* can override "connection:" on an indvidual task +* can override "connection:" on an individual task * parameterized playbook includes can now define complex variables (not just all on one line) * making inventory variables available for use in vars_files paths * messages when skipping plays are now more clear diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 8ce40348ca1e8a..094501db9064ad 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,11 +1,22 @@ ##### Issue Type: -Can you help us out in labelling this by telling us what kind of ticket this this? You can say “Bug Report”, “Feature Idea”, “Feature Pull Request”, “New Module Pull Request”, “Bugfix Pull Request”, “Documentation Report”, or “Docs Pull Request”. +Can you help us out in labelling this by telling us what kind of ticket this this? 
You can say: + - Bug Report + - Feature Idea + - Feature Pull Request + - New Module Pull Request + - Bugfix Pull Request + - Documentation Report + - Docs Pull Request ##### Ansible Version: Let us know what version of Ansible you are using. Please supply the verbatim output from running “ansible --version”. If you're filing a ticket on a version of Ansible which is not the latest, we'd greatly appreciate it if you could retest on the latest version first. We don't expect you to test against the development branch most of the time, but we may ask for that if you have cycles. Thanks! +##### Ansible Configuration: + +What have you changed about your Ansible installation? What configuration settings have you changed/added/removed? Compare your /etc/ansible/ansible.cfg against a clean version from Github and let us know what's different. + ##### Environment: What OS are you running Ansible from and what OS are you managing? Examples include RHEL 5/6, Centos 5/6, Ubuntu 12.04/13.10, *BSD, Solaris. If this is a generic feature request or it doesn’t apply, just say “N/A”. Not all tickets may be about operating system related things and we understand that. diff --git a/MANIFEST.in b/MANIFEST.in index f4e727d8c4dcab..a5e29c9a4334f6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,15 +4,21 @@ prune ticket_stubs prune packaging prune test prune hacking -include README.md packaging/rpm/ansible.spec COPYING +include README.md COPYING include examples/hosts include examples/ansible.cfg include lib/ansible/module_utils/powershell.ps1 recursive-include lib/ansible/modules * +recursive-include lib/ansible/galaxy/data * recursive-include docs * -recursive-include plugins * +recursive-include packaging * include Makefile include VERSION include MANIFEST.in +include CHANGELOG.md +include contrib/README.md +recursive-include contrib/inventory * +exclude lib/ansible/modules/core/.git* +exclude lib/ansible/modules/extras/.git* prune lib/ansible/modules/core/.git prune lib/ansible/modules/extras/.git diff --git a/Makefile b/Makefile index f688bd73bf607b..367987affce554 100644 --- a/Makefile +++ b/Makefile @@ -34,11 +34,17 @@ PYTHON=python SITELIB = $(shell $(PYTHON) -c "from distutils.sysconfig import get_python_lib; print get_python_lib()") # VERSION file provides one place to update the software version -VERSION := $(shell cat VERSION) +VERSION := $(shell cat VERSION | cut -f1 -d' ') +RELEASE := $(shell cat VERSION | cut -f2 -d' ') # Get the branch information from git ifneq ($(shell which git),) GIT_DATE := $(shell git log -n 1 --format="%ai") +GIT_HASH := $(shell git log -n 1 --format="%h") +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.\/]//g') +GITINFO = .$(GIT_HASH).$(GIT_BRANCH) +else +GITINFO = "" endif ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1) @@ -52,15 +58,16 @@ DEBUILD_BIN ?= debuild DEBUILD_OPTS = --source-option="-I" DPUT_BIN ?= dput DPUT_OPTS ?= +DEB_DATE := $(shell LC_TIME=C date +"%a, %d %b %Y %T %z") ifeq ($(OFFICIAL),yes) - DEB_RELEASE = 1ppa + DEB_RELEASE = $(RELEASE)ppa # Sign OFFICIAL builds using 'DEBSIGN_KEYID' # DEBSIGN_KEYID is required when signing ifneq ($(DEBSIGN_KEYID),) DEBUILD_OPTS += -k$(DEBSIGN_KEYID) endif else - DEB_RELEASE = 0.git$(DATE) + DEB_RELEASE = 0.git$(DATE)$(GITINFO) # Do not sign unofficial builds DEBUILD_OPTS += -uc -us DPUT_OPTS += -u @@ -74,9 +81,9 @@ DEB_DIST ?= unstable RPMSPECDIR= packaging/rpm RPMSPEC = $(RPMSPECDIR)/ansible.spec RPMDIST = $(shell rpm --eval '%{?dist}') -RPMRELEASE = 1 +RPMRELEASE = $(RELEASE) ifneq 
($(OFFICIAL),yes) - RPMRELEASE = 0.git$(DATE) + RPMRELEASE = 0.git$(DATE)$(GITINFO) endif RPMNVR = "$(NAME)-$(VERSION)-$(RPMRELEASE)$(RPMDIST)" @@ -86,20 +93,17 @@ MOCK_CFG ?= NOSETESTS ?= nosetests -NOSETESTS3 ?= nosetests-3.3 +NOSETESTS3 ?= nosetests-3.4 ######################################################## all: clean python tests: - PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v # Could do: --with-coverage --cover-package=ansible - -newtests: - PYTHONPATH=./v2:./lib $(NOSETESTS) -d -w v2/test -v --with-coverage --cover-package=ansible --cover-branches + PYTHONPATH=./lib $(NOSETESTS) -d -w test/units -v --with-coverage --cover-package=ansible --cover-branches -newtests-py3: - PYTHONPATH=./v2:./lib $(NOSETESTS3) -d -w v2/test -v --with-coverage --cover-package=ansible --cover-branches +tests-py3: + PYTHONPATH=./lib $(NOSETESTS3) -d -w test/units -v --with-coverage --cover-package=ansible --cover-branches authors: sh hacking/authors.sh @@ -131,10 +135,11 @@ clean: @echo "Cleaning up distutils stuff" rm -rf build rm -rf dist + rm -rf lib/ansible.egg-info/ @echo "Cleaning up byte compiled python stuff" find . -type f -regex ".*\.py[co]$$" -delete @echo "Cleaning up editor backup files" - find . -type f \( -name "*~" -or -name "#*" \) -delete + find . -type f -not -path ./test/units/inventory_test_data/group_vars/noparse/all.yml~ \( -name "*~" -or -name "#*" \) -delete find . -type f \( -name "*.swp" \) -delete @echo "Cleaning up manpage stuff" find ./docs/man -type f -name "*.xml" -delete @@ -162,6 +167,9 @@ install: sdist: clean docs $(PYTHON) setup.py sdist +sdist_upload: clean docs + $(PYTHON) setup.py sdist upload 2>&1 |tee upload.log + rpmcommon: $(MANPAGES) sdist @mkdir -p rpm-build @cp dist/*.gz rpm-build/ @@ -216,7 +224,7 @@ debian: sdist mkdir -p deb-build/$${DIST} ; \ tar -C deb-build/$${DIST} -xvf dist/$(NAME)-$(VERSION).tar.gz ; \ cp -a packaging/debian deb-build/$${DIST}/$(NAME)-$(VERSION)/ ; \ - sed -ie "s#^$(NAME) (\([^)]*\)) \([^;]*\);#ansible (\1-$(DEB_RELEASE)~$${DIST}) $${DIST};#" deb-build/$${DIST}/$(NAME)-$(VERSION)/debian/changelog ; \ + sed -ie "s|%VERSION%|$(VERSION)|g;s|%RELEASE%|$(DEB_RELEASE)|;s|%DIST%|$${DIST}|g;s|%DATE%|$(DEB_DATE)|g" deb-build/$${DIST}/$(NAME)-$(VERSION)/debian/changelog ; \ done deb: debian diff --git a/README.md b/README.md index 8bfe58a5433377..2e1f15559d31a2 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -[![PyPI version](https://badge.fury.io/py/ansible.png)](http://badge.fury.io/py/ansible) [![PyPI downloads](https://pypip.in/d/ansible/badge.png)](https://pypi.python.org/pypi/ansible) +[![PyPI version](https://img.shields.io/pypi/v/ansible.svg)](https://pypi.python.org/pypi/ansible) +[![PyPI downloads](https://img.shields.io/pypi/dm/ansible.svg)](https://pypi.python.org/pypi/ansible) +[![Build Status](https://travis-ci.org/ansible/ansible.svg?branch=devel)](https://travis-ci.org/ansible/ansible) Ansible @@ -10,9 +12,9 @@ Read the documentation and more at http://ansible.com/ Many users run straight from the development branch (it's generally fine to do so), but you might also wish to consume a release. -You can find instructions [here](http://docs.ansible.com/intro_getting_started.html) for a variety of platforms. If you decide to go with the development branch, be sure to run "git submodule update --init --recursive" after doing a checkout. +You can find instructions [here](http://docs.ansible.com/intro_getting_started.html) for a variety of platforms. 
If you decide to go with the development branch, be sure to run `git submodule update --init --recursive` after doing a checkout. -If you want to download a tarball of a release, go to [releases.ansible.com](http://releases.ansible.com/ansible), though most users use yum (using the EPEL instructions linked above), apt (using the PPA instructions linked above), or "pip install ansible". +If you want to download a tarball of a release, go to [releases.ansible.com](http://releases.ansible.com/ansible), though most users use `yum` (using the EPEL instructions linked above), `apt` (using the PPA instructions linked above), or `pip install ansible`. Design Principles ================= @@ -31,7 +33,7 @@ Get Involved ============ * Read [Community Information](http://docs.ansible.com/community.html) for all kinds of ways to contribute to and interact with the project, including mailing list information and how to submit bug reports and code to Ansible. - * All code submissions are done through pull requests. Take care to make sure no merge commits are in the submission, and use "git rebase" vs "git merge" for this reason. If submitting a large code change (other than modules), it's probably a good idea to join ansible-devel and talk about what you would like to do or add first and to avoid duplicate efforts. This not only helps everyone know what's going on, it also helps save time and effort if we decide some changes are needed. + * All code submissions are done through pull requests. Take care to make sure no merge commits are in the submission, and use `git rebase` vs `git merge` for this reason. If submitting a large code change (other than modules), it's probably a good idea to join ansible-devel and talk about what you would like to do or add first and to avoid duplicate efforts. This not only helps everyone know what's going on, it also helps save time and effort if we decide some changes are needed. * Users list: [ansible-project](http://groups.google.com/group/ansible-project) * Development list: [ansible-devel](http://groups.google.com/group/ansible-devel) * Announcement list: [ansible-announce](http://groups.google.com/group/ansible-announce) - read only @@ -40,7 +42,7 @@ Get Involved Branch Info =========== - * Releases are named after Van Halen songs. + * Releases are named after Led Zeppelin songs. (Releases prior to 2.0 were named after Van Halen songs.) * The devel branch corresponds to the release actively under development. * As of 1.8, modules are kept in different repos, you'll want to follow [core](https://github.com/ansible/ansible-modules-core) and [extras](https://github.com/ansible/ansible-modules-extras) * Various release-X.Y branches exist for previous releases. @@ -49,7 +51,8 @@ Branch Info Authors ======= -Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 900 users (and growing). Thanks everyone! +Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.dehaan/gmail/com) and has contributions from over 1000 users (and growing). Thanks everyone! 
Ansible is sponsored by [Ansible, Inc](http://ansible.com) + diff --git a/RELEASES.txt b/RELEASES.txt index ddcce78efab557..cd32b0cddb04f9 100644 --- a/RELEASES.txt +++ b/RELEASES.txt @@ -4,12 +4,20 @@ Ansible Releases at a Glance Active Development ++++++++++++++++++ -1.9 "Dancing In the Street" - in progress +2.0 "Over the Hills and Far Away" - in progress Released ++++++++ -1.8.1 "You Really Got Me" -- 11-26-2014 +1.9.4 "Dancing In the Streets" 10-09-2015 +1.9.3 "Dancing In the Streets" 09-03-2015 +1.9.2 "Dancing In the Streets" 06-24-2015 +1.9.1 "Dancing In the Streets" 04-27-2015 +1.9.0 "Dancing In the Streets" 03-25-2015 +1.8.4 "You Really Got Me" ---- 02-19-2015 +1.8.3 "You Really Got Me" ---- 02-17-2015 +1.8.2 "You Really Got Me" ---- 12-04-2014 +1.8.1 "You Really Got Me" ---- 11-26-2014 1.7.2 "Summer Nights" -------- 09-24-2014 1.7.1 "Summer Nights" -------- 08-14-2014 1.7 "Summer Nights" -------- 08-06-2014 diff --git a/ROADMAP.md b/ROADMAP.md new file mode 100644 index 00000000000000..d4982369d45b67 --- /dev/null +++ b/ROADMAP.md @@ -0,0 +1,98 @@ +Roadmap For Ansible by RedHat +============= +This document is now the location for published Ansible Core roadmaps. + +The roadmap will be updated by version. Based on team and community feedback, an initial roadmap will be published for a major or minor version (2.0, 2.1). Subminor versions will generally not have roadmaps published. + +This is the first time Ansible has published this and asked for feedback in this manner. So feedback on the roadmap and the new process is quite welcome. The team is aiming for further transparency and better inclusion of both community desires and submissions. + +These roadmaps are the team's *best guess* roadmaps based on the Ansible team's experience and are also based on requests and feedback from the community. There are things that may not make it on due to time constraints, lack of community maintainers, etc. And there may be things that got missed, so each roadmap is published both as an idea of what is upcoming in Ansible, and as a medium for seeking further feedback from the community. Here are the good places for you to submit feedback: + + * Ansible's google-group: ansible-devel + * Ansible Fest conferences. + * IRC freenode channel: #ansible-devel (this one may have things lost in lots of conversation, so a caution). + +2.1 Roadmap, Targeted for the End of April +========== +## Windows, General +* Figuring out privilege escalation (runas w/ username/password) +* Implement kerberos encryption over http +* pywinrm conversion to requests (Some mess here on pywinrm/requests. will need docs etc.) +* NTLM support + +## Modules +* Windows + * Finish cleaning up tests and support for post-beta release + * Strict mode cleanup (one module in core) + * Domain user/group management + * Finish win\_host and win\_rm in the domain/workgroup modules. + * Close 2 existing PRs (These were deemed insufficient) + * Replicate python module API in PS/C# (deprecate hodgepodge of stuff from module_utils/powershell.ps1) +* Network + * Cisco modules (ios, iosxr, nxos, iosxe) + * Arista modules (eos) + * Juniper modules (junos) + * OpenSwitch + * Cumulus + * Dell (os10) - At risk + * Netconf shared module + * Hooks for supporting Tower credentials +* VMware (This one is a little at risk due to staffing. We're investigating some community maintainers and shifting some people at Ansible around, but it is a VERY high priority). 
+ * vsphere\_guest brought to parity with other vmware modules (vs Viasat and 'whereismyjetpack' provided modules) + * VMware modules moved to official pyvmomi bindings + * VMware inventory script updates for pyvmomi, adding tagging support +* Azure (Notes: This is on hold until Microsoft swaps out the code generator on the Azure Python SDK, which may introduce breaking changes. We have basic modules working against all of these resources at this time. Could ship it against current SDK, but may break. Or should the version be pinned?) + * Minimal Azure coverage using new ARM api + * Resource Group + * Virtual Network + * Subnet + * Public IP + * Network Interface + * Storage Account + * Security Group + * Virtual Machine + * Update of inventory script to use new API, adding tagging support +* Docker: + * Start Docker module refactor + * Update to match current docker CLI capabilities + * Docker exec support +* Upgrade other cloud modules or work with community maintainers to upgrade. (In order) + * AWS (Community maintainers) + * Openstack (Community maintainers) + * Google (Google/Community) + * Digital Ocean (Community) +* Ziploader: + * Write code to create the zipfile that gets passed across the wire to be run on the remote python + * Port most of the functionality in module\_utils to be usage in ziploader instead + * Port a few essential modules to use ziploader instead of module-replacer as proof of concept + * New modules will be able to use ziploader. Old modules will need to be ported in future releases (Some modules will not need porting but others will) + * Better testing of modules, caching of modules clientside(Have not yet arrived at an architecture for this that we like), better code sharing between ansible/ansible and modules + * ziploader is a helpful building block for: python3 porting(high priority), better code sharing between modules(medium priority) + * ziploader is a good idea before: enabling users to have custom module_utils directories +* Expand module diff support (already in progress in devel) + * Framework done. Need to add to modules, test etc. 
+ * Coordinate with community to update their modules +* Things being kicking down the road that we said we’d do + * NOT remerging core with ansible/ansible this release cycle +* Community stuff + * Define the process/ETA for reviewing PR’s from community + * Publish better docs and how-tos for submitting code/features/fixes + + + + + + + + + + + + + + + + + + + diff --git a/VERSION b/VERSION index 2e0e38c63a62a4..7ec1d6db408777 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.9 +2.1.0 diff --git a/ansible-core-sitemap.xml b/ansible-core-sitemap.xml new file mode 100644 index 00000000000000..84a048d3116809 --- /dev/null +++ b/ansible-core-sitemap.xml @@ -0,0 +1,2716 @@ + + + + + + http://docs.ansible.com/ansible/ + weekly + 1.0 + + + http://docs.ansible.com/ansible/intro_patterns.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/intro_adhoc.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/intro_configuration.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/intro_getting_started.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/intro.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/intro_inventory.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/intro_installation.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/intro_bsd.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/intro_dynamic_inventory.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/intro_windows.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_filters.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_conditionals.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/quickstart.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_loops.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_variables.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_roles.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_intro.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_blocks.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_async.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_checkmode.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/become.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_acceleration.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_best_practices.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_delegation.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_special_topics.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_strategies.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_environment.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_error_handling.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_prompts.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/modules_intro.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_tags.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_lookups.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_vault.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/playbooks_startnstep.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/modules_core.html + weekly + 0.5 + + + 
http://docs.ansible.com/ansible/modules_extra.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_commands_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/common_return_values.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/modules_by_category.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_cloud_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_all_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_clustering_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_database_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_files_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_inventory_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_source_control_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_system_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_utilities_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_monitoring_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_notification_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_messaging_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_network_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_packaging_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_web_infrastructure_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/guide_cloudstack.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/guide_vagrant.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/guides.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/guide_gce.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/list_of_windows_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/guide_aws.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/guide_rax.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/guide_rolling_upgrade.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/developing.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/developing_releases.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/tower.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/developing_inventory.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/developing_test_pr.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/developing_plugins.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/community.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/developing_api.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/developing_modules.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/test_strategies.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/glossary.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/galaxy.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/faq.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/YAMLSyntax.html + weekly + 0.5 + + + http://docs.ansible.com/ansible/index.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/command_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/shell_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/playbooks_filters_ipaddr.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/expect_module.html + weekly + 0.3 + + + 
http://docs.ansible.com/ansible/script_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/raw_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/znode_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/xenserver_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cloudtrail_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cloudformation_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/dynamodb_table_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_ami_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_ami_copy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_elb_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_ami_find_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_eip_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_elb_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_asg_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_eni_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_elb_lb_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_eni_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_key_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_lc_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_tag_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_scaling_policy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_metric_alarm_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_snapshot_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_remote_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_vpc_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_vol_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_vpc_igw_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_vpc_subnet_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_vpc_net_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_vpc_net_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_vpc_route_table_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_win_password_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_vpc_route_table_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_vpc_subnet_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ecs_cluster_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/iam_cert_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ecs_taskdefinition_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ecs_task_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/elasticache_subnet_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/iam_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/elasticache_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/iam_policy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rds_module.html + weekly + 0.3 + + + 
http://docs.ansible.com/ansible/route53_zone_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rds_subnet_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/route53_health_check_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/route53_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rds_param_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/route53_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/s3_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/sts_assume_role_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/s3_bucket_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/s3_lifecycle_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/sns_topic_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/s3_logging_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/sqs_queue_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/azure_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_aa_policy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_modify_server_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_alert_policy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_publicip_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_firewall_policy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_blueprint_package_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_loadbalancer_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_server_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_firewall_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_instance_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/clc_server_snapshot_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_affinitygroup_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_domain_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_account_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_instancegroup_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_iso_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_project_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_ip_address_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_securitygroup_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_loadbalancer_rule_member_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_loadbalancer_rule_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_network_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_portforward_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_securitygroup_rule_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_sshkeypair_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_template_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_staticnat_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/digital_ocean_domain_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_vmsnapshot_module.html + weekly + 
0.3 + + + http://docs.ansible.com/ansible/digital_ocean_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_volume_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cs_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/docker_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/digital_ocean_sshkey_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/docker_login_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/gce_net_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/gce_pd_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/gc_storage_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/docker_image_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/gce_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/gce_lb_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/gce_img_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/gce_tag_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/linode_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/virt_net_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/virt_pool_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_auth_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ovirt_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/lxc_container_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/virt_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/proxmox_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/proxmox_template_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_client_config_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_floating_ip_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_network_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_networks_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_image_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_ironic_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_image_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_ironic_node_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_keypair_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_nova_flavor_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_security_group_rule_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_server_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_server_actions_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_object_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_project_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_security_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_server_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_router_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_port_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_server_volume_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/profitbricks_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/profitbricks_datacenter_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_subnets_facts_module.html + weekly + 0.3 + + + 
http://docs.ansible.com/ansible/os_subnet_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_volume_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/os_user_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/profitbricks_nic_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_cdb_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_cdb_database_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/profitbricks_volume_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/profitbricks_volume_attachments_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_cbs_attachments_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_cdb_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_cbs_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_files_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_files_objects_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_clb_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_dns_record_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_clb_nodes_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_dns_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_identity_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_mon_entity_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_mon_notification_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_mon_notification_plan_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_clb_ssl_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_meta_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_keypair_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_mon_check_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_network_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_mon_alarm_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_queue_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_scaling_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vca_vapp_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vca_nat_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rax_scaling_policy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vca_fw_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_dvswitch_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_cluster_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_host_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_dns_config_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_datacenter_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_dvs_host_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_dvs_portgroup_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_target_canonical_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_migrate_vmk_module.html + 
weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_vmkernel_ip_config_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_vswitch_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vsphere_copy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_vm_shell_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_vsan_cluster_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_vmkernel_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vmware_vm_vss_dvs_migrate_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vsphere_guest_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/zypper_repository_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/a10_server_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/a10_service_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/webfaction_db_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/webfaction_domain_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/a10_virtual_server_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/webfaction_app_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/webfaction_site_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/webfaction_mailbox_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/accelerate_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/apache2_module_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/apt_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/acl_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/alternatives_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/airbrake_deployment_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/add_host_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/apt_key_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/apk_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/at_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/authorized_key_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/apt_repository_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/assemble_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/apt_rpm_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/assert_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/async_status_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bigip_gtm_wide_ip_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bigip_pool_member_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bigip_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bigip_monitor_http_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bigpanda_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bower_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bigip_monitor_tcp_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bigip_node_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bigip_pool_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bundler_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/boundary_meter_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/blockinfile_module.html + weekly + 0.3 + + + 
http://docs.ansible.com/ansible/consul_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/consul_acl_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/consul_kv_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/campfire_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/composer_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/bzr_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/circonus_annotation_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/capabilities_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/consul_session_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/datadog_event_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/datadog_monitor_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/debconf_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/copy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cronvar_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cpanm_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/cron_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/debug_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/crypttab_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/deploy_helper_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/django_manage_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/dnf_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/dpkg_selections_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/dnsmadeeasy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/dnsimple_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/fail_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/easy_install_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/fetch_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ec2_ami_search_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ejabberd_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/filesystem_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/facter_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/elasticsearch_plugin_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/firewalld_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/file_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/get_url_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/find_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/flowdock_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/git_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/fireball_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/gem_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/getent_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/gluster_volume_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/github_hooks_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/hall_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/hg_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/glance_image_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/hipchat_module.html + weekly + 0.3 + + + 
http://docs.ansible.com/ansible/group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/grove_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/group_by_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/haproxy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/homebrew_cask_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/homebrew_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ini_file_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/homebrew_tap_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/irc_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/hostname_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/htpasswd_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/include_vars_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ipify_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/jabber_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/iptables_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/known_hosts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/jboss_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/librato_annotation_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/kernel_blacklist_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/keystone_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/jira_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/layman_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/lvol_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/lineinfile_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/macports_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/logentries_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/locale_gen_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/lvg_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/maven_artifact_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/lldp_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/mail_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/mqtt_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/modprobe_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/mount_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/mongodb_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/mysql_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/monit_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/mysql_db_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/mysql_replication_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/mysql_variables_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/nmcli_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/netscaler_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/nagios_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/newrelic_deployment_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/nexmo_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/nova_compute_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/openvswitch_bridge_module.html + weekly + 0.3 + + + 
http://docs.ansible.com/ansible/openvswitch_db_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/openvswitch_port_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ohai_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/npm_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/open_iscsi_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/openbsd_pkg_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/opkg_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/nova_keypair_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pagerduty_alert_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pam_limits_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/patch_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/osx_say_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pacman_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/osx_defaults_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/package_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pagerduty_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pause_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pkg5_publisher_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pear_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pkgin_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pingdom_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pkg5_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ping_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pip_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pkgutil_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/postgresql_lang_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pkgng_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/portage_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/postgresql_privs_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/portinstall_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pushbullet_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/postgresql_db_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/postgresql_ext_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/puppet_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/postgresql_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/quantum_network_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/pushover_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/quantum_router_interface_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/quantum_floating_ip_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/quantum_floating_ip_associate_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rabbitmq_binding_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/quantum_router_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/quantum_router_gateway_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rabbitmq_policy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/quantum_subnet_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rabbitmq_vhost_module.html + weekly + 0.3 + + + 
http://docs.ansible.com/ansible/rabbitmq_plugin_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rabbitmq_exchange_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rabbitmq_queue_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rabbitmq_parameter_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/redhat_subscription_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rabbitmq_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/riak_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rpm_key_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/seboolean_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rollbar_deployment_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/redis_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rhn_channel_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/rhn_register_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/replace_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/selinux_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/set_fact_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/setup_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/slack_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/sendgrid_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/seport_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/slackpkg_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/sensu_check_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/service_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/selinux_permissive_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/stat_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/subversion_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/slurp_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/supervisorctl_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/solaris_zone_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/sns_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/stackdriver_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/svr4pkg_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/snmp_facts_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/svc_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/twilio_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/swdepot_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/unarchive_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/template_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/sysctl_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/synchronize_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/uptimerobot_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/typetalk_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/ufw_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/uri_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vertica_role_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/urpmi_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vertica_facts_module.html + weekly + 0.3 + + + 
http://docs.ansible.com/ansible/user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vertica_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vertica_configuration_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/vertica_schema_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_dotnet_ngen_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/wait_for_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_feature_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_copy_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_chocolatey_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_firewall_rule_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_environment_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_acl_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_file_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_get_url_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_iis_webbinding_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_lineinfile_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_msi_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_iis_webapplication_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_iis_website_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_iis_virtualdirectory_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_iis_webapppool_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_nssm_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_stat_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_template_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_unzip_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_ping_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_package_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_regedit_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_updates_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_service_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_scheduled_task_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_user_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/zabbix_group_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/zabbix_hostmacro_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/win_webpicmd_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/xattr_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/yum_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/yumrepo_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/zabbix_host_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/zabbix_maintenance_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/zypper_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/zabbix_screen_module.html + weekly + 0.3 + + + http://docs.ansible.com/ansible/zfs_module.html + weekly + 0.3 + + + \ No newline at end of file diff --git a/bin/ansible b/bin/ansible index 5aaaa582a7e4f0..a02c5bc1745fdf 100755 --- a/bin/ansible +++ 
b/bin/ansible @@ -18,6 +18,8 @@ # along with Ansible. If not, see . ######################################################## +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type __requires__ = ['ansible'] try: @@ -32,187 +34,82 @@ except Exception: import os import sys +import traceback -from ansible.runner import Runner -import ansible.constants as C -from ansible import utils -from ansible import errors -from ansible import callbacks -from ansible import inventory -######################################################## - -class Cli(object): - ''' code behind bin/ansible ''' - - # ---------------------------------------------- - - def __init__(self): - self.stats = callbacks.AggregateStats() - self.callbacks = callbacks.CliRunnerCallbacks() - if C.DEFAULT_LOAD_CALLBACK_PLUGINS: - callbacks.load_callback_plugins() - - # ---------------------------------------------- - - def parse(self): - ''' create an options parser for bin/ansible ''' - - parser = utils.base_parser( - constants=C, - runas_opts=True, - subset_opts=True, - async_opts=True, - output_opts=True, - connect_opts=True, - check_opts=True, - diff_opts=False, - usage='%prog [options]' - ) - - parser.add_option('-a', '--args', dest='module_args', - help="module arguments", default=C.DEFAULT_MODULE_ARGS) - parser.add_option('-m', '--module-name', dest='module_name', - help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME, - default=C.DEFAULT_MODULE_NAME) - - options, args = parser.parse_args() - self.callbacks.options = options - - if len(args) == 0 or len(args) > 1: - parser.print_help() - sys.exit(1) - - # su and sudo command line arguments need to be mutually exclusive - if (options.su or options.su_user or options.ask_su_pass) and \ - (options.sudo or options.sudo_user or options.ask_sudo_pass): - parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " - "and su arguments ('-su', '--su-user', and '--ask-su-pass') are " - "mutually exclusive") - - if (options.ask_vault_pass and options.vault_password_file): - parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") - - return (options, args) - - # ---------------------------------------------- - - def run(self, options, args): - ''' use Runner lib to do SSH things ''' - - pattern = args[0] - - sshpass = None - sudopass = None - su_pass = None - vault_pass = None - - options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS - # Never ask for an SSH password when we run with local connection - if options.connection == "local": - options.ask_pass = False - options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS - options.ask_su_pass = options.ask_su_pass or C.DEFAULT_ASK_SU_PASS - options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS - - (sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass) - - # read vault_pass from a file - if not options.ask_vault_pass and options.vault_password_file: - vault_pass = utils.read_vault_file(options.vault_password_file) - - extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) - - inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass) - if options.subset: - inventory_manager.subset(options.subset) - hosts = inventory_manager.list_hosts(pattern) - if len(hosts) == 0: - callbacks.display("No hosts matched", stderr=True) - 
sys.exit(0) - - if options.listhosts: - for host in hosts: - callbacks.display(' %s' % host) - sys.exit(0) - - if ((options.module_name == 'command' or options.module_name == 'shell') - and not options.module_args): - callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True) - sys.exit(1) - - - if options.su_user or options.ask_su_pass: - options.su = True - options.sudo_user = options.sudo_user or C.DEFAULT_SUDO_USER - options.su_user = options.su_user or C.DEFAULT_SU_USER - if options.tree: - utils.prepare_writeable_dir(options.tree) - - runner = Runner( - module_name=options.module_name, - module_path=options.module_path, - module_args=options.module_args, - remote_user=options.remote_user, - remote_pass=sshpass, - inventory=inventory_manager, - timeout=options.timeout, - private_key_file=options.private_key_file, - forks=options.forks, - pattern=pattern, - callbacks=self.callbacks, - sudo=options.sudo, - sudo_pass=sudopass, - sudo_user=options.sudo_user, - transport=options.connection, - subset=options.subset, - check=options.check, - diff=options.check, - su=options.su, - su_pass=su_pass, - su_user=options.su_user, - vault_pass=vault_pass, - extra_vars=extra_vars, - ) - - if options.seconds: - callbacks.display("background launch...\n\n", color='cyan') - results, poller = runner.run_async(options.seconds) - results = self.poll_while_needed(poller, options) - else: - results = runner.run() +from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError +from ansible.utils.display import Display +from ansible.utils.unicode import to_unicode - return (runner, results) - # ---------------------------------------------- +######################################## +### OUTPUT OF LAST RESORT ### +class LastResort(object): + def display(self, msg): + print(msg, file=sys.stderr) - def poll_while_needed(self, poller, options): - ''' summarize results from Runner ''' + def error(self, msg, wrap_text=None): + print(msg, file=sys.stderr) - # BACKGROUND POLL LOGIC when -B and -P are specified - if options.seconds and options.poll_interval > 0: - poller.wait(options.seconds, options.poll_interval) - return poller.results - - -######################################################## +######################################## if __name__ == '__main__': - callbacks.display("", log_only=True) - callbacks.display(" ".join(sys.argv), log_only=True) - callbacks.display("", log_only=True) - cli = Cli() - (options, args) = cli.parse() + display = LastResort() + cli = None + me = os.path.basename(sys.argv[0]) + try: - (runner, results) = cli.run(options, args) - for result in results['contacted'].values(): - if 'failed' in result or result.get('rc', 0) != 0: - sys.exit(2) - if results['dark']: - sys.exit(3) - except errors.AnsibleError, e: - # Generic handler for ansible specific errors - callbacks.display("ERROR: %s" % str(e), stderr=True, color='red') + display = Display() + display.debug("starting run") + + sub = None + try: + if me.find('-') != -1: + target = me.split('-') + if len(target) > 1: + sub = target[1] + myclass = "%sCLI" % sub.capitalize() + mycli = getattr(__import__("ansible.cli.%s" % sub, fromlist=[myclass]), myclass) + elif me == 'ansible': + from ansible.cli.adhoc import AdHocCLI as mycli + else: + raise AnsibleError("Unknown Ansible alias: %s" % me) + except ImportError as e: + if e.message.endswith(' %s' % sub): + raise AnsibleError("Ansible sub-program not implemented: %s" % me) + else: + raise + + cli = mycli(sys.argv) + 
cli.parse() + sys.exit(cli.run()) + + except AnsibleOptionsError as e: + cli.parser.print_help() + display.error(to_unicode(e), wrap_text=False) + sys.exit(5) + except AnsibleParserError as e: + display.error(to_unicode(e), wrap_text=False) + sys.exit(4) +# TQM takes care of these, but leaving comment to reserve the exit codes +# except AnsibleHostUnreachable as e: +# display.error(str(e)) +# sys.exit(3) +# except AnsibleHostFailed as e: +# display.error(str(e)) +# sys.exit(2) + except AnsibleError as e: + display.error(to_unicode(e), wrap_text=False) sys.exit(1) - + except KeyboardInterrupt: + display.error("User interrupted execution") + sys.exit(99) + except Exception as e: + have_cli_options = cli is not None and cli.options is not None + display.error("Unexpected Exception: %s" % to_unicode(e), wrap_text=False) + if not have_cli_options or have_cli_options and cli.options.verbosity > 2: + display.display(u"the full traceback was:\n\n%s" % to_unicode(traceback.format_exc())) + else: + display.display("to see the full traceback, use -vvv") + sys.exit(250) diff --git a/bin/ansible-doc b/bin/ansible-doc deleted file mode 100755 index dff7cecce7903a..00000000000000 --- a/bin/ansible-doc +++ /dev/null @@ -1,337 +0,0 @@ -#!/usr/bin/env python - -# (c) 2012, Jan-Piet Mens -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# - -import os -import sys -import textwrap -import re -import optparse -import datetime -import subprocess -import fcntl -import termios -import struct - -from ansible import utils -from ansible.utils import module_docs -import ansible.constants as C -from ansible.utils import version -import traceback - -MODULEDIR = C.DEFAULT_MODULE_PATH - -BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm') -IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"] - -_ITALIC = re.compile(r"I\(([^)]+)\)") -_BOLD = re.compile(r"B\(([^)]+)\)") -_MODULE = re.compile(r"M\(([^)]+)\)") -_URL = re.compile(r"U\(([^)]+)\)") -_CONST = re.compile(r"C\(([^)]+)\)") -PAGER = 'less' -LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars) - # -S (chop long lines) -X (disable termcap init and de-init) - -def pager_print(text): - ''' just print text ''' - print text - -def pager_pipe(text, cmd): - ''' pipe text through a pager ''' - if 'LESS' not in os.environ: - os.environ['LESS'] = LESS_OPTS - try: - cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout) - cmd.communicate(input=text) - except IOError: - pass - except KeyboardInterrupt: - pass - -def pager(text): - ''' find reasonable way to display text ''' - # this is a much simpler form of what is in pydoc.py - if not sys.stdout.isatty(): - pager_print(text) - elif 'PAGER' in os.environ: - if sys.platform == 'win32': - pager_print(text) - else: - pager_pipe(text, os.environ['PAGER']) - elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0: - pager_pipe(text, 'less') - else: - pager_print(text) - -def tty_ify(text): - - t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word' - t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word* - t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word] - t = _URL.sub(r"\1", t) # U(word) => word - t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word' - - return t - -def get_man_text(doc): - - opt_indent=" " - text = [] - text.append("> %s\n" % doc['module'].upper()) - - desc = " ".join(doc['description']) - - text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" ")) - - if 'option_keys' in doc and len(doc['option_keys']) > 0: - text.append("Options (= is mandatory):\n") - - for o in sorted(doc['option_keys']): - opt = doc['options'][o] - - if opt.get('required', False): - opt_leadin = "=" - else: - opt_leadin = "-" - - text.append("%s %s" % (opt_leadin, o)) - - desc = " ".join(opt['description']) - - if 'choices' in opt: - choices = ", ".join(str(i) for i in opt['choices']) - desc = desc + " (Choices: " + choices + ")" - if 'default' in opt: - default = str(opt['default']) - desc = desc + " [Default: " + default + "]" - text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent, - subsequent_indent=opt_indent)) - - if 'notes' in doc and len(doc['notes']) > 0: - notes = " ".join(doc['notes']) - text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ", - subsequent_indent=opt_indent)) - - - if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0: - req = ", ".join(doc['requirements']) - text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ", - subsequent_indent=opt_indent)) - - if 'examples' in doc and len(doc['examples']) > 0: - text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's')) - for ex in doc['examples']: - text.append("%s\n" % (ex['code'])) - - if 'plainexamples' in doc and 
doc['plainexamples'] is not None: - text.append("EXAMPLES:") - text.append(doc['plainexamples']) - if 'returndocs' in doc and doc['returndocs'] is not None: - text.append("RETURN VALUES:") - text.append(doc['returndocs']) - text.append('') - - return "\n".join(text) - - -def get_snippet_text(doc): - - text = [] - desc = tty_ify(" ".join(doc['short_description'])) - text.append("- name: %s" % (desc)) - text.append(" action: %s" % (doc['module'])) - - for o in sorted(doc['options'].keys()): - opt = doc['options'][o] - desc = tty_ify(" ".join(opt['description'])) - - if opt.get('required', False): - s = o + "=" - else: - s = o - - text.append(" %-20s # %s" % (s, desc)) - text.append('') - - return "\n".join(text) - -def get_module_list_text(module_list): - tty_size = 0 - if os.isatty(0): - tty_size = struct.unpack('HHHH', - fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1] - columns = max(60, tty_size) - displace = max(len(x) for x in module_list) - linelimit = columns - displace - 5 - text = [] - deprecated = [] - for module in sorted(set(module_list)): - - if module in module_docs.BLACKLIST_MODULES: - continue - - filename = utils.plugins.module_finder.find_plugin(module) - - if filename is None: - continue - if filename.endswith(".ps1"): - continue - if os.path.isdir(filename): - continue - - try: - doc, plainexamples, returndocs = module_docs.get_docstring(filename) - desc = tty_ify(doc.get('short_description', '?')).strip() - if len(desc) > linelimit: - desc = desc[:linelimit] + '...' - - if module.startswith('_'): # Handle deprecated - deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc)) - else: - text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc)) - except: - traceback.print_exc() - sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) - - if len(deprecated) > 0: - text.append("\nDEPRECATED:") - text.extend(deprecated) - return "\n".join(text) - -def find_modules(path, module_list): - - if os.path.isdir(path): - for module in os.listdir(path): - if module.startswith('.'): - continue - elif os.path.isdir(module): - find_modules(module, module_list) - elif any(module.endswith(x) for x in BLACKLIST_EXTS): - continue - elif module.startswith('__'): - continue - elif module in IGNORE_FILES: - continue - elif module.startswith('_'): - fullpath = '/'.join([path,module]) - if os.path.islink(fullpath): # avoids aliases - continue - - module = os.path.splitext(module)[0] # removes the extension - module_list.append(module) - -def main(): - - p = optparse.OptionParser( - version=version("%prog"), - usage='usage: %prog [options] [module...]', - description='Show Ansible module documentation', - ) - - p.add_option("-M", "--module-path", - action="store", - dest="module_path", - default=MODULEDIR, - help="Ansible modules/ directory") - p.add_option("-l", "--list", - action="store_true", - default=False, - dest='list_dir', - help='List available modules') - p.add_option("-s", "--snippet", - action="store_true", - default=False, - dest='show_snippet', - help='Show playbook snippet for specified module(s)') - p.add_option('-v', action='version', help='Show version number and exit') - - (options, args) = p.parse_args() - - if options.module_path is not None: - for i in options.module_path.split(os.pathsep): - utils.plugins.module_finder.add_directory(i) - - if options.list_dir: - # list modules - paths = utils.plugins.module_finder._get_paths() - module_list = [] - 
for path in paths: - find_modules(path, module_list) - - pager(get_module_list_text(module_list)) - sys.exit() - - if len(args) == 0: - p.print_help() - - def print_paths(finder): - ''' Returns a string suitable for printing of the search path ''' - - # Uses a list to get the order right - ret = [] - for i in finder._get_paths(): - if i not in ret: - ret.append(i) - return os.pathsep.join(ret) - - text = '' - for module in args: - - filename = utils.plugins.module_finder.find_plugin(module) - if filename is None: - sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder))) - continue - - if any(filename.endswith(x) for x in BLACKLIST_EXTS): - continue - - try: - doc, plainexamples, returndocs = module_docs.get_docstring(filename) - except: - traceback.print_exc() - sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module) - continue - - if doc is not None: - - all_keys = [] - for (k,v) in doc['options'].iteritems(): - all_keys.append(k) - all_keys = sorted(all_keys) - doc['option_keys'] = all_keys - - doc['filename'] = filename - doc['docuri'] = doc['module'].replace('_', '-') - doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d') - doc['plainexamples'] = plainexamples - doc['returndocs'] = returndocs - - if options.show_snippet: - text += get_snippet_text(doc) - else: - text += get_man_text(doc) - else: - # this typically means we couldn't even parse the docstring, not just that the YAML is busted, - # probably a quoting issue. - sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module) - pager(text) - -if __name__ == '__main__': - main() diff --git a/bin/ansible-doc b/bin/ansible-doc new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-doc @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy deleted file mode 100755 index f281bf97ae896b..00000000000000 --- a/bin/ansible-galaxy +++ /dev/null @@ -1,957 +0,0 @@ -#!/usr/bin/env python - -######################################################################## -# -# (C) 2013, James Cammarata -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-# -######################################################################## - -import datetime -import json -import os -import os.path -import shutil -import subprocess -import sys -import tarfile -import tempfile -import urllib -import urllib2 -import yaml - -from collections import defaultdict -from distutils.version import LooseVersion -from jinja2 import Environment -from optparse import OptionParser - -import ansible.constants as C -import ansible.utils -from ansible.errors import AnsibleError - -default_meta_template = """--- -galaxy_info: - author: {{ author }} - description: {{description}} - company: {{ company }} - # If the issue tracker for your role is not on github, uncomment the - # next line and provide a value - # issue_tracker_url: {{ issue_tracker_url }} - # Some suggested licenses: - # - BSD (default) - # - MIT - # - GPLv2 - # - GPLv3 - # - Apache - # - CC-BY - license: {{ license }} - min_ansible_version: {{ min_ansible_version }} - # - # Below are all platforms currently available. Just uncomment - # the ones that apply to your role. If you don't see your - # platform on this list, let us know and we'll get it added! - # - #platforms: - {%- for platform,versions in platforms.iteritems() %} - #- name: {{ platform }} - # versions: - # - all - {%- for version in versions %} - # - {{ version }} - {%- endfor %} - {%- endfor %} - # - # Below are all categories currently available. Just as with - # the platforms above, uncomment those that apply to your role. - # - #categories: - {%- for category in categories %} - #- {{ category.name }} - {%- endfor %} -dependencies: [] - # List your role dependencies here, one per line. - # Be sure to remove the '[]' above if you add dependencies - # to this list. - {% for dependency in dependencies %} - #- {{ dependency }} - {% endfor %} - -""" - -default_readme_template = """Role Name -========= - -A brief description of the role goes here. - -Requirements ------------- - -Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. - -Role Variables --------------- - -A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. - -Dependencies ------------- - -A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. - -Example Playbook ----------------- - -Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: - - - hosts: servers - roles: - - { role: username.rolename, x: 42 } - -License -------- - -BSD - -Author Information ------------------- - -An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
-""" - -#------------------------------------------------------------------------------------- -# Utility functions for parsing actions/options -#------------------------------------------------------------------------------------- - -VALID_ACTIONS = ("init", "info", "install", "list", "remove") -SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" ) - -def get_action(args): - """ - Get the action the user wants to execute from the - sys argv list. - """ - for i in range(0,len(args)): - arg = args[i] - if arg in VALID_ACTIONS: - del args[i] - return arg - return None - -def build_option_parser(action): - """ - Builds an option parser object based on the action - the user wants to execute. - """ - - usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS) - epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) - OptionParser.format_epilog = lambda self, formatter: self.epilog - parser = OptionParser(usage=usage, epilog=epilog) - - if not action: - parser.print_help() - sys.exit() - - # options for all actions - # - none yet - - # options specific to actions - if action == "info": - parser.set_usage("usage: %prog info [options] role_name[,version]") - elif action == "init": - parser.set_usage("usage: %prog init [options] role_name") - parser.add_option( - '-p', '--init-path', dest='init_path', default="./", - help='The path in which the skeleton role will be created. ' - 'The default is the current working directory.') - parser.add_option( - '--offline', dest='offline', default=False, action='store_true', - help="Don't query the galaxy API when creating roles") - elif action == "install": - parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]") - parser.add_option( - '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, - help='Ignore errors and continue with the next specified role.') - parser.add_option( - '-n', '--no-deps', dest='no_deps', action='store_true', default=False, - help='Don\'t download roles listed as dependencies') - parser.add_option( - '-r', '--role-file', dest='role_file', - help='A file containing a list of roles to be imported') - elif action == "remove": - parser.set_usage("usage: %prog remove role1 role2 ...") - elif action == "list": - parser.set_usage("usage: %prog list [role_name]") - - # options that apply to more than one action - if action != "init": - parser.add_option( - '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH, - help='The path to the directory containing your roles. ' - 'The default is the roles_path configured in your ' - 'ansible.cfg file (/etc/ansible/roles if not configured)') - - if action in ("info","init","install"): - parser.add_option( - '-s', '--server', dest='api_server', default="galaxy.ansible.com", - help='The API server destination') - - if action in ("init","install"): - parser.add_option( - '-f', '--force', dest='force', action='store_true', default=False, - help='Force overwriting an existing role') - # done, return the parser - return parser - -def get_opt(options, k, defval=""): - """ - Returns an option from an Optparse values instance. 
- """ - try: - data = getattr(options, k) - except: - return defval - if k == "roles_path": - if os.pathsep in data: - data = data.split(os.pathsep)[0] - return data - -def exit_without_ignore(options, rc=1): - """ - Exits with the specified return code unless the - option --ignore-errors was specified - """ - - if not get_opt(options, "ignore_errors", False): - print '- you can use --ignore-errors to skip failed roles.' - sys.exit(rc) - - -#------------------------------------------------------------------------------------- -# Galaxy API functions -#------------------------------------------------------------------------------------- - -def api_get_config(api_server): - """ - Fetches the Galaxy API current version to ensure - the API server is up and reachable. - """ - - try: - url = 'https://%s/api/' % api_server - data = json.load(urllib2.urlopen(url)) - if not data.get("current_version",None): - return None - else: - return data - except: - return None - -def api_lookup_role_by_name(api_server, role_name, notify=True): - """ - Uses the Galaxy API to do a lookup on the role owner/name. - """ - - role_name = urllib.quote(role_name) - - try: - parts = role_name.split(".") - user_name = ".".join(parts[0:-1]) - role_name = parts[-1] - if notify: - print "- downloading role '%s', owned by %s" % (role_name, user_name) - except: - parser.print_help() - print "- invalid role name (%s). Specify role as format: username.rolename" % role_name - sys.exit(1) - - url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name) - try: - data = json.load(urllib2.urlopen(url)) - if len(data["results"]) == 0: - return None - else: - return data["results"][0] - except: - return None - -def api_fetch_role_related(api_server, related, role_id): - """ - Uses the Galaxy API to fetch the list of related items for - the given role. The url comes from the 'related' field of - the role. - """ - - try: - url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related) - data = json.load(urllib2.urlopen(url)) - results = data['results'] - done = (data.get('next', None) == None) - while not done: - url = 'https://%s%s' % (api_server, data['next']) - print url - data = json.load(urllib2.urlopen(url)) - results += data['results'] - done = (data.get('next', None) == None) - return results - except: - return None - -def api_get_list(api_server, what): - """ - Uses the Galaxy API to fetch the list of items specified. 
- """ - - try: - url = 'https://%s/api/v1/%s/?page_size' % (api_server, what) - data = json.load(urllib2.urlopen(url)) - if "results" in data: - results = data['results'] - else: - results = data - done = True - if "next" in data: - done = (data.get('next', None) == None) - while not done: - url = 'https://%s%s' % (api_server, data['next']) - print url - data = json.load(urllib2.urlopen(url)) - results += data['results'] - done = (data.get('next', None) == None) - return results - except: - print "- failed to download the %s list" % what - return None - -#------------------------------------------------------------------------------------- -# scm repo utility functions -#------------------------------------------------------------------------------------- - -def scm_archive_role(scm, role_url, role_version, role_name): - if scm not in ['hg', 'git']: - print "- scm %s is not currently supported" % scm - return False - tempdir = tempfile.mkdtemp() - clone_cmd = [scm, 'clone', role_url, role_name] - with open('/dev/null', 'w') as devnull: - try: - print "- executing: %s" % " ".join(clone_cmd) - popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull) - except: - raise AnsibleError("error executing: %s" % " ".join(clone_cmd)) - rc = popen.wait() - if rc != 0: - print "- command %s failed" % ' '.join(clone_cmd) - print " in directory %s" % tempdir - return False - - temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar') - if scm == 'hg': - archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name] - if role_version: - archive_cmd.extend(['-r', role_version]) - archive_cmd.append(temp_file.name) - if scm == 'git': - archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name] - if role_version: - archive_cmd.append(role_version) - else: - archive_cmd.append('HEAD') - - with open('/dev/null', 'w') as devnull: - print "- executing: %s" % " ".join(archive_cmd) - popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name), - stderr=devnull, stdout=devnull) - rc = popen.wait() - if rc != 0: - print "- command %s failed" % ' '.join(archive_cmd) - print " in directory %s" % tempdir - return False - - shutil.rmtree(tempdir, ignore_errors=True) - - return temp_file.name - - -#------------------------------------------------------------------------------------- -# Role utility functions -#------------------------------------------------------------------------------------- - -def get_role_path(role_name, options): - """ - Returns the role path based on the roles_path option - and the role name. - """ - roles_path = get_opt(options,'roles_path') - roles_path = os.path.join(roles_path, role_name) - roles_path = os.path.expanduser(roles_path) - return roles_path - -def get_role_metadata(role_name, options): - """ - Returns the metadata as YAML, if the file 'meta/main.yml' - exists in the specified role_path - """ - role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml') - try: - if os.path.isfile(role_path): - f = open(role_path, 'r') - meta_data = yaml.safe_load(f) - f.close() - return meta_data - else: - return None - except: - return None - -def get_galaxy_install_info(role_name, options): - """ - Returns the YAML data contained in 'meta/.galaxy_install_info', - if it exists. 
- """ - - try: - info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info') - if os.path.isfile(info_path): - f = open(info_path, 'r') - info_data = yaml.safe_load(f) - f.close() - return info_data - else: - return None - except: - return None - -def write_galaxy_install_info(role_name, role_version, options): - """ - Writes a YAML-formatted file to the role's meta/ directory - (named .galaxy_install_info) which contains some information - we can use later for commands like 'list' and 'info'. - """ - - info = dict( - version = role_version, - install_date = datetime.datetime.utcnow().strftime("%c"), - ) - try: - info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info') - f = open(info_path, 'w+') - info_data = yaml.safe_dump(info, f) - f.close() - except: - return False - return True - - -def remove_role(role_name, options): - """ - Removes the specified role from the roles path. There is a - sanity check to make sure there's a meta/main.yml file at this - path so the user doesn't blow away random directories - """ - if get_role_metadata(role_name, options): - role_path = get_role_path(role_name, options) - shutil.rmtree(role_path) - return True - else: - return False - -def fetch_role(role_name, target, role_data, options): - """ - Downloads the archived role from github to a temp location, extracts - it, and then copies the extracted role to the role library path. - """ - - # first grab the file and save it to a temp location - if '://' in role_name: - archive_url = role_name - else: - archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target) - print "- downloading role from %s" % archive_url - - try: - url_file = urllib2.urlopen(archive_url) - temp_file = tempfile.NamedTemporaryFile(delete=False) - data = url_file.read() - while data: - temp_file.write(data) - data = url_file.read() - temp_file.close() - return temp_file.name - except Exception, e: - # TODO: better urllib2 error handling for error - # messages that are more exact - print "- error: failed to download the file." - return False - -def install_role(role_name, role_version, role_filename, options): - # the file is a tar, so open it that way and extract it - # to the specified (or default) roles directory - - if not tarfile.is_tarfile(role_filename): - print "- error: the file downloaded was not a tar.gz" - return False - else: - if role_filename.endswith('.gz'): - role_tar_file = tarfile.open(role_filename, "r:gz") - else: - role_tar_file = tarfile.open(role_filename, "r") - # verify the role's meta file - meta_file = None - members = role_tar_file.getmembers() - # next find the metadata file - for member in members: - if "/meta/main.yml" in member.name: - meta_file = member - break - if not meta_file: - print "- error: this role does not appear to have a meta/main.yml file." - return False - else: - try: - meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file)) - except: - print "- error: this role does not appear to have a valid meta/main.yml file." 
- return False - - # we strip off the top-level directory for all of the files contained within - # the tar file here, since the default is 'github_repo-target', and change it - # to the specified role's name - role_path = os.path.join(get_opt(options, 'roles_path'), role_name) - role_path = os.path.expanduser(role_path) - print "- extracting %s to %s" % (role_name, role_path) - try: - if os.path.exists(role_path): - if not os.path.isdir(role_path): - print "- error: the specified roles path exists and is not a directory." - return False - elif not get_opt(options, "force", False): - print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name - return False - else: - # using --force, remove the old path - if not remove_role(role_name, options): - print "- error: %s doesn't appear to contain a role." % role_path - print " please remove this directory manually if you really want to put the role here." - return False - else: - os.makedirs(role_path) - - # now we do the actual extraction to the role_path - for member in members: - # we only extract files, and remove any relative path - # bits that might be in the file for security purposes - # and drop the leading directory, as mentioned above - if member.isreg(): - parts = member.name.split("/")[1:] - final_parts = [] - for part in parts: - if part != '..' and '~' not in part and '$' not in part: - final_parts.append(part) - member.name = os.path.join(*final_parts) - role_tar_file.extract(member, role_path) - - # write out the install info file for later use - write_galaxy_install_info(role_name, role_version, options) - except OSError, e: - print "- error: you do not have permission to modify files in %s" % role_path - return False - - # return the parsed yaml metadata - print "- %s was installed successfully" % role_name - return meta_file_data - -#------------------------------------------------------------------------------------- -# Action functions -#------------------------------------------------------------------------------------- - -def execute_init(args, options, parser): - """ - Executes the init action, which creates the skeleton framework - of a role that complies with the galaxy metadata format. - """ - - init_path = get_opt(options, 'init_path', './') - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - force = get_opt(options, 'force', False) - offline = get_opt(options, 'offline', False) - - if not offline: - api_config = api_get_config(api_server) - if not api_config: - print "- the API server (%s) is not responding, please try again later." % api_server - sys.exit(1) - - try: - role_name = args.pop(0).strip() - if role_name == "": - raise Exception("") - role_path = os.path.join(init_path, role_name) - if os.path.exists(role_path): - if os.path.isfile(role_path): - print "- the path %s already exists, but is a file - aborting" % role_path - sys.exit(1) - elif not force: - print "- the directory %s already exists." % role_path - print " you can use --force to re-initialize this directory,\n" + \ - " however it will reset any main.yml files that may have\n" + \ - " been modified there already." 
- sys.exit(1) - except Exception, e: - parser.print_help() - print "- no role name specified for init" - sys.exit(1) - - ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars') - - # create the default README.md - if not os.path.exists(role_path): - os.makedirs(role_path) - readme_path = os.path.join(role_path, "README.md") - f = open(readme_path, "wb") - f.write(default_readme_template) - f.close - - for dir in ROLE_DIRS: - dir_path = os.path.join(init_path, role_name, dir) - main_yml_path = os.path.join(dir_path, 'main.yml') - # create the directory if it doesn't exist already - if not os.path.exists(dir_path): - os.makedirs(dir_path) - - # now create the main.yml file for that directory - if dir == "meta": - # create a skeleton meta/main.yml with a valid galaxy_info - # datastructure in place, plus with all of the available - # tags/platforms included (but commented out) and the - # dependencies section - platforms = [] - if not offline: - platforms = api_get_list(api_server, "platforms") or [] - categories = [] - if not offline: - categories = api_get_list(api_server, "categories") or [] - - # group the list of platforms from the api based - # on their names, with the release field being - # appended to a list of versions - platform_groups = defaultdict(list) - for platform in platforms: - platform_groups[platform['name']].append(platform['release']) - platform_groups[platform['name']].sort() - - inject = dict( - author = 'your name', - company = 'your company (optional)', - license = 'license (GPLv2, CC-BY, etc)', - issue_tracker_url = 'http://example.com/issue/tracker', - min_ansible_version = '1.2', - platforms = platform_groups, - categories = categories, - ) - rendered_meta = Environment().from_string(default_meta_template).render(inject) - f = open(main_yml_path, 'w') - f.write(rendered_meta) - f.close() - pass - elif dir not in ('files','templates'): - # just write a (mostly) empty YAML file for main.yml - f = open(main_yml_path, 'w') - f.write('---\n# %s file for %s\n' % (dir,role_name)) - f.close() - print "- %s was created successfully" % role_name - -def execute_info(args, options, parser): - """ - Executes the info action. This action prints out detailed - information about an installed role as well as info available - from the galaxy API. 
- """ - - if len(args) == 0: - # the user needs to specify a role - parser.print_help() - print "- you must specify a user/role name" - sys.exit(1) - - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - api_config = api_get_config(api_server) - roles_path = get_opt(options, "roles_path") - - for role in args: - - role_info = {} - - install_info = get_galaxy_install_info(role, options) - if install_info: - if 'version' in install_info: - install_info['intalled_version'] = install_info['version'] - del install_info['version'] - role_info.update(install_info) - - remote_data = api_lookup_role_by_name(api_server, role, False) - if remote_data: - role_info.update(remote_data) - - metadata = get_role_metadata(role, options) - if metadata: - role_info.update(metadata) - - role_spec = ansible.utils.role_spec_parse(role) - if role_spec: - role_info.update(role_spec) - - if role_info: - print "- %s:" % (role) - for k in sorted(role_info.keys()): - - if k in SKIP_INFO_KEYS: - continue - - if isinstance(role_info[k], dict): - print "\t%s: " % (k) - for key in sorted(role_info[k].keys()): - if key in SKIP_INFO_KEYS: - continue - print "\t\t%s: %s" % (key, role_info[k][key]) - else: - print "\t%s: %s" % (k, role_info[k]) - else: - print "- the role %s was not found" % role - -def execute_install(args, options, parser): - """ - Executes the installation action. The args list contains the - roles to be installed, unless -f was specified. The list of roles - can be a name (which will be downloaded via the galaxy API and github), - or it can be a local .tar.gz file. - """ - - role_file = get_opt(options, "role_file", None) - - if len(args) == 0 and role_file is None: - # the user needs to specify one of either --role-file - # or specify a single user/role name - parser.print_help() - print "- you must specify a user/role name or a roles file" - sys.exit() - elif len(args) == 1 and not role_file is None: - # using a role file is mutually exclusive of specifying - # the role name on the command line - parser.print_help() - print "- please specify a user/role name, or a roles file, but not both" - sys.exit(1) - - api_server = get_opt(options, "api_server", "galaxy.ansible.com") - no_deps = get_opt(options, "no_deps", False) - roles_path = get_opt(options, "roles_path") - - roles_done = [] - if role_file: - f = open(role_file, 'r') - if role_file.endswith('.yaml') or role_file.endswith('.yml'): - roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f)) - else: - # roles listed in a file, one per line - roles_left = map(ansible.utils.role_spec_parse, f.readlines()) - f.close() - else: - # roles were specified directly, so we'll just go out grab them - # (and their dependencies, unless the user doesn't want us to). 
- roles_left = map(ansible.utils.role_spec_parse, args) - - while len(roles_left) > 0: - # query the galaxy API for the role data - role_data = None - role = roles_left.pop(0) - role_src = role.get("src") - role_scm = role.get("scm") - role_path = role.get("path") - - if role_path: - options.roles_path = role_path - else: - options.roles_path = roles_path - - if os.path.isfile(role_src): - # installing a local tar.gz - tmp_file = role_src - else: - if role_scm: - # create tar file from scm url - tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name")) - elif '://' in role_src: - # just download a URL - version will probably be in the URL - tmp_file = fetch_role(role_src, None, None, options) - else: - # installing from galaxy - api_config = api_get_config(api_server) - if not api_config: - print "- the API server (%s) is not responding, please try again later." % api_server - sys.exit(1) - - role_data = api_lookup_role_by_name(api_server, role_src) - if not role_data: - print "- sorry, %s was not found on %s." % (role_src, api_server) - exit_without_ignore(options) - continue - - role_versions = api_fetch_role_related(api_server, 'versions', role_data['id']) - if "version" not in role or role['version'] == '': - # convert the version names to LooseVersion objects - # and sort them to get the latest version. If there - # are no versions in the list, we'll grab the head - # of the master branch - if len(role_versions) > 0: - loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] - loose_versions.sort() - role["version"] = str(loose_versions[-1]) - else: - role["version"] = 'master' - elif role['version'] != 'master': - if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]: - print 'role is %s' % role - print "- the specified version (%s) was not found in the list of available versions (%s)." % (role['version'], role_versions) - exit_without_ignore(options) - continue - - # download the role. if --no-deps was specified, we stop here, - # otherwise we recursively grab roles and all of their deps. - tmp_file = fetch_role(role_src, role["version"], role_data, options) - installed = False - if tmp_file: - installed = install_role(role.get("name"), role.get("version"), tmp_file, options) - # we're done with the temp file, clean it up - if tmp_file != role_src: - os.unlink(tmp_file) - # install dependencies, if we want them - if not no_deps and installed: - if not role_data: - role_data = get_role_metadata(role.get("name"), options) - role_dependencies = role_data['dependencies'] - else: - role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id']) - for dep in role_dependencies: - if isinstance(dep, basestring): - dep = ansible.utils.role_spec_parse(dep) - else: - dep = ansible.utils.role_yaml_parse(dep) - if not get_role_metadata(dep["name"], options): - if dep not in roles_left: - print '- adding dependency: %s' % dep["name"] - roles_left.append(dep) - else: - print '- dependency %s already pending installation.' % dep["name"] - else: - print '- dependency %s is already installed, skipping.' % dep["name"] - if not tmp_file or not installed: - print "- %s was NOT installed successfully." % role.get("name") - exit_without_ignore(options) - sys.exit(0) - -def execute_remove(args, options, parser): - """ - Executes the remove action. The args list contains the list - of roles to be removed. This list can contain more than one role. 
- """ - - if len(args) == 0: - parser.print_help() - print '- you must specify at least one role to remove.' - sys.exit() - - for role in args: - if get_role_metadata(role, options): - if remove_role(role, options): - print '- successfully removed %s' % role - else: - print "- failed to remove role: %s" % role - else: - print '- %s is not installed, skipping.' % role - sys.exit(0) - -def execute_list(args, options, parser): - """ - Executes the list action. The args list can contain zero - or one role. If one is specified, only that role will be - shown, otherwise all roles in the specified directory will - be shown. - """ - - if len(args) > 1: - print "- please specify only one role to list, or specify no roles to see a full list" - sys.exit(1) - - if len(args) == 1: - # show only the request role, if it exists - role_name = args[0] - metadata = get_role_metadata(role_name, options) - if metadata: - install_info = get_galaxy_install_info(role_name, options) - version = None - if install_info: - version = install_info.get("version", None) - if not version: - version = "(unknown version)" - # show some more info about single roles here - print "- %s, %s" % (role_name, version) - else: - print "- the role %s was not found" % role_name - else: - # show all valid roles in the roles_path directory - roles_path = get_opt(options, 'roles_path') - roles_path = os.path.expanduser(roles_path) - if not os.path.exists(roles_path): - parser.print_help() - print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path - sys.exit(1) - elif not os.path.isdir(roles_path): - print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path - parser.print_help() - sys.exit(1) - path_files = os.listdir(roles_path) - for path_file in path_files: - if get_role_metadata(path_file, options): - install_info = get_galaxy_install_info(path_file, options) - version = None - if install_info: - version = install_info.get("version", None) - if not version: - version = "(unknown version)" - print "- %s, %s" % (path_file, version) - sys.exit(0) - -#------------------------------------------------------------------------------------- -# The main entry point -#------------------------------------------------------------------------------------- - -def main(): - # parse the CLI options - action = get_action(sys.argv) - parser = build_option_parser(action) - (options, args) = parser.parse_args() - - # execute the desired action - if 1: #try: - fn = globals()["execute_%s" % action] - fn(args, options, parser) - #except KeyError, e: - # print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS)) - # sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/bin/ansible-galaxy b/bin/ansible-galaxy new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-galaxy @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-playbook b/bin/ansible-playbook deleted file mode 100755 index f62c699d64d614..00000000000000 --- a/bin/ansible-playbook +++ /dev/null @@ -1,333 +0,0 @@ -#!/usr/bin/env python -# (C) 2012, Michael DeHaan, - -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -####################################################### - -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. - pass - -import sys -import os -import stat - -# Augment PYTHONPATH to find Python modules relative to this file path -# This is so that we can find the modules when running from a local checkout -# installed as editable with `pip install -e ...` or `python setup.py develop` -local_module_path = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', 'lib') -) -sys.path.append(local_module_path) - -import ansible.playbook -import ansible.constants as C -import ansible.utils.template -from ansible import errors -from ansible import callbacks -from ansible import utils -from ansible.color import ANSIBLE_COLOR, stringc -from ansible.callbacks import display - -def colorize(lead, num, color): - """ Print 'lead' = 'num' in 'color' """ - if num != 0 and ANSIBLE_COLOR and color is not None: - return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color)) - else: - return "%s=%-4s" % (lead, str(num)) - -def hostcolor(host, stats, color=True): - if ANSIBLE_COLOR and color: - if stats['failures'] != 0 or stats['unreachable'] != 0: - return "%-37s" % stringc(host, 'red') - elif stats['changed'] != 0: - return "%-37s" % stringc(host, 'yellow') - else: - return "%-37s" % stringc(host, 'green') - return "%-26s" % host - - -def main(args): - ''' run ansible-playbook operations ''' - - # create parser for CLI options - parser = utils.base_parser( - constants=C, - usage = "%prog playbook.yml", - connect_opts=True, - runas_opts=True, - subset_opts=True, - check_opts=True, - diff_opts=True - ) - #parser.add_option('--vault-password', dest="vault_password", - # help="password for vault encrypted files") - parser.add_option('-t', '--tags', dest='tags', default='all', - help="only run plays and tasks tagged with these values") - parser.add_option('--skip-tags', dest='skip_tags', - help="only run plays and tasks whose tags do not match these values") - parser.add_option('--syntax-check', dest='syntax', action='store_true', - help="perform a syntax check on the playbook, but do not execute it") - parser.add_option('--list-tasks', dest='listtasks', action='store_true', - help="list all tasks that would be executed") - parser.add_option('--list-tags', dest='listtags', action='store_true', - help="list all available tags") - parser.add_option('--step', dest='step', action='store_true', - help="one-step-at-a-time: confirm each task before running") - parser.add_option('--start-at-task', dest='start_at', - help="start the playbook at the task matching this name") - parser.add_option('--force-handlers', dest='force_handlers', action='store_true', - help="run handlers even if a task fails") - parser.add_option('--flush-cache', dest='flush_cache', action='store_true', - help="clear the fact 
cache") - - options, args = parser.parse_args(args) - - if len(args) == 0: - parser.print_help(file=sys.stderr) - return 1 - - # su and sudo command line arguments need to be mutually exclusive - if (options.su or options.su_user or options.ask_su_pass) and \ - (options.sudo or options.sudo_user or options.ask_sudo_pass): - parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') " - "and su arguments ('-su', '--su-user', and '--ask-su-pass') are " - "mutually exclusive") - - if (options.ask_vault_pass and options.vault_password_file): - parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive") - - sshpass = None - sudopass = None - su_pass = None - vault_pass = None - - options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS - - if options.listhosts or options.syntax or options.listtasks or options.listtags: - (_, _, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass) - else: - options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS - # Never ask for an SSH password when we run with local connection - if options.connection == "local": - options.ask_pass = False - options.ask_sudo_pass = options.ask_sudo_pass or C.DEFAULT_ASK_SUDO_PASS - options.ask_su_pass = options.ask_su_pass or C.DEFAULT_ASK_SU_PASS - (sshpass, sudopass, su_pass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, ask_sudo_pass=options.ask_sudo_pass, ask_su_pass=options.ask_su_pass, ask_vault_pass=options.ask_vault_pass) - options.sudo_user = options.sudo_user or C.DEFAULT_SUDO_USER - options.su_user = options.su_user or C.DEFAULT_SU_USER - - # read vault_pass from a file - if not options.ask_vault_pass and options.vault_password_file: - vault_pass = utils.read_vault_file(options.vault_password_file) - - extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass) - - only_tags = options.tags.split(",") - skip_tags = options.skip_tags - if options.skip_tags is not None: - skip_tags = options.skip_tags.split(",") - - for playbook in args: - if not os.path.exists(playbook): - raise errors.AnsibleError("the playbook: %s could not be found" % playbook) - if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)): - raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook) - - inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass) - - # Note: slightly wrong, this is written so that implicit localhost - # (which is not returned in list_hosts()) is taken into account for - # warning if inventory is empty. But it can't be taken into account for - # checking if limit doesn't match any hosts. Instead we don't worry about - # limit if only implicit localhost was in inventory to start with. 
- # - # Fix this in v2 - no_hosts = False - if len(inventory.list_hosts()) == 0: - # Empty inventory - utils.warning("provided hosts list is empty, only localhost is available") - no_hosts = True - inventory.subset(options.subset) - if len(inventory.list_hosts()) == 0 and no_hosts is False: - # Invalid limit - raise errors.AnsibleError("Specified --limit does not match any hosts") - - # run all playbooks specified on the command line - for playbook in args: - - stats = callbacks.AggregateStats() - playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY) - if options.step: - playbook_cb.step = options.step - if options.start_at: - playbook_cb.start_at = options.start_at - runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY) - - pb = ansible.playbook.PlayBook( - playbook=playbook, - module_path=options.module_path, - inventory=inventory, - forks=options.forks, - remote_user=options.remote_user, - remote_pass=sshpass, - callbacks=playbook_cb, - runner_callbacks=runner_cb, - stats=stats, - timeout=options.timeout, - transport=options.connection, - sudo=options.sudo, - sudo_user=options.sudo_user, - sudo_pass=sudopass, - extra_vars=extra_vars, - private_key_file=options.private_key_file, - only_tags=only_tags, - skip_tags=skip_tags, - check=options.check, - diff=options.diff, - su=options.su, - su_pass=su_pass, - su_user=options.su_user, - vault_password=vault_pass, - force_handlers=options.force_handlers - ) - - if options.flush_cache: - display(callbacks.banner("FLUSHING FACT CACHE")) - pb.SETUP_CACHE.flush() - - if options.listhosts or options.listtasks or options.syntax or options.listtags: - print '' - print 'playbook: %s' % playbook - print '' - playnum = 0 - for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs): - playnum += 1 - play = ansible.playbook.Play(pb, play_ds, play_basedir, - vault_password=pb.vault_password) - label = play.name - hosts = pb.inventory.list_hosts(play.hosts) - - if options.listhosts: - print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts)) - for host in hosts: - print ' %s' % host - - if options.listtags or options.listtasks: - print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags)))) - - if options.listtags: - tags = [] - for task in pb.tasks_to_run_in_play(play): - tags.extend(task.tags) - print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged'])))) - - if options.listtasks: - - for task in pb.tasks_to_run_in_play(play): - if getattr(task, 'name', None) is not None: - # meta tasks have no names - print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged'])))) - - if options.listhosts or options.listtasks or options.listtags: - print '' - continue - - if options.syntax: - # if we've not exited by now then we are fine. 
- print 'Playbook Syntax is fine' - return 0 - - failed_hosts = [] - unreachable_hosts = [] - - try: - - pb.run() - - hosts = sorted(pb.stats.processed.keys()) - display(callbacks.banner("PLAY RECAP")) - playbook_cb.on_stats(pb.stats) - - for h in hosts: - t = pb.stats.summarize(h) - if t['failures'] > 0: - failed_hosts.append(h) - if t['unreachable'] > 0: - unreachable_hosts.append(h) - - retries = failed_hosts + unreachable_hosts - - if C.RETRY_FILES_ENABLED and len(retries) > 0: - filename = pb.generate_retry_inventory(retries) - if filename: - display(" to retry, use: --limit @%s\n" % filename) - - for h in hosts: - t = pb.stats.summarize(h) - - display("%s : %s %s %s %s" % ( - hostcolor(h, t), - colorize('ok', t['ok'], 'green'), - colorize('changed', t['changed'], 'yellow'), - colorize('unreachable', t['unreachable'], 'red'), - colorize('failed', t['failures'], 'red')), - screen_only=True - ) - - display("%s : %s %s %s %s" % ( - hostcolor(h, t, False), - colorize('ok', t['ok'], None), - colorize('changed', t['changed'], None), - colorize('unreachable', t['unreachable'], None), - colorize('failed', t['failures'], None)), - log_only=True - ) - - - print "" - if len(failed_hosts) > 0: - return 2 - if len(unreachable_hosts) > 0: - return 3 - - except errors.AnsibleError, e: - display("ERROR: %s" % e, color='red') - return 1 - - return 0 - - -if __name__ == "__main__": - display(" ", log_only=True) - display(" ".join(sys.argv), log_only=True) - display(" ", log_only=True) - try: - sys.exit(main(sys.argv[1:])) - except errors.AnsibleError, e: - display("ERROR: %s" % e, color='red', stderr=True) - sys.exit(1) - except KeyboardInterrupt, ke: - display("ERROR: interrupted", color='red', stderr=True) - sys.exit(1) diff --git a/bin/ansible-playbook b/bin/ansible-playbook new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-playbook @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-pull b/bin/ansible-pull deleted file mode 100755 index a9a0897fbff821..00000000000000 --- a/bin/ansible-pull +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env python - -# (c) 2012, Stephen Fromm -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# ansible-pull is a script that runs ansible in local mode -# after checking out a playbooks directory from source repo. There is an -# example playbook to bootstrap this script in the examples/ dir which -# installs ansible and sets it up to run on cron. - -# usage: -# ansible-pull -d /var/lib/ansible \ -# -U http://example.net/content.git [-C production] \ -# [path/playbook.yml] -# -# the -d and -U arguments are required; the -C argument is optional. -# -# ansible-pull accepts an optional argument to specify a playbook -# location underneath the workdir and then searches the source repo -# for playbooks in the following order, stopping at the first match: -# -# 1. $workdir/path/playbook.yml, if specified -# 2. $workdir/$fqdn.yml -# 3. $workdir/$hostname.yml -# 4. 
$workdir/local.yml -# -# the source repo must contain at least one of these playbooks. - -import os -import shutil -import sys -import datetime -import socket -import random -import time -from ansible import utils -from ansible.utils import cmd_functions -from ansible import errors -from ansible import inventory - -DEFAULT_REPO_TYPE = 'git' -DEFAULT_PLAYBOOK = 'local.yml' -PLAYBOOK_ERRORS = {1: 'File does not exist', - 2: 'File is not readable'} - -VERBOSITY=0 - -def increment_debug(option, opt, value, parser): - global VERBOSITY - VERBOSITY += 1 - -def try_playbook(path): - if not os.path.exists(path): - return 1 - if not os.access(path, os.R_OK): - return 2 - return 0 - - -def select_playbook(path, args): - playbook = None - if len(args) > 0 and args[0] is not None: - playbook = "%s/%s" % (path, args[0]) - rc = try_playbook(playbook) - if rc != 0: - print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc]) - return None - return playbook - else: - fqdn = socket.getfqdn() - hostpb = "%s/%s.yml" % (path, fqdn) - shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0]) - localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK) - errors = [] - for pb in [hostpb, shorthostpb, localpb]: - rc = try_playbook(pb) - if rc == 0: - playbook = pb - break - else: - errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc])) - if playbook is None: - print >>sys.stderr, "\n".join(errors) - return playbook - - -def main(args): - """ Set up and run a local playbook """ - usage = "%prog [options] [playbook.yml]" - parser = utils.SortedOptParser(usage=usage) - parser.add_option('--purge', default=False, action='store_true', - help='purge checkout after playbook run') - parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true', - help='only run the playbook if the repository has been updated') - parser.add_option('-s', '--sleep', dest='sleep', default=None, - help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests') - parser.add_option('-f', '--force', dest='force', default=False, - action='store_true', - help='run the playbook even if the repository could ' - 'not be updated') - parser.add_option('-d', '--directory', dest='dest', default=None, - help='directory to checkout repository to') - #parser.add_option('-l', '--live', default=True, action='store_live', - # help='Print the ansible-playbook output while running') - parser.add_option('-U', '--url', dest='url', default=None, - help='URL of the playbook repository') - parser.add_option('-C', '--checkout', dest='checkout', - help='branch/tag/commit to checkout. ' - 'Defaults to behavior of repository module.') - parser.add_option('-i', '--inventory-file', dest='inventory', - help="location of the inventory host file") - parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append", - help="set additional variables as key=value or YAML/JSON", default=[]) - parser.add_option('-v', '--verbose', default=False, action="callback", - callback=increment_debug, - help='Pass -vvvv to ansible-playbook') - parser.add_option('-m', '--module-name', dest='module_name', - default=DEFAULT_REPO_TYPE, - help='Module name used to check out repository. ' - 'Default is %s.' 
% DEFAULT_REPO_TYPE) - parser.add_option('--vault-password-file', dest='vault_password_file', - help="vault password file") - parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true', - help='ask for sudo password') - parser.add_option('-t', '--tags', dest='tags', default=False, - help='only run plays and tasks tagged with these values') - parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true', - help='adds the hostkey for the repo url if not already added') - parser.add_option('--key-file', dest='key_file', - help="Pass '-i ' to the SSH arguments used by git.") - options, args = parser.parse_args(args) - - hostname = socket.getfqdn() - if not options.dest: - # use a hostname dependent directory, in case of $HOME on nfs - options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname) - - options.dest = os.path.abspath(options.dest) - - if not options.url: - parser.error("URL for repository not specified, use -h for help") - return 1 - - now = datetime.datetime.now() - print now.strftime("Starting ansible-pull at %F %T") - - # Attempt to use the inventory passed in as an argument - # It might not yet have been downloaded so use localhost if note - if not options.inventory or not os.path.exists(options.inventory): - inv_opts = 'localhost,' - else: - inv_opts = options.inventory - limit_opts = 'localhost:%s:127.0.0.1' % hostname - repo_opts = "name=%s dest=%s" % (options.url, options.dest) - - if VERBOSITY == 0: - base_opts = '-c local --limit "%s"' % limit_opts - elif VERBOSITY > 0: - debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ]) - base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts) - - if options.checkout: - repo_opts += ' version=%s' % options.checkout - - # Only git module is supported - if options.module_name == DEFAULT_REPO_TYPE: - if options.accept_host_key: - repo_opts += ' accept_hostkey=yes' - - if options.key_file: - repo_opts += ' key_file=%s' % options.key_file - - path = utils.plugins.module_finder.find_plugin(options.module_name) - if path is None: - sys.stderr.write("module '%s' not found.\n" % options.module_name) - return 1 - cmd = 'ansible localhost -i "%s" %s -m %s -a "%s"' % ( - inv_opts, base_opts, options.module_name, repo_opts - ) - for ev in options.extra_vars: - cmd += ' -e "%s"' % ev - - if options.sleep: - try: - secs = random.randint(0,int(options.sleep)); - except ValueError: - parser.error("%s is not a number." % options.sleep) - return 1 - - print >>sys.stderr, "Sleeping for %d seconds..." % secs - time.sleep(secs); - - - # RUN THe CHECKOUT COMMAND - rc, out, err = cmd_functions.run_cmd(cmd, live=True) - - if rc != 0: - if options.force: - print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook." - else: - return rc - elif options.ifchanged and '"changed": true' not in out: - print "Repository has not changed, quitting." - return 0 - - playbook = select_playbook(options.dest, args) - - if playbook is None: - print >>sys.stderr, "Could not find a playbook to run." 
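
Concretely, for an invocation such as ansible-pull -U git://example.com/repo.git -d /var/lib/ansible on a host named web1.example.com, the two commands this script assembles come out roughly as in this sketch (hostname, URL and paths are illustrative):

    # Sketch of the command assembly above, with illustrative values.
    hostname = 'web1.example.com'
    base_opts = '-c local --limit "localhost:%s:127.0.0.1"' % hostname
    repo_opts = 'name=git://example.com/repo.git dest=/var/lib/ansible'

    # step 1: check out the repo with the git module against localhost
    checkout_cmd = 'ansible localhost -i "localhost," %s -m git -a "%s"' % (base_opts, repo_opts)
    # step 2: run the playbook selected from the checkout
    playbook_cmd = 'ansible-playbook %s /var/lib/ansible/local.yml' % base_opts
    print(checkout_cmd)
    print(playbook_cmd)
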
- return 1 - - cmd = 'ansible-playbook %s %s' % (base_opts, playbook) - if options.vault_password_file: - cmd += " --vault-password-file=%s" % options.vault_password_file - if options.inventory: - cmd += ' -i "%s"' % options.inventory - for ev in options.extra_vars: - cmd += ' -e "%s"' % ev - if options.ask_sudo_pass: - cmd += ' -K' - if options.tags: - cmd += ' -t "%s"' % options.tags - os.chdir(options.dest) - - # RUN THE PLAYBOOK COMMAND - rc, out, err = cmd_functions.run_cmd(cmd, live=True) - - if options.purge: - os.chdir('/') - try: - shutil.rmtree(options.dest) - except Exception, e: - print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e)) - - return rc - -if __name__ == '__main__': - try: - sys.exit(main(sys.argv[1:])) - except KeyboardInterrupt, e: - print >>sys.stderr, "Exit on user request.\n" - sys.exit(1) diff --git a/bin/ansible-pull b/bin/ansible-pull new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-pull @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/bin/ansible-vault b/bin/ansible-vault deleted file mode 100755 index 22cfc0e14877af..00000000000000 --- a/bin/ansible-vault +++ /dev/null @@ -1,241 +0,0 @@ -#!/usr/bin/env python - -# (c) 2014, James Tanner -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . -# -# ansible-vault is a script that encrypts/decrypts YAML files. See -# http://docs.ansible.com/playbooks_vault.html for more details. - -__requires__ = ['ansible'] -try: - import pkg_resources -except Exception: - # Use pkg_resources to find the correct versions of libraries and set - # sys.path appropriately when there are multiversion installs. But we - # have code that better expresses the errors in the places where the code - # is actually used (the deps are optional for many code paths) so we don't - # want to fail here. - pass - -import os -import sys -import traceback - -import ansible.constants as C - -from ansible import utils -from ansible import errors -from ansible.utils.vault import VaultEditor - -from optparse import OptionParser - -#------------------------------------------------------------------------------------- -# Utility functions for parsing actions/options -#------------------------------------------------------------------------------------- - -VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view") - -def build_option_parser(action): - """ - Builds an option parser object based on the action - the user wants to execute. 
- """ - - usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS) - epilog = "\nSee '%s --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]) - OptionParser.format_epilog = lambda self, formatter: self.epilog - parser = OptionParser(usage=usage, epilog=epilog) - - if not action: - parser.print_help() - sys.exit() - - # options for all actions - #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use") - parser.add_option('--debug', dest='debug', action="store_true", help="debug") - parser.add_option('--vault-password-file', dest='password_file', - help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE) - - # options specific to actions - if action == "create": - parser.set_usage("usage: %prog create [options] file_name") - elif action == "decrypt": - parser.set_usage("usage: %prog decrypt [options] file_name") - elif action == "edit": - parser.set_usage("usage: %prog edit [options] file_name") - elif action == "view": - parser.set_usage("usage: %prog view [options] file_name") - elif action == "encrypt": - parser.set_usage("usage: %prog encrypt [options] file_name") - elif action == "rekey": - parser.set_usage("usage: %prog rekey [options] file_name") - - # done, return the parser - return parser - -def get_action(args): - """ - Get the action the user wants to execute from the - sys argv list. - """ - for i in range(0,len(args)): - arg = args[i] - if arg in VALID_ACTIONS: - del args[i] - return arg - return None - -def get_opt(options, k, defval=""): - """ - Returns an option from an Optparse values instance. - """ - try: - data = getattr(options, k) - except: - return defval - if k == "roles_path": - if os.pathsep in data: - data = data.split(os.pathsep)[0] - return data - -#------------------------------------------------------------------------------------- -# Command functions -#------------------------------------------------------------------------------------- - -def execute_create(args, options, parser): - if len(args) > 1: - raise errors.AnsibleError("'create' does not accept more than one filename") - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - this_editor = VaultEditor(cipher, password, args[0]) - this_editor.create_file() - -def execute_decrypt(args, options, parser): - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.decrypt_file() - - print "Decryption successful" - -def execute_edit(args, options, parser): - - if len(args) > 1: - raise errors.AnsibleError("edit does not accept more than one filename") - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = None - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.edit_file() - -def execute_view(args, options, parser): - - if len(args) > 1: - raise errors.AnsibleError("view does not accept more than one filename") - - if not options.password_file: - 
password, new_password = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = None - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.view_file() - -def execute_encrypt(args, options, parser): - - if not options.password_file: - password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True) - else: - password = utils.read_vault_file(options.password_file) - - cipher = 'AES256' - if hasattr(options, 'cipher'): - cipher = options.cipher - - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.encrypt_file() - - print "Encryption successful" - -def execute_rekey(args, options, parser): - - if not options.password_file: - password, __ = utils.ask_vault_passwords(ask_vault_pass=True) - else: - password = utils.read_vault_file(options.password_file) - - __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True) - - cipher = None - for f in args: - this_editor = VaultEditor(cipher, password, f) - this_editor.rekey_file(new_password) - - print "Rekey successful" - -#------------------------------------------------------------------------------------- -# MAIN -#------------------------------------------------------------------------------------- - -def main(): - - action = get_action(sys.argv) - parser = build_option_parser(action) - (options, args) = parser.parse_args() - - if not len(args): - raise errors.AnsibleError( - "The '%s' command requires a filename as the first argument" % action - ) - - # execute the desired action - try: - fn = globals()["execute_%s" % action] - fn(args, options, parser) - except Exception, err: - if options.debug: - print traceback.format_exc() - print "ERROR:",err - sys.exit(1) - -if __name__ == "__main__": - main() diff --git a/bin/ansible-vault b/bin/ansible-vault new file mode 120000 index 00000000000000..cabb1f519aad06 --- /dev/null +++ b/bin/ansible-vault @@ -0,0 +1 @@ +ansible \ No newline at end of file diff --git a/contrib/README.md b/contrib/README.md new file mode 100644 index 00000000000000..dab0da4ba72b31 --- /dev/null +++ b/contrib/README.md @@ -0,0 +1,17 @@ +inventory +========= + +Inventory scripts allow you to store your hosts, groups, and variables in any way +you like. Examples include discovering inventory from EC2 or pulling it from +Cobbler. These could also be used to interface with LDAP or a database. + +chmod +x an inventory plugin and either name it /etc/ansible/hosts or use ansible +with -i to designate the path to the script. You might also need to copy a configuration +file with the same name and/or set environment variables; the scripts or configuration +files have more details. + +contributions welcome +===================== + +Send in pull requests to add plugins of your own. The sky is the limit!
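
To make that contract concrete: an inventory script only has to emit JSON, a full group listing for --list (optionally with an _meta hostvars section, as several of the scripts below do) and a single host's variables for --host. A minimal sketch with placeholder group and host names:

    #!/usr/bin/env python
    # Minimal dynamic inventory sketch; group/host names are examples.
    import json
    import sys

    INVENTORY = {
        'webservers': {'hosts': ['web1.example.com'], 'vars': {'http_port': 80}},
        '_meta': {'hostvars': {'web1.example.com': {'ansible_user': 'deploy'}}},
    }

    if len(sys.argv) > 1 and sys.argv[1] == '--list':
        print(json.dumps(INVENTORY))
    elif len(sys.argv) > 2 and sys.argv[1] == '--host':
        print(json.dumps(INVENTORY['_meta']['hostvars'].get(sys.argv[2], {})))
    else:
        sys.stderr.write('usage: %s --list | --host <hostname>\n' % sys.argv[0])
        sys.exit(1)
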
+ diff --git a/plugins/inventory/abiquo.ini b/contrib/inventory/abiquo.ini similarity index 100% rename from plugins/inventory/abiquo.ini rename to contrib/inventory/abiquo.ini diff --git a/plugins/inventory/abiquo.py b/contrib/inventory/abiquo.py similarity index 92% rename from plugins/inventory/abiquo.py rename to contrib/inventory/abiquo.py index a6030c58b8e848..cd068e482b0a54 100755 --- a/plugins/inventory/abiquo.py +++ b/contrib/inventory/abiquo.py @@ -45,26 +45,24 @@ import sys import time import ConfigParser -import urllib2 -import base64 try: import json except ImportError: import simplejson as json +from ansible.module_utils.urls import open_url + def api_get(link, config): try: if link == None: - request = urllib2.Request(config.get('api','uri')+config.get('api','login_path')) - request.add_header("Accept",config.get('api','login_type')) + url = config.get('api','uri') + config.get('api','login_path') + headers = {"Accept": config.get('api','login_type')} else: - request = urllib2.Request(link['href']+'?limit=0') - request.add_header("Accept",link['type']) - # Auth - base64string = base64.encodestring('%s:%s' % (config.get('auth','apiuser'),config.get('auth','apipass'))).replace('\n', '') - request.add_header("Authorization", "Basic %s" % base64string) - result = urllib2.urlopen(request) + url = link['href'] + '?limit=0' + headers = {"Accept": link['type']} + result = open_url(url, headers=headers, url_username=config.get('auth','apiuser').replace('\n', ''), + url_password=config.get('auth','apipass').replace('\n', '')) return json.loads(result.read()) except: return None @@ -76,7 +74,7 @@ def save_cache(data, config): cache = open('/'.join([dpath,'inventory']), 'w') cache.write(json.dumps(data)) cache.close() - except IOError, e: + except IOError as e: pass # not really sure what to do here @@ -88,7 +86,7 @@ def get_cache(cache_item, config): cache = open('/'.join([dpath,'inventory']), 'r') inv = cache.read() cache.close() - except IOError, e: + except IOError as e: pass # not really sure what to do here return inv @@ -172,7 +170,7 @@ def generate_inv_from_api(enterprise_entity,config): else: vm_metadata = metadata['metadata']['metadata'] inventory['_meta']['hostvars'][vm_nic] = vm_metadata - except Exception, e: + except Exception as e: pass inventory[vm_vapp]['children'].append(vmcollection['name']) @@ -183,7 +181,7 @@ def generate_inv_from_api(enterprise_entity,config): inventory[vmcollection['name']].append(vm_nic) return inventory - except Exception, e: + except Exception as e: # Return empty hosts output return { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } } @@ -214,7 +212,7 @@ def get_inventory(enterprise, config): try: login = api_get(None,config) enterprise = next(link for link in (login['links']) if (link['rel']=='enterprise')) - except Exception, e: + except Exception as e: enterprise = None if cache_available(config): diff --git a/plugins/inventory/apache-libcloud.py b/contrib/inventory/apache-libcloud.py similarity index 93% rename from plugins/inventory/apache-libcloud.py rename to contrib/inventory/apache-libcloud.py index 95804095da90d0..f7d64c257c24ef 100755 --- a/plugins/inventory/apache-libcloud.py +++ b/contrib/inventory/apache-libcloud.py @@ -37,6 +37,7 @@ from time import time import ConfigParser +from six import iteritems, string_types from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver import libcloud.security as sec @@ -79,7 +80,7 @@ def __init__(self): else: data_to_print = 
self.json_format_dict(self.inventory, True) - print data_to_print + print(data_to_print) def is_cache_valid(self): @@ -222,12 +223,17 @@ def add_node(self, node): self.push(self.inventory, self.to_safe('type_' + node.instance_type), dest) ''' # Inventory: Group by key pair - if node.extra['keyname']: - self.push(self.inventory, self.to_safe('key_' + node.extra['keyname']), dest) + if node.extra['key_name']: + self.push(self.inventory, self.to_safe('key_' + node.extra['key_name']), dest) # Inventory: Group by security group, quick thing to handle single sg - if node.extra['securitygroup']: - self.push(self.inventory, self.to_safe('sg_' + node.extra['securitygroup'][0]), dest) + if node.extra['security_group']: + self.push(self.inventory, self.to_safe('sg_' + node.extra['security_group'][0]), dest) + + # Inventory: Group by tag + if node.extra['tags']: + for tagkey in node.extra['tags'].keys(): + self.push(self.inventory, self.to_safe('tag_' + tagkey + '_' + node.extra['tags'][tagkey]), dest) def get_host_info(self): ''' @@ -254,16 +260,16 @@ def get_host_info(self): key = self.to_safe('ec2_' + key) # Handle complex types - if type(value) in [int, bool]: + if isinstance(value, (int, bool)): instance_vars[key] = value - elif type(value) in [str, unicode]: + elif isinstance(value, string_types): instance_vars[key] = value.strip() - elif type(value) == type(None): + elif value is None: instance_vars[key] = '' elif key == 'ec2_region': instance_vars[key] = value.name elif key == 'ec2_tags': - for k, v in value.iteritems(): + for k, v in iteritems(value): key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': @@ -277,9 +283,9 @@ def get_host_info(self): else: pass # TODO Product codes if someone finds them useful - #print key - #print type(value) - #print value + #print(key) + #print(type(value)) + #print(value) return self.json_format_dict(instance_vars, True) diff --git a/contrib/inventory/cloudstack.ini b/contrib/inventory/cloudstack.ini new file mode 100644 index 00000000000000..43777b593fb4a6 --- /dev/null +++ b/contrib/inventory/cloudstack.ini @@ -0,0 +1,5 @@ +[cloudstack] +#endpoint = https://api.exoscale.ch/compute +endpoint = https://cloud.example.com/client/api +key = cloudstack api key +secret = cloudstack api secret diff --git a/contrib/inventory/cloudstack.py b/contrib/inventory/cloudstack.py new file mode 100755 index 00000000000000..5911f662c94c33 --- /dev/null +++ b/contrib/inventory/cloudstack.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# (c) 2015, René Moser +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +""" +Ansible CloudStack external inventory script. +============================================= + +Generates Ansible inventory from CloudStack. Configuration is read from +'cloudstack.ini'. If you need to pass the project, write a simple wrapper +script, e.g. 
project_cloudstack.sh: + + #!/bin/bash + cloudstack.py --project $@ + + +When run against a specific host, this script returns the following attributes +based on the data obtained from CloudStack API: + + "web01": { + "cpu_number": 2, + "nic": [ + { + "ip": "10.102.76.98", + "mac": "02:00:50:99:00:01", + "type": "Isolated", + "netmask": "255.255.255.0", + "gateway": "10.102.76.1" + }, + { + "ip": "10.102.138.63", + "mac": "06:b7:5a:00:14:84", + "type": "Shared", + "netmask": "255.255.255.0", + "gateway": "10.102.138.1" + } + ], + "default_ip": "10.102.76.98", + "zone": "ZUERICH", + "created": "2014-07-02T07:53:50+0200", + "hypervisor": "VMware", + "memory": 2048, + "state": "Running", + "tags": [], + "cpu_speed": 1800, + "affinity_group": [], + "service_offering": "Small", + "cpu_used": "62%" + } + + +usage: cloudstack.py [--list] [--host HOST] [--project PROJECT] +""" + +from __future__ import print_function + +import os +import sys +import argparse + +try: + import json +except: + import simplejson as json + + +try: + from cs import CloudStack, CloudStackException, read_config +except ImportError: + print("Error: CloudStack library must be installed: pip install cs.", + file=sys.stderr) + sys.exit(1) + + +class CloudStackInventory(object): + def __init__(self): + + parser = argparse.ArgumentParser() + parser.add_argument('--host') + parser.add_argument('--list', action='store_true') + parser.add_argument('--project') + + options = parser.parse_args() + try: + self.cs = CloudStack(**read_config()) + except CloudStackException as e: + print("Error: Could not connect to CloudStack API", file=sys.stderr) + + project_id = '' + if options.project: + project_id = self.get_project_id(options.project) + + if options.host: + data = self.get_host(options.host) + print(json.dumps(data, indent=2)) + + elif options.list: + data = self.get_list() + print(json.dumps(data, indent=2)) + else: + print("usage: --list | --host [--project ]", + file=sys.stderr) + sys.exit(1) + + + def get_project_id(self, project): + projects = self.cs.listProjects() + if projects: + for p in projects['project']: + if p['name'] == project or p['id'] == project: + return p['id'] + print("Error: Project %s not found." 
% project, file=sys.stderr) + sys.exit(1) + + + def get_host(self, name, project_id=''): + hosts = self.cs.listVirtualMachines(projectid=project_id) + data = {} + if not hosts: + return data + for host in hosts['virtualmachine']: + host_name = host['displayname'] + if name == host_name: + data['zone'] = host['zonename'] + if 'group' in host: + data['group'] = host['group'] + data['state'] = host['state'] + data['service_offering'] = host['serviceofferingname'] + data['affinity_group'] = host['affinitygroup'] + data['security_group'] = host['securitygroup'] + data['cpu_number'] = host['cpunumber'] + data['cpu_speed'] = host['cpuspeed'] + if 'cpuused' in host: + data['cpu_used'] = host['cpuused'] + data['memory'] = host['memory'] + data['tags'] = host['tags'] + data['hypervisor'] = host['hypervisor'] + data['created'] = host['created'] + data['nic'] = [] + for nic in host['nic']: + data['nic'].append({ + 'ip': nic['ipaddress'], + 'mac': nic['macaddress'], + 'netmask': nic['netmask'], + 'gateway': nic['gateway'], + 'type': nic['type'], + }) + if nic['isdefault']: + data['default_ip'] = nic['ipaddress'] + break; + return data + + + def get_list(self, project_id=''): + data = { + 'all': { + 'hosts': [], + }, + '_meta': { + 'hostvars': {}, + }, + } + + groups = self.cs.listInstanceGroups(projectid=project_id) + if groups: + for group in groups['instancegroup']: + group_name = group['name'] + if group_name and not group_name in data: + data[group_name] = { + 'hosts': [] + } + + hosts = self.cs.listVirtualMachines(projectid=project_id) + if not hosts: + return data + for host in hosts['virtualmachine']: + host_name = host['displayname'] + data['all']['hosts'].append(host_name) + data['_meta']['hostvars'][host_name] = {} + data['_meta']['hostvars'][host_name]['zone'] = host['zonename'] + if 'group' in host: + data['_meta']['hostvars'][host_name]['group'] = host['group'] + data['_meta']['hostvars'][host_name]['state'] = host['state'] + data['_meta']['hostvars'][host_name]['service_offering'] = host['serviceofferingname'] + data['_meta']['hostvars'][host_name]['affinity_group'] = host['affinitygroup'] + data['_meta']['hostvars'][host_name]['security_group'] = host['securitygroup'] + data['_meta']['hostvars'][host_name]['cpu_number'] = host['cpunumber'] + data['_meta']['hostvars'][host_name]['cpu_speed'] = host['cpuspeed'] + if 'cpuused' in host: + data['_meta']['hostvars'][host_name]['cpu_used'] = host['cpuused'] + data['_meta']['hostvars'][host_name]['created'] = host['created'] + data['_meta']['hostvars'][host_name]['memory'] = host['memory'] + data['_meta']['hostvars'][host_name]['tags'] = host['tags'] + data['_meta']['hostvars'][host_name]['hypervisor'] = host['hypervisor'] + data['_meta']['hostvars'][host_name]['created'] = host['created'] + data['_meta']['hostvars'][host_name]['nic'] = [] + for nic in host['nic']: + data['_meta']['hostvars'][host_name]['nic'].append({ + 'ip': nic['ipaddress'], + 'mac': nic['macaddress'], + 'netmask': nic['netmask'], + 'gateway': nic['gateway'], + 'type': nic['type'], + }) + if nic['isdefault']: + data['_meta']['hostvars'][host_name]['default_ip'] = nic['ipaddress'] + + group_name = '' + if 'group' in host: + group_name = host['group'] + + if group_name and group_name in data: + data[group_name]['hosts'].append(host_name) + return data + + +if __name__ == '__main__': + CloudStackInventory() diff --git a/plugins/inventory/cobbler.ini b/contrib/inventory/cobbler.ini similarity index 100% rename from plugins/inventory/cobbler.ini rename to 
contrib/inventory/cobbler.ini diff --git a/plugins/inventory/cobbler.py b/contrib/inventory/cobbler.py similarity index 85% rename from plugins/inventory/cobbler.py rename to contrib/inventory/cobbler.py index f352c8cf9d2054..b5fcdeacbbe5b1 100755 --- a/plugins/inventory/cobbler.py +++ b/contrib/inventory/cobbler.py @@ -30,9 +30,15 @@ Tested with Cobbler 2.0.11. Changelog: + - 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in + higher performance at ansible startup. Groups are determined by owner rather than + default mgmt_classes. DNS name determined from hostname. cobbler values are written + to a 'cobbler' fact namespace + - 2013-09-01 pgehres: Refactored implementation to make use of caching and to limit the number of connections to external cobbler server for performance. Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0 + """ # (c) 2012, Michael DeHaan @@ -54,7 +60,6 @@ ###################################################################### - import argparse import ConfigParser import os @@ -67,14 +72,19 @@ except ImportError: import simplejson as json +from six import iteritems + # NOTE -- this file assumes Ansible is being accessed FROM the cobbler # server, so it does not attempt to login with a username and password. # this will be addressed in a future version of this script. +orderby_keyname = 'owners' # alternatively 'mgmt_classes' + class CobblerInventory(object): def __init__(self): + """ Main execution path """ self.conn = None @@ -98,16 +108,14 @@ def __init__(self): # Data to print if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - data_to_print = self.json_format_dict(self.inventory, True) - - else: # default action with no options - data_to_print = self.json_format_dict(self.inventory, True) + data_to_print += self.get_host_info() + else: + self.inventory['_meta'] = { 'hostvars': {} } + for hostname in self.cache: + self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname] } + data_to_print += self.json_format_dict(self.inventory, True) - print data_to_print + print(data_to_print) def _connect(self): if not self.conn: @@ -160,21 +168,23 @@ def update_cache(self): for host in data: # Get the FQDN for the host and add it to the right groups - dns_name = None + dns_name = host['hostname'] #None ksmeta = None interfaces = host['interfaces'] - for (iname, ivalue) in interfaces.iteritems(): - if ivalue['management']: - this_dns_name = ivalue.get('dns_name', None) - if this_dns_name is not None and this_dns_name is not "": - dns_name = this_dns_name - - if dns_name is None: + # hostname is often empty for non-static IP hosts + if dns_name == '': + for (iname, ivalue) in iteritems(interfaces): + if ivalue['management'] or not ivalue['static']: + this_dns_name = ivalue.get('dns_name', None) + if this_dns_name is not None and this_dns_name is not "": + dns_name = this_dns_name + + if dns_name == '': continue status = host['status'] profile = host['profile'] - classes = host['mgmt_classes'] + classes = host[orderby_keyname] if status not in self.inventory: self.inventory[status] = [] @@ -193,9 +203,9 @@ def update_cache(self): # The old way was ksmeta only -- provide backwards compatibility - self.cache[dns_name] = dict() + self.cache[dns_name] = host if "ks_meta" in host: - for key, value in host["ks_meta"].iteritems(): + for key, value in iteritems(host["ks_meta"]): self.cache[dns_name][key] = value 
self.write_to_cache(self.cache, self.cache_path_cache) @@ -242,7 +252,6 @@ def load_cache_from_cache(self): def write_to_cache(self, data, filename): """ Writes data in JSON format to a file """ - json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) diff --git a/plugins/inventory/collins.ini b/contrib/inventory/collins.ini similarity index 100% rename from plugins/inventory/collins.ini rename to contrib/inventory/collins.ini diff --git a/plugins/inventory/collins.py b/contrib/inventory/collins.py similarity index 97% rename from plugins/inventory/collins.py rename to contrib/inventory/collins.py index 64e16f57069232..bbcb32b01789f9 100755 --- a/plugins/inventory/collins.py +++ b/contrib/inventory/collins.py @@ -41,7 +41,7 @@ If errors are encountered during operation, this script will return an exit code of 255; otherwise, it will return an exit code of 0. -Collins attributes are accessable as variables in ansible via the COLLINS['attribute_name']. +Collins attributes are accessible as variables in ansible via the COLLINS['attribute_name']. Tested against Ansible 1.8.2 and Collins 1.3.0. """ @@ -67,7 +67,6 @@ import argparse -import base64 import ConfigParser import logging import os @@ -76,13 +75,15 @@ from time import time import traceback import urllib -import urllib2 try: import json except ImportError: import simplejson as json +from six import iteritems + +from ansible.module_utils.urls import open_url class CollinsDefaults(object): ASSETS_API_ENDPOINT = '%s/api/assets' @@ -164,7 +165,7 @@ def run(self): else: # default action with no options data_to_print = self.json_format_dict(self.inventory, self.args.pretty) - print data_to_print + print(data_to_print) return successful def find_assets(self, attributes = {}, operation = 'AND'): @@ -174,7 +175,7 @@ def find_assets(self, attributes = {}, operation = 'AND'): # the CQL search feature as described here: # http://tumblr.github.io/collins/recipes.html attributes_query = [ '='.join(attr_pair) - for attr_pair in attributes.iteritems() ] + for attr_pair in iteritems(attributes) ] query_parameters = { 'details': ['True'], 'operation': [operation], @@ -196,10 +197,11 @@ def find_assets(self, attributes = {}, operation = 'AND'): (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host), urllib.urlencode(query_parameters, doseq=True) ) - request = urllib2.Request(query_url) - request.add_header('Authorization', self.basic_auth_header) try: - response = urllib2.urlopen(request, timeout=self.collins_timeout_secs) + response = open_url(query_url, + timeout=self.collins_timeout_secs, + url_username=self.collins_username, + url_password=self.collins_password) json_response = json.loads(response.read()) # Adds any assets found to the array of assets. 
assets += json_response['data']['Data'] @@ -259,8 +261,6 @@ def read_settings(self): log_path = config.get('collins', 'log_path') self.log_location = log_path + '/ansible-collins.log' - self.basic_auth_header = "Basic %s" % base64.encodestring( - '%s:%s' % (self.collins_username, self.collins_password))[:-1] def parse_cli_args(self): """ Command line argument processing """ diff --git a/plugins/inventory/consul.ini b/contrib/inventory/consul.ini similarity index 100% rename from plugins/inventory/consul.ini rename to contrib/inventory/consul.ini diff --git a/plugins/inventory/consul_io.py b/contrib/inventory/consul_io.py similarity index 97% rename from plugins/inventory/consul_io.py rename to contrib/inventory/consul_io.py index 46d47fd3bf5456..1bcf22d3730cd4 100755 --- a/plugins/inventory/consul_io.py +++ b/contrib/inventory/consul_io.py @@ -125,6 +125,7 @@ import re import argparse from time import time +import sys import ConfigParser import urllib, urllib2, base64 @@ -135,11 +136,12 @@ try: import consul -except ImportError, e: - print """failed=True msg='python-consul required for this module. see - http://python-consul.readthedocs.org/en/latest/#installation'""" +except ImportError as e: + print("""failed=True msg='python-consul required for this module. see + http://python-consul.readthedocs.org/en/latest/#installation'""") sys.exit(1) +from six import iteritems class ConsulInventory(object): @@ -170,7 +172,7 @@ def __init__(self): self.load_all_data_consul() self.combine_all_results() - print json.dumps(self.inventory, sort_keys=True, indent=2) + print(json.dumps(self.inventory, sort_keys=True, indent=2)) def load_all_data_consul(self): ''' cycle through each of the datacenters in the consul catalog and process @@ -186,7 +188,7 @@ def load_availability_groups(self, node, datacenter): an 'available' or 'unavailable' grouping. The suffix for each group can be controlled from the config''' if self.config.has_config('availability'): - for service_name, service in node['Services'].iteritems(): + for service_name, service in iteritems(node['Services']): for node in self.consul_api.health.service(service_name)[1]: for check in node['Checks']: if check['ServiceName'] == service_name: @@ -210,9 +212,9 @@ def load_data_for_datacenter(self, datacenter): def load_data_for_node(self, node, datacenter): '''loads the data for a sinle node adding it to various groups based on - metadata retrieved from the kv store and service availablity''' + metadata retrieved from the kv store and service availability''' - index, node_data = self.consul_api.catalog.node(node, datacenter) + index, node_data = self.consul_api.catalog.node(node, dc=datacenter) node = node_data['Node'] self.add_node_to_map(self.nodes, 'all', node) self.add_metadata(node_data, "consul_datacenter", datacenter) diff --git a/contrib/inventory/digital_ocean.ini b/contrib/inventory/digital_ocean.ini new file mode 100644 index 00000000000000..01afe33968df75 --- /dev/null +++ b/contrib/inventory/digital_ocean.ini @@ -0,0 +1,28 @@ +# Ansible DigitalOcean external inventory script settings +# + +[digital_ocean] + +# The module needs your DigitalOcean API Token. +# It may also be specified on the command line via --api-token +# or via the environment variables DO_API_TOKEN or DO_API_KEY +# +#api_token = 123456abcdefg + + +# API calls to DigitalOcean may be slow. For this reason, we cache the results +# of an API call. Set this to the path you want cache files to be written to. 
+# One file will be written to this directory: +# - ansible-digital_ocean.cache +# +cache_path = /tmp + + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# +cache_max_age = 300 + +# Use the private network IP address instead of the public when available. +# +use_private_network = False diff --git a/contrib/inventory/digital_ocean.py b/contrib/inventory/digital_ocean.py new file mode 100755 index 00000000000000..1c0ef68cff4cce --- /dev/null +++ b/contrib/inventory/digital_ocean.py @@ -0,0 +1,453 @@ +#!/usr/bin/env python + +''' +DigitalOcean external inventory script +====================================== + +Generates Ansible inventory of DigitalOcean Droplets. + +In addition to the --list and --host options used by Ansible, there are options +for generating JSON of other DigitalOcean data. This is useful when creating +droplets. For example, --regions will return all the DigitalOcean Regions. +This information can also be easily found in the cache file, whose default +location is /tmp/ansible-digital_ocean.cache). + +The --pretty (-p) option pretty-prints the output for better human readability. + +---- +Although the cache stores all the information received from DigitalOcean, +the cache is not used for current droplet information (in --list, --host, +--all, and --droplets). This is so that accurate droplet information is always +found. You can force this script to use the cache with --force-cache. + +---- +Configuration is read from `digital_ocean.ini`, then from environment variables, +then and command-line arguments. + +Most notably, the DigitalOcean API Token must be specified. It can be specified +in the INI file or with the following environment variables: + export DO_API_TOKEN='abc123' or + export DO_API_KEY='abc123' + +Alternatively, it can be passed on the command-line with --api-token. 
+ +If you specify DigitalOcean credentials in the INI file, a handy way to +get them into your environment (e.g., to use the digital_ocean module) +is to use the output of the --env option with export: + export $(digital_ocean.py --env) + +---- +The following groups are generated from --list: + - ID (droplet ID) + - NAME (droplet NAME) + - image_ID + - image_NAME + - distro_NAME (distribution NAME from image) + - region_NAME + - size_NAME + - status_STATUS + +When run against a specific host, this script returns the following variables: + - do_backup_ids + - do_created_at + - do_disk + - do_features - list + - do_id + - do_image - object + - do_ip_address + - do_private_ip_address + - do_kernel - object + - do_locked + - de_memory + - do_name + - do_networks - object + - do_next_backup_window + - do_region - object + - do_size - object + - do_size_slug + - do_snapshot_ids - list + - do_status + - do_vcpus + +----- +``` +usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] + [--droplets] [--regions] [--images] [--sizes] + [--ssh-keys] [--domains] [--pretty] + [--cache-path CACHE_PATH] + [--cache-max_age CACHE_MAX_AGE] + [--force-cache] + [--refresh-cache] + [--api-token API_TOKEN] + +Produce an Ansible Inventory file based on DigitalOcean credentials + +optional arguments: + -h, --help show this help message and exit + --list List all active Droplets as Ansible inventory + (default: True) + --host HOST Get all Ansible inventory variables about a specific + Droplet + --all List all DigitalOcean information as JSON + --droplets List Droplets as JSON + --regions List Regions as JSON + --images List Images as JSON + --sizes List Sizes as JSON + --ssh-keys List SSH keys as JSON + --domains List Domains as JSON + --pretty, -p Pretty-print results + --cache-path CACHE_PATH + Path to the cache files (default: .) + --cache-max_age CACHE_MAX_AGE + Maximum age of the cached items (default: 0) + --force-cache Only use data from the cache + --refresh-cache Force refresh of cache by making API requests to + DigitalOcean (default: False - use cache files) + --api-token API_TOKEN, -a API_TOKEN + DigitalOcean API Token +``` + +''' + +# (c) 2013, Evan Wies +# +# Inspired by the EC2 inventory plugin: +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +###################################################################### + +import os +import sys +import re +import argparse +from time import time +import ConfigParser + +try: + import json +except ImportError: + import simplejson as json + +try: + from dopy.manager import DoError, DoManager +except ImportError as e: + print("failed=True msg='`dopy` library required for this script'") + sys.exit(1) + + + +class DigitalOceanInventory(object): + + ########################################################################### + # Main execution path + ########################################################################### + + def __init__(self): + ''' Main execution path ''' + + # DigitalOceanInventory data + self.data = {} # All DigitalOcean data + self.inventory = {} # Ansible Inventory + + # Define defaults + self.cache_path = '.' + self.cache_max_age = 0 + self.use_private_network = False + + # Read settings, environment variables, and CLI arguments + self.read_settings() + self.read_environment() + self.read_cli_args() + + # Verify credentials were set + if not hasattr(self, 'api_token'): + print('''Could not find values for DigitalOcean api_token. +They must be specified via either ini file, command line argument (--api-token), +or environment variables (DO_API_TOKEN)''') + sys.exit(-1) + + # env command, show DigitalOcean credentials + if self.args.env: + print("DO_API_TOKEN=%s" % self.api_token) + sys.exit(0) + + # Manage cache + self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" + self.cache_refreshed = False + + if self.is_cache_valid: + self.load_from_cache() + if len(self.data) == 0: + if self.args.force_cache: + print('''Cache is empty and --force-cache was specified''') + sys.exit(-1) + + self.manager = DoManager(None, self.api_token, api_version=2) + + # Pick the json_data to print based on the CLI command + if self.args.droplets: + self.load_from_digital_ocean('droplets') + json_data = {'droplets': self.data['droplets']} + elif self.args.regions: + self.load_from_digital_ocean('regions') + json_data = {'regions': self.data['regions']} + elif self.args.images: + self.load_from_digital_ocean('images') + json_data = {'images': self.data['images']} + elif self.args.sizes: + self.load_from_digital_ocean('sizes') + json_data = {'sizes': self.data['sizes']} + elif self.args.ssh_keys: + self.load_from_digital_ocean('ssh_keys') + json_data = {'ssh_keys': self.data['ssh_keys']} + elif self.args.domains: + self.load_from_digital_ocean('domains') + json_data = {'domains': self.data['domains']} + elif self.args.all: + self.load_from_digital_ocean() + json_data = self.data + elif self.args.host: + json_data = self.load_droplet_variables_for_host() + else: # '--list' this is last to make it default + self.load_from_digital_ocean('droplets') + self.build_inventory() + json_data = self.inventory + + if self.cache_refreshed: + self.write_to_cache() + + if self.args.pretty: + print(json.dumps(json_data, sort_keys=True, indent=2)) + else: + print(json.dumps(json_data)) + # That's all she wrote... 
+ + + ########################################################################### + # Script configuration + ########################################################################### + + def read_settings(self): + ''' Reads the settings from the digital_ocean.ini file ''' + config = ConfigParser.SafeConfigParser() + config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') + + # Credentials + if config.has_option('digital_ocean', 'api_token'): + self.api_token = config.get('digital_ocean', 'api_token') + + # Cache related + if config.has_option('digital_ocean', 'cache_path'): + self.cache_path = config.get('digital_ocean', 'cache_path') + if config.has_option('digital_ocean', 'cache_max_age'): + self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') + + # Private IP Address + if config.has_option('digital_ocean', 'use_private_network'): + self.use_private_network = config.get('digital_ocean', 'use_private_network') + + def read_environment(self): + ''' Reads the settings from environment variables ''' + # Setup credentials + if os.getenv("DO_API_TOKEN"): + self.api_token = os.getenv("DO_API_TOKEN") + if os.getenv("DO_API_KEY"): + self.api_token = os.getenv("DO_API_KEY") + + + def read_cli_args(self): + ''' Command line argument processing ''' + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') + + parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') + parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') + + parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') + parser.add_argument('--droplets','-d', action='store_true', help='List Droplets as JSON') + parser.add_argument('--regions', action='store_true', help='List Regions as JSON') + parser.add_argument('--images', action='store_true', help='List Images as JSON') + parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') + parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') + parser.add_argument('--domains', action='store_true',help='List Domains as JSON') + + parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results') + + parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') + parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') + parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') + parser.add_argument('--refresh-cache','-r', action='store_true', default=False, + help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') + + parser.add_argument('--env','-e', action='store_true', help='Display DO_API_TOKEN') + parser.add_argument('--api-token','-a', action='store', help='DigitalOcean API Token') + + self.args = parser.parse_args() + + if self.args.api_token: + self.api_token = self.args.api_token + + # Make --list default if none of the other commands are specified + if (not self.args.droplets and not self.args.regions and + not self.args.images and not self.args.sizes and + not self.args.ssh_keys and not self.args.domains and + not self.args.all and not self.args.host): + self.args.list = True + + + ########################################################################### + # Data 
Management + ########################################################################### + + def load_from_digital_ocean(self, resource=None): + '''Get JSON from DigitalOcean API''' + if self.args.force_cache: + return + # We always get fresh droplets + if self.is_cache_valid() and not (resource=='droplets' or resource is None): + return + if self.args.refresh_cache: + resource=None + + if resource == 'droplets' or resource is None: + self.data['droplets'] = self.manager.all_active_droplets() + self.cache_refreshed = True + if resource == 'regions' or resource is None: + self.data['regions'] = self.manager.all_regions() + self.cache_refreshed = True + if resource == 'images' or resource is None: + self.data['images'] = self.manager.all_images(filter=None) + self.cache_refreshed = True + if resource == 'sizes' or resource is None: + self.data['sizes'] = self.manager.sizes() + self.cache_refreshed = True + if resource == 'ssh_keys' or resource is None: + self.data['ssh_keys'] = self.manager.all_ssh_keys() + self.cache_refreshed = True + if resource == 'domains' or resource is None: + self.data['domains'] = self.manager.all_domains() + self.cache_refreshed = True + + + def build_inventory(self): + '''Build Ansible inventory of droplets''' + self.inventory = {} + + # add all droplets by id and name + for droplet in self.data['droplets']: + #when using private_networking, the API reports the private one in "ip_address". + if 'private_networking' in droplet['features'] and not self.use_private_network: + for net in droplet['networks']['v4']: + if net['type']=='public': + dest=net['ip_address'] + else: + continue + else: + dest = droplet['ip_address'] + + self.inventory[droplet['id']] = [dest] + self.push(self.inventory, droplet['name'], dest) + self.push(self.inventory, 'region_' + droplet['region']['slug'], dest) + self.push(self.inventory, 'image_' + str(droplet['image']['id']), dest) + self.push(self.inventory, 'size_' + droplet['size']['slug'], dest) + + image_slug = droplet['image']['slug'] + if image_slug: + self.push(self.inventory, 'image_' + self.to_safe(image_slug), dest) + else: + image_name = droplet['image']['name'] + if image_name: + self.push(self.inventory, 'image_' + self.to_safe(image_name), dest) + + self.push(self.inventory, 'distro_' + self.to_safe(droplet['image']['distribution']), dest) + self.push(self.inventory, 'status_' + droplet['status'], dest) + + + def load_droplet_variables_for_host(self): + '''Generate a JSON response to a --host call''' + host = int(self.args.host) + + droplet = self.manager.show_droplet(host) + + # Put all the information in a 'do_' namespace + info = {} + for k, v in droplet.items(): + info['do_'+k] = v + + return {'droplet': info} + + + + ########################################################################### + # Cache Management + ########################################################################### + + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if it is still valid ''' + if os.path.isfile(self.cache_filename): + mod_time = os.path.getmtime(self.cache_filename) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + return True + return False + + + def load_from_cache(self): + ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' + try: + cache = open(self.cache_filename, 'r') + json_data = cache.read() + cache.close() + data = json.loads(json_data) + except IOError: + data = {'data': {}, 'inventory': {}} + + self.data = data['data'] 
+ self.inventory = data['inventory'] + + + def write_to_cache(self): + ''' Writes data in JSON format to a file ''' + data = { 'data': self.data, 'inventory': self.inventory } + json_data = json.dumps(data, sort_keys=True, indent=2) + + cache = open(self.cache_filename, 'w') + cache.write(json_data) + cache.close() + + + ########################################################################### + # Utilities + ########################################################################### + + def push(self, my_dict, key, element): + ''' Pushed an element onto an array that may not have been defined in the dict ''' + if key in my_dict: + my_dict[key].append(element) + else: + my_dict[key] = [element] + + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' + return re.sub("[^A-Za-z0-9\-\.]", "_", word) + + + +########################################################################### +# Run the script +DigitalOceanInventory() diff --git a/contrib/inventory/docker.py b/contrib/inventory/docker.py new file mode 100755 index 00000000000000..7e8ee30a7cc22f --- /dev/null +++ b/contrib/inventory/docker.py @@ -0,0 +1,359 @@ +#!/usr/bin/env python + +# (c) 2013, Paul Durivage +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# +# Author: Paul Durivage +# +# Description: +# This module queries local or remote Docker daemons and generates +# inventory information. +# +# This plugin does not support targeting of specific hosts using the --host +# flag. Instead, it queries the Docker API for each container, running +# or not, and returns this data all once. 
+# +# The plugin returns the following custom attributes on Docker containers: +# docker_args +# docker_config +# docker_created +# docker_driver +# docker_exec_driver +# docker_host_config +# docker_hostname_path +# docker_hosts_path +# docker_id +# docker_image +# docker_name +# docker_network_settings +# docker_path +# docker_resolv_conf_path +# docker_state +# docker_volumes +# docker_volumes_rw +# +# Requirements: +# The docker-py module: https://github.com/dotcloud/docker-py +# +# Notes: +# A config file can be used to configure this inventory module, and there +# are several environment variables that can be set to modify the behavior +# of the plugin at runtime: +# DOCKER_CONFIG_FILE +# DOCKER_HOST +# DOCKER_VERSION +# DOCKER_TIMEOUT +# DOCKER_PRIVATE_SSH_PORT +# DOCKER_DEFAULT_IP +# +# Environment Variables: +# environment variable: DOCKER_CONFIG_FILE +# description: +# - A path to a Docker inventory hosts/defaults file in YAML format +# - A sample file has been provided, colocated with the inventory +# file called 'docker.yml' +# required: false +# default: Uses docker.docker.Client constructor defaults +# environment variable: DOCKER_HOST +# description: +# - The socket on which to connect to a Docker daemon API +# required: false +# default: Uses docker.docker.Client constructor defaults +# environment variable: DOCKER_VERSION +# description: +# - Version of the Docker API to use +# default: Uses docker.docker.Client constructor defaults +# required: false +# environment variable: DOCKER_TIMEOUT +# description: +# - Timeout in seconds for connections to Docker daemon API +# default: Uses docker.docker.Client constructor defaults +# required: false +# environment variable: DOCKER_PRIVATE_SSH_PORT +# description: +# - The private port (container port) on which SSH is listening +# for connections +# default: 22 +# required: false +# environment variable: DOCKER_DEFAULT_IP +# description: +# - This environment variable overrides the container SSH connection +# IP address (aka, 'ansible_ssh_host') +# +# This option allows one to override the ansible_ssh_host whenever +# Docker has exercised its default behavior of binding private ports +# to all interfaces of the Docker host. This behavior, when dealing +# with remote Docker hosts, does not allow Ansible to determine +# a proper host IP address on which to connect via SSH to containers. +# By default, this inventory module assumes all 0.0.0.0-exposed +# ports to be bound to localhost:. To override this +# behavior, for example, to bind a container's SSH port to the public +# interface of its host, one must manually set this IP. +# +# It is preferable to begin to launch Docker containers with +# ports exposed on publicly accessible IP addresses, particularly +# if the containers are to be targeted by Ansible for remote +# configuration, not accessible via localhost SSH connections. 
+# +# Docker containers can be explicitly exposed on IP addresses by +# a) starting the daemon with the --ip argument +# b) running containers with the -P/--publish ip::containerPort +# argument +# default: 127.0.0.1 if port exposed on 0.0.0.0 by Docker +# required: false +# +# Examples: +# Use the config file: +# DOCKER_CONFIG_FILE=./docker.yml docker.py --list +# +# Connect to docker instance on localhost port 4243 +# DOCKER_HOST=tcp://localhost:4243 docker.py --list +# +# Any container's ssh port exposed on 0.0.0.0 will mapped to +# another IP address (where Ansible will attempt to connect via SSH) +# DOCKER_DEFAULT_IP=1.2.3.4 docker.py --list + +import os +import sys +import json +import argparse + +from UserDict import UserDict +from collections import defaultdict + +import yaml + +from requests import HTTPError, ConnectionError + +# Manipulation of the path is needed because the docker-py +# module is imported by the name docker, and because this file +# is also named docker +for path in [os.getcwd(), '', os.path.dirname(os.path.abspath(__file__))]: + try: + del sys.path[sys.path.index(path)] + except: + pass + +try: + import docker +except ImportError: + print('docker-py is required for this module') + sys.exit(1) + + +class HostDict(UserDict): + def __setitem__(self, key, value): + if value is not None: + self.data[key] = value + + def update(self, dict=None, **kwargs): + if dict is None: + pass + elif isinstance(dict, UserDict): + for k, v in dict.data.items(): + self[k] = v + else: + for k, v in dict.items(): + self[k] = v + if len(kwargs): + for k, v in kwargs.items(): + self[k] = v + + +def write_stderr(string): + sys.stderr.write('%s\n' % string) + + +def setup(): + config = dict() + config_file = os.environ.get('DOCKER_CONFIG_FILE') + if config_file: + try: + config_file = os.path.abspath(config_file) + except Exception as e: + write_stderr(e) + sys.exit(1) + + with open(config_file) as f: + try: + config = yaml.safe_load(f.read()) + except Exception as e: + write_stderr(e) + sys.exit(1) + + # Environment Variables + env_base_url = os.environ.get('DOCKER_HOST') + env_version = os.environ.get('DOCKER_VERSION') + env_timeout = os.environ.get('DOCKER_TIMEOUT') + env_ssh_port = os.environ.get('DOCKER_PRIVATE_SSH_PORT', '22') + env_default_ip = os.environ.get('DOCKER_DEFAULT_IP', '127.0.0.1') + # Config file defaults + defaults = config.get('defaults', dict()) + def_host = defaults.get('host') + def_version = defaults.get('version') + def_timeout = defaults.get('timeout') + def_default_ip = defaults.get('default_ip') + def_ssh_port = defaults.get('private_ssh_port') + + hosts = list() + + if config: + hosts_list = config.get('hosts', list()) + # Look to the config file's defined hosts + if hosts_list: + for host in hosts_list: + baseurl = host.get('host') or def_host or env_base_url + version = host.get('version') or def_version or env_version + timeout = host.get('timeout') or def_timeout or env_timeout + default_ip = host.get('default_ip') or def_default_ip or env_default_ip + ssh_port = host.get('private_ssh_port') or def_ssh_port or env_ssh_port + + hostdict = HostDict( + base_url=baseurl, + version=version, + timeout=timeout, + default_ip=default_ip, + private_ssh_port=ssh_port, + ) + hosts.append(hostdict) + # Look to the defaults + else: + hostdict = HostDict( + base_url=def_host, + version=def_version, + timeout=def_timeout, + default_ip=def_default_ip, + private_ssh_port=def_ssh_port, + ) + hosts.append(hostdict) + # Look to the environment + else: + hostdict = HostDict( 
+ base_url=env_base_url, + version=env_version, + timeout=env_timeout, + default_ip=env_default_ip, + private_ssh_port=env_ssh_port, + ) + hosts.append(hostdict) + + return hosts + + +def list_groups(): + hosts = setup() + groups = defaultdict(list) + hostvars = defaultdict(dict) + + for host in hosts: + ssh_port = host.pop('private_ssh_port', None) + default_ip = host.pop('default_ip', None) + hostname = host.get('base_url') + + try: + client = docker.Client(**host) + containers = client.containers(all=True) + except (HTTPError, ConnectionError) as e: + write_stderr(e) + sys.exit(1) + + for container in containers: + id = container.get('Id') + short_id = id[:13] + try: + name = container.get('Names', list()).pop(0).lstrip('/') + except IndexError: + name = short_id + + if not id: + continue + + inspect = client.inspect_container(id) + running = inspect.get('State', dict()).get('Running') + + groups[id].append(name) + groups[name].append(name) + if not short_id in groups.keys(): + groups[short_id].append(name) + groups[hostname].append(name) + + if running is True: + groups['running'].append(name) + else: + groups['stopped'].append(name) + + try: + port = client.port(container, ssh_port)[0] + except (IndexError, AttributeError, TypeError): + port = dict() + + try: + ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp'] + except KeyError: + ip = '' + + container_info = dict( + ansible_ssh_host=ip, + ansible_ssh_port=port.get('HostPort', int()), + docker_args=inspect.get('Args'), + docker_config=inspect.get('Config'), + docker_created=inspect.get('Created'), + docker_driver=inspect.get('Driver'), + docker_exec_driver=inspect.get('ExecDriver'), + docker_host_config=inspect.get('HostConfig'), + docker_hostname_path=inspect.get('HostnamePath'), + docker_hosts_path=inspect.get('HostsPath'), + docker_id=inspect.get('ID'), + docker_image=inspect.get('Image'), + docker_name=name, + docker_network_settings=inspect.get('NetworkSettings'), + docker_path=inspect.get('Path'), + docker_resolv_conf_path=inspect.get('ResolvConfPath'), + docker_state=inspect.get('State'), + docker_volumes=inspect.get('Volumes'), + docker_volumes_rw=inspect.get('VolumesRW'), + ) + + hostvars[name].update(container_info) + + groups['docker_hosts'] = [host.get('base_url') for host in hosts] + groups['_meta'] = dict() + groups['_meta']['hostvars'] = hostvars + print(json.dumps(groups, sort_keys=True, indent=4)) + sys.exit(0) + + +def parse_args(): + parser = argparse.ArgumentParser() + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true') + group.add_argument('--host', action='store_true') + return parser.parse_args() + + +def main(): + args = parse_args() + if args.list: + list_groups() + elif args.host: + write_stderr('This option is not supported.') + sys.exit(1) + sys.exit(0) + + +main() diff --git a/plugins/inventory/docker.yml b/contrib/inventory/docker.yml similarity index 100% rename from plugins/inventory/docker.yml rename to contrib/inventory/docker.yml diff --git a/contrib/inventory/ec2.ini b/contrib/inventory/ec2.ini new file mode 100644 index 00000000000000..25947a88f0fac2 --- /dev/null +++ b/contrib/inventory/ec2.ini @@ -0,0 +1,152 @@ +# Ansible EC2 external inventory script settings +# + +[ec2] + +# to talk to a private eucalyptus instance uncomment these lines +# and edit edit eucalyptus_host to be the host name of your cloud controller +#eucalyptus = True +#eucalyptus_host = clc.cloud.domain.org + +# AWS regions to make calls to. 
Set this to 'all' to make request to all regions +# in AWS and merge the results together. Alternatively, set this to a comma +# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' +regions = all +regions_exclude = us-gov-west-1,cn-north-1 + +# When generating inventory, Ansible needs to know how to address a server. +# Each EC2 instance has a lot of variables associated with it. Here is the list: +# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance +# Below are 2 variables that are used as the address of a server: +# - destination_variable +# - vpc_destination_variable + +# This is the normal destination variable to use. If you are running Ansible +# from outside EC2, then 'public_dns_name' makes the most sense. If you are +# running Ansible from within EC2, then perhaps you want to use the internal +# address, and should set this to 'private_dns_name'. The key of an EC2 tag +# may optionally be used; however the boto instance variables hold precedence +# in the event of a collision. +destination_variable = public_dns_name + +# For server inside a VPC, using DNS names may not make sense. When an instance +# has 'subnet_id' set, this variable is used. If the subnet is public, setting +# this to 'ip_address' will return the public IP address. For instances in a +# private subnet, this should be set to 'private_ip_address', and Ansible must +# be run from within EC2. The key of an EC2 tag may optionally be used; however +# the boto instance variables hold precedence in the event of a collision. +# WARNING: - instances that are in the private vpc, _without_ public ip address +# will not be listed in the inventory until You set: +# vpc_destination_variable = private_ip_address +vpc_destination_variable = ip_address + +# To tag instances on EC2 with the resource records that point to them from +# Route53, uncomment and set 'route53' to True. +route53 = False + +# To exclude RDS instances from the inventory, uncomment and set to False. +#rds = False + +# To exclude ElastiCache instances from the inventory, uncomment and set to False. +#elasticache = False + +# Additionally, you can specify the list of zones to exclude looking up in +# 'route53_excluded_zones' as a comma-separated list. +# route53_excluded_zones = samplezone1.com, samplezone2.com + +# By default, only EC2 instances in the 'running' state are returned. Set +# 'all_instances' to True to return all instances regardless of state. +all_instances = False + +# By default, only EC2 instances in the 'running' state are returned. Specify +# EC2 instance states to return as a comma-separated list. This +# option is overriden when 'all_instances' is True. +# instance_states = pending, running, shutting-down, terminated, stopping, stopped + +# By default, only RDS instances in the 'available' state are returned. Set +# 'all_rds_instances' to True return all RDS instances regardless of state. +all_rds_instances = False + +# By default, only ElastiCache clusters and nodes in the 'available' state +# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' +# to True return all ElastiCache clusters and nodes, regardless of state. +# +# Note that all_elasticache_nodes only applies to listed clusters. That means +# if you set all_elastic_clusters to false, no node will be return from +# unavailable clusters, regardless of the state and to what you set for +# all_elasticache_nodes. 
+all_elasticache_replication_groups = False +all_elasticache_clusters = False +all_elasticache_nodes = False + +# API calls to EC2 are slow. For this reason, we cache the results of an API +# call. Set this to the path you want cache files to be written to. Two files +# will be written to this directory: +# - ansible-ec2.cache +# - ansible-ec2.index +cache_path = ~/.ansible/tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# To disable the cache, set this value to 0 +cache_max_age = 300 + +# Organize groups into a nested/hierarchy instead of a flat namespace. +nested_groups = False + +# Replace - tags when creating groups to avoid issues with ansible +replace_dash_in_groups = True + +# If set to true, any tag of the form "a,b,c" is expanded into a list +# and the results are used to create additional tag_* inventory groups. +expand_csv_tags = False + +# The EC2 inventory output can become very large. To manage its size, +# configure which groups should be created. +group_by_instance_id = True +group_by_region = True +group_by_availability_zone = True +group_by_ami_id = True +group_by_instance_type = True +group_by_key_pair = True +group_by_vpc_id = True +group_by_security_group = True +group_by_tag_keys = True +group_by_tag_none = True +group_by_route53_names = True +group_by_rds_engine = True +group_by_rds_parameter_group = True +group_by_elasticache_engine = True +group_by_elasticache_cluster = True +group_by_elasticache_parameter_group = True +group_by_elasticache_replication_group = True + +# If you only want to include hosts that match a certain regular expression +# pattern_include = staging-* + +# If you want to exclude any hosts that match a certain regular expression +# pattern_exclude = staging-* + +# Instance filters can be used to control which instances are retrieved for +# inventory. For the full list of possible filters, please read the EC2 API +# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters +# Filters are key/value pairs separated by '=', to list multiple filters use +# a list separated by commas. See examples below. + +# Retrieve only instances with (key=value) env=staging tag +# instance_filters = tag:env=staging + +# Retrieve only instances with role=webservers OR role=dbservers tag +# instance_filters = tag:role=webservers,tag:role=dbservers + +# Retrieve only t1.micro instances OR instances with tag env=staging +# instance_filters = instance-type=t1.micro,tag:env=staging + +# You can use wildcards in filter values also. Below will list instances which +# tag Name value matches webservers1* +# (ex. webservers15, webservers1a, webservers123 etc) +# instance_filters = tag:Name=webservers1* + +# A boto configuration profile may be used to separate out credentials +# see http://boto.readthedocs.org/en/latest/boto_config_tut.html +# boto_profile = some-boto-profile-name diff --git a/contrib/inventory/ec2.py b/contrib/inventory/ec2.py new file mode 100755 index 00000000000000..700b51a839e57c --- /dev/null +++ b/contrib/inventory/ec2.py @@ -0,0 +1,1327 @@ +#!/usr/bin/env python + +''' +EC2 external inventory script +================================= + +Generates inventory that Ansible can understand by making API request to +AWS EC2 using the Boto library. 
+ +NOTE: This script assumes Ansible is being executed where the environment +variables needed for Boto have already been set: + export AWS_ACCESS_KEY_ID='AK123' + export AWS_SECRET_ACCESS_KEY='abc123' + +This script also assumes there is an ec2.ini file alongside it. To specify a +different path to ec2.ini, define the EC2_INI_PATH environment variable: + + export EC2_INI_PATH=/path/to/my_ec2.ini + +If you're using eucalyptus you need to set the above variables and +you need to define: + + export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus + +If you're using boto profiles (requires boto>=2.24.0) you can choose a profile +using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using +the AWS_PROFILE variable: + + AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml + +For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html + +When run against a specific host, this script returns the following variables: + - ec2_ami_launch_index + - ec2_architecture + - ec2_association + - ec2_attachTime + - ec2_attachment + - ec2_attachmentId + - ec2_client_token + - ec2_deleteOnTermination + - ec2_description + - ec2_deviceIndex + - ec2_dns_name + - ec2_eventsSet + - ec2_group_name + - ec2_hypervisor + - ec2_id + - ec2_image_id + - ec2_instanceState + - ec2_instance_type + - ec2_ipOwnerId + - ec2_ip_address + - ec2_item + - ec2_kernel + - ec2_key_name + - ec2_launch_time + - ec2_monitored + - ec2_monitoring + - ec2_networkInterfaceId + - ec2_ownerId + - ec2_persistent + - ec2_placement + - ec2_platform + - ec2_previous_state + - ec2_private_dns_name + - ec2_private_ip_address + - ec2_publicIp + - ec2_public_dns_name + - ec2_ramdisk + - ec2_reason + - ec2_region + - ec2_requester_id + - ec2_root_device_name + - ec2_root_device_type + - ec2_security_group_ids + - ec2_security_group_names + - ec2_shutdown_state + - ec2_sourceDestCheck + - ec2_spot_instance_request_id + - ec2_state + - ec2_state_code + - ec2_state_reason + - ec2_status + - ec2_subnet_id + - ec2_tenancy + - ec2_virtualization_type + - ec2_vpc_id + +These variables are pulled out of a boto.ec2.instance object. There is a lack of +consistency with variable spellings (camelCase and underscores) since this +just loops through all variables the object exposes. It is preferred to use the +ones with underscores when multiple exist. + +In addition, if an instance has AWS Tags associated with it, each tag is a new +variable named: + - ec2_tag_[Key] = [Value] + +Security groups are comma-separated in 'ec2_security_group_ids' and +'ec2_security_group_names'. +''' + +# (c) 2012, Peter Sankauskas +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +###################################################################### + +import sys +import os +import argparse +import re +from time import time +import boto +from boto import ec2 +from boto import rds +from boto import elasticache +from boto import route53 +import six + +from six.moves import configparser +from collections import defaultdict + +try: + import json +except ImportError: + import simplejson as json + + +class Ec2Inventory(object): + + def _empty_inventory(self): + return {"_meta" : {"hostvars" : {}}} + + def __init__(self): + ''' Main execution path ''' + + # Inventory grouped by instance IDs, tags, security groups, regions, + # and availability zones + self.inventory = self._empty_inventory() + + # Index of hostname (address) to instance ID + self.index = {} + + # Boto profile to use (if any) + self.boto_profile = None + + # Read settings and parse CLI arguments + self.parse_cli_args() + self.read_settings() + + # Make sure that profile_name is not passed at all if not set + # as pre 2.24 boto will fall over otherwise + if self.boto_profile: + if not hasattr(boto.ec2.EC2Connection, 'profile_name'): + self.fail_with_error("boto version must be >= 2.24 to use profile") + + # Cache + if self.args.refresh_cache: + self.do_api_calls_update_cache() + elif not self.is_cache_valid(): + self.do_api_calls_update_cache() + + # Data to print + if self.args.host: + data_to_print = self.get_host_info() + + elif self.args.list: + # Display list of instances for inventory + if self.inventory == self._empty_inventory(): + data_to_print = self.get_inventory_from_cache() + else: + data_to_print = self.json_format_dict(self.inventory, True) + + print(data_to_print) + + + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_index): + return True + + return False + + + def read_settings(self): + ''' Reads the settings from the ec2.ini file ''' + if six.PY3: + config = configparser.ConfigParser() + else: + config = configparser.SafeConfigParser() + ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') + ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path))) + config.read(ec2_ini_path) + + # is eucalyptus? 
+ self.eucalyptus_host = None + self.eucalyptus = False + if config.has_option('ec2', 'eucalyptus'): + self.eucalyptus = config.getboolean('ec2', 'eucalyptus') + if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): + self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') + + # Regions + self.regions = [] + configRegions = config.get('ec2', 'regions') + configRegions_exclude = config.get('ec2', 'regions_exclude') + if (configRegions == 'all'): + if self.eucalyptus_host: + self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) + else: + for regionInfo in ec2.regions(): + if regionInfo.name not in configRegions_exclude: + self.regions.append(regionInfo.name) + else: + self.regions = configRegions.split(",") + + # Destination addresses + self.destination_variable = config.get('ec2', 'destination_variable') + self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') + + # Route53 + self.route53_enabled = config.getboolean('ec2', 'route53') + self.route53_excluded_zones = [] + if config.has_option('ec2', 'route53_excluded_zones'): + self.route53_excluded_zones.extend( + config.get('ec2', 'route53_excluded_zones', '').split(',')) + + # Include RDS instances? + self.rds_enabled = True + if config.has_option('ec2', 'rds'): + self.rds_enabled = config.getboolean('ec2', 'rds') + + # Include ElastiCache instances? + self.elasticache_enabled = True + if config.has_option('ec2', 'elasticache'): + self.elasticache_enabled = config.getboolean('ec2', 'elasticache') + + # Return all EC2 instances? + if config.has_option('ec2', 'all_instances'): + self.all_instances = config.getboolean('ec2', 'all_instances') + else: + self.all_instances = False + + # Instance states to be gathered in inventory. Default is 'running'. + # Setting 'all_instances' to 'yes' overrides this option. + ec2_valid_instance_states = [ + 'pending', + 'running', + 'shutting-down', + 'terminated', + 'stopping', + 'stopped' + ] + self.ec2_instance_states = [] + if self.all_instances: + self.ec2_instance_states = ec2_valid_instance_states + elif config.has_option('ec2', 'instance_states'): + for instance_state in config.get('ec2', 'instance_states').split(','): + instance_state = instance_state.strip() + if instance_state not in ec2_valid_instance_states: + continue + self.ec2_instance_states.append(instance_state) + else: + self.ec2_instance_states = ['running'] + + # Return all RDS instances? (if RDS is enabled) + if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: + self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') + else: + self.all_rds_instances = False + + # Return all ElastiCache replication groups? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: + self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') + else: + self.all_elasticache_replication_groups = False + + # Return all ElastiCache clusters? (if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: + self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') + else: + self.all_elasticache_clusters = False + + # Return all ElastiCache nodes? 
(if ElastiCache is enabled) + if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: + self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') + else: + self.all_elasticache_nodes = False + + # boto configuration profile (prefer CLI argument) + self.boto_profile = self.args.boto_profile + if config.has_option('ec2', 'boto_profile') and not self.boto_profile: + self.boto_profile = config.get('ec2', 'boto_profile') + + # Cache related + cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) + if self.boto_profile: + cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + self.cache_path_cache = cache_dir + "/ansible-ec2.cache" + self.cache_path_index = cache_dir + "/ansible-ec2.index" + self.cache_max_age = config.getint('ec2', 'cache_max_age') + + if config.has_option('ec2', 'expand_csv_tags'): + self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') + else: + self.expand_csv_tags = False + + # Configure nested groups instead of flat namespace. + if config.has_option('ec2', 'nested_groups'): + self.nested_groups = config.getboolean('ec2', 'nested_groups') + else: + self.nested_groups = False + + # Replace dash or not in group names + if config.has_option('ec2', 'replace_dash_in_groups'): + self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') + else: + self.replace_dash_in_groups = True + + # Configure which groups should be created. + group_by_options = [ + 'group_by_instance_id', + 'group_by_region', + 'group_by_availability_zone', + 'group_by_ami_id', + 'group_by_instance_type', + 'group_by_key_pair', + 'group_by_vpc_id', + 'group_by_security_group', + 'group_by_tag_keys', + 'group_by_tag_none', + 'group_by_route53_names', + 'group_by_rds_engine', + 'group_by_rds_parameter_group', + 'group_by_elasticache_engine', + 'group_by_elasticache_cluster', + 'group_by_elasticache_parameter_group', + 'group_by_elasticache_replication_group', + ] + for option in group_by_options: + if config.has_option('ec2', option): + setattr(self, option, config.getboolean('ec2', option)) + else: + setattr(self, option, True) + + # Do we need to just include hosts that match a pattern? + try: + pattern_include = config.get('ec2', 'pattern_include') + if pattern_include and len(pattern_include) > 0: + self.pattern_include = re.compile(pattern_include) + else: + self.pattern_include = None + except configparser.NoOptionError: + self.pattern_include = None + + # Do we need to exclude hosts that match a pattern? + try: + pattern_exclude = config.get('ec2', 'pattern_exclude'); + if pattern_exclude and len(pattern_exclude) > 0: + self.pattern_exclude = re.compile(pattern_exclude) + else: + self.pattern_exclude = None + except configparser.NoOptionError: + self.pattern_exclude = None + + # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
+ self.ec2_instance_filters = defaultdict(list) + if config.has_option('ec2', 'instance_filters'): + for instance_filter in config.get('ec2', 'instance_filters', '').split(','): + instance_filter = instance_filter.strip() + if not instance_filter or '=' not in instance_filter: + continue + filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] + if not filter_key: + continue + self.ec2_instance_filters[filter_key].append(filter_value) + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') + parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', + help='Use boto profile for connections to EC2') + self.args = parser.parse_args() + + + def do_api_calls_update_cache(self): + ''' Do API calls to each region, and save data in cache files ''' + + if self.route53_enabled: + self.get_route53_records() + + for region in self.regions: + self.get_instances_by_region(region) + if self.rds_enabled: + self.get_rds_instances_by_region(region) + if self.elasticache_enabled: + self.get_elasticache_clusters_by_region(region) + self.get_elasticache_replication_groups_by_region(region) + + self.write_to_cache(self.inventory, self.cache_path_cache) + self.write_to_cache(self.index, self.cache_path_index) + + def connect(self, region): + ''' create connection to api server''' + if self.eucalyptus: + conn = boto.connect_euca(host=self.eucalyptus_host) + conn.APIVersion = '2010-08-31' + else: + conn = self.connect_to_aws(ec2, region) + return conn + + def boto_fix_security_token_in_profile(self, connect_args): + ''' monkey patch for boto issue boto/boto#2100 ''' + profile = 'profile ' + self.boto_profile + if boto.config.has_option(profile, 'aws_security_token'): + connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') + return connect_args + + def connect_to_aws(self, module, region): + connect_args = {} + + # only pass the profile name if it's set (as it is not supported by older boto versions) + if self.boto_profile: + connect_args['profile_name'] = self.boto_profile + self.boto_fix_security_token_in_profile(connect_args) + + conn = module.connect_to_region(region, **connect_args) + # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported + if conn is None: + self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region) + return conn + + def get_instances_by_region(self, region): + ''' Makes an AWS EC2 API call to the list of instances in a particular + region ''' + + try: + conn = self.connect(region) + reservations = [] + if self.ec2_instance_filters: + for filter_key, filter_values in self.ec2_instance_filters.items(): + reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) + else: + reservations = conn.get_all_instances() + + for reservation in reservations: + for instance in reservation.instances: + self.add_instance(instance, region) + + except boto.exception.BotoServerError as e: + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + else: + backend = 'Eucalyptus' if self.eucalyptus else 'AWS' + error = "Error connecting to %s backend.\n%s" % (backend, e.message) + self.fail_with_error(error, 'getting EC2 instances') + + def get_rds_instances_by_region(self, region): + ''' Makes an AWS API call to the list of RDS instances in a particular + region ''' + + try: + conn = self.connect_to_aws(rds, region) + if conn: + instances = conn.get_all_dbinstances() + for instance in instances: + self.add_rds_instance(instance, region) + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS RDS is down:\n%s" % e.message + self.fail_with_error(error, 'getting RDS instances') + + def get_elasticache_clusters_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache clusters (with + nodes' info) in a particular region.''' + + # ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) + try: + conn = elasticache.connect_to_region(region) + if conn: + # show_cache_node_info = True + # because we also want nodes' information + response = conn.describe_cache_clusters(None, None, None, True) + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS ElastiCache is down:\n%s" % e.message + self.fail_with_error(error, 'getting ElastiCache clusters') + + try: + # Boto also doesn't provide wrapper classes to CacheClusters or + # CacheNodes. Because of that wo can't make use of the get_list + # method in the AWSQueryConnection. Let's do the work manually + clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] + + except KeyError as e: + error = "ElastiCache query to AWS failed (unexpected format)." + self.fail_with_error(error, 'getting ElastiCache clusters') + + for cluster in clusters: + self.add_elasticache_cluster(cluster, region) + + def get_elasticache_replication_groups_by_region(self, region): + ''' Makes an AWS API call to the list of ElastiCache replication groups + in a particular region.''' + + # ElastiCache boto module doesn't provide a get_all_intances method, + # that's why we need to call describe directly (it would be called by + # the shorthand method anyway...) 
+ try: + conn = elasticache.connect_to_region(region) + if conn: + response = conn.describe_replication_groups() + + except boto.exception.BotoServerError as e: + error = e.reason + + if e.error_code == 'AuthFailure': + error = self.get_auth_error_message() + if not e.reason == "Forbidden": + error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message + self.fail_with_error(error, 'getting ElastiCache clusters') + + try: + # Boto also doesn't provide wrapper classes to ReplicationGroups + # Because of that wo can't make use of the get_list method in the + # AWSQueryConnection. Let's do the work manually + replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] + + except KeyError as e: + error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." + self.fail_with_error(error, 'getting ElastiCache clusters') + + for replication_group in replication_groups: + self.add_elasticache_replication_group(replication_group, region) + + def get_auth_error_message(self): + ''' create an informative error message if there is an issue authenticating''' + errors = ["Authentication error retrieving ec2 inventory."] + if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: + errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') + else: + errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') + + boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] + boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) + if len(boto_config_found) > 0: + errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) + else: + errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) + + return '\n'.join(errors) + + def fail_with_error(self, err_msg, err_operation=None): + '''log an error to std err for ansible-playbook to consume and exit''' + if err_operation: + err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( + err_msg=err_msg, err_operation=err_operation) + sys.stderr.write(err_msg) + sys.exit(1) + + def get_instance(self, region, instance_id): + conn = self.connect(region) + + reservations = conn.get_all_instances([instance_id]) + for reservation in reservations: + for instance in reservation.instances: + return instance + + def add_instance(self, instance, region): + ''' Adds an instance to the inventory and index, as long as it is + addressable ''' + + # Only return instances with desired instance states + if instance.state not in self.ec2_instance_states: + return + + # Select the best destination address + if instance.subnet_id: + dest = getattr(instance, self.vpc_destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) + else: + dest = getattr(instance, self.destination_variable, None) + if dest is None: + dest = getattr(instance, 'tags').get(self.destination_variable, None) + + if not dest: + # Skip instances we cannot address (e.g. 
private VPC subnet) + return + + # if we only want to include hosts that match a pattern, skip those that don't + if self.pattern_include and not self.pattern_include.match(dest): + return + + # if we need to exclude hosts that match a pattern, skip those + if self.pattern_exclude and self.pattern_exclude.match(dest): + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.placement, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.placement) + self.push_group(self.inventory, 'zones', instance.placement) + + # Inventory: Group by Amazon Machine Image (AMI) ID + if self.group_by_ami_id: + ami_id = self.to_safe(instance.image_id) + self.push(self.inventory, ami_id, dest) + if self.nested_groups: + self.push_group(self.inventory, 'images', ami_id) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_type) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by key pair + if self.group_by_key_pair and instance.key_name: + key_name = self.to_safe('key_' + instance.key_name) + self.push(self.inventory, key_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'keys', key_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + for group in instance.groups: + key = self.to_safe("security_group_" + group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + except AttributeError: + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + + # Inventory: Group by tag keys + if self.group_by_tag_keys: + for k, v in instance.tags.items(): + if self.expand_csv_tags and v and ',' in v: + values = map(lambda x: x.strip(), v.split(',')) + else: + values = [v] + + for v in values: + if v: + key = self.to_safe("tag_" + k + "=" + v) + else: + key = self.to_safe("tag_" + k) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) + if v: + self.push_group(self.inventory, self.to_safe("tag_" + k), key) + + # Inventory: Group by Route53 domain names if enabled + if self.route53_enabled and self.group_by_route53_names: + route53_names = self.get_instance_route53_names(instance) + for name in route53_names: + self.push(self.inventory, name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'route53', name) + + # Global Tag: instances without tags + if self.group_by_tag_none and len(instance.tags) == 0: + self.push(self.inventory, 'tag_none', dest) + 
if self.nested_groups: + self.push_group(self.inventory, 'tags', 'tag_none') + + # Global Tag: tag all EC2 instances + self.push(self.inventory, 'ec2', dest) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + + + def add_rds_instance(self, instance, region): + ''' Adds an RDS instance to the inventory and index, as long as it is + addressable ''' + + # Only want available instances unless all_rds_instances is True + if not self.all_rds_instances and instance.status != 'available': + return + + # Select the best destination address + dest = instance.endpoint[0] + + if not dest: + # Skip instances we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, instance.id] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[instance.id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', instance.id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, instance.availability_zone, dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, instance.availability_zone) + self.push_group(self.inventory, 'zones', instance.availability_zone) + + # Inventory: Group by instance type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + instance.instance_class) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC + if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: + vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) + self.push(self.inventory, vpc_id_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'vpcs', vpc_id_name) + + # Inventory: Group by security group + if self.group_by_security_group: + try: + if instance.security_group: + key = self.to_safe("security_group_" + instance.security_group.name) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + except AttributeError: + self.fail_with_error('\n'.join(['Package boto seems a bit older.', + 'Please upgrade boto >= 2.3.0.'])) + + + # Inventory: Group by engine + if self.group_by_rds_engine: + self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) + + # Inventory: Group by parameter group + if self.group_by_rds_parameter_group: + self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) + if self.nested_groups: + self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) + + # Global Tag: all RDS instances + self.push(self.inventory, 'rds', dest) + + self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) + + def add_elasticache_cluster(self, cluster, region): + ''' Adds an ElastiCache cluster to the inventory and index, as long as + it's nodes are addressable ''' + + # Only want available clusters unless all_elasticache_clusters is True + if not self.all_elasticache_clusters 
and cluster['CacheClusterStatus'] != 'available': + return + + # Select the best destination address + if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: + # Memcached cluster + dest = cluster['ConfigurationEndpoint']['Address'] + is_redis = False + else: + # Redis sigle node cluster + # Because all Redis clusters are single nodes, we'll merge the + # info from the cluster with info about the node + dest = cluster['CacheNodes'][0]['Endpoint']['Address'] + is_redis = True + + if not dest: + # Skip clusters we cannot address (e.g. private VPC subnet) + return + + # Add to index + self.index[dest] = [region, cluster['CacheClusterId']] + + # Inventory: Group by instance ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[cluster['CacheClusterId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) + + # Inventory: Group by region + if self.group_by_region and not is_redis: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone and not is_redis: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type and not is_redis: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group and not is_redis: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. 
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine and not is_redis: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) + + # Inventory: Group by parameter group + if self.group_by_elasticache_parameter_group: + self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) + + # Inventory: Group by replication group + if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: + self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) + + host_info = self.get_host_info_dict_from_describe_dict(cluster) + + self.inventory["_meta"]["hostvars"][dest] = host_info + + # Add the nodes + for node in cluster['CacheNodes']: + self.add_elasticache_node(node, cluster, region) + + def add_elasticache_node(self, node, cluster, region): + ''' Adds an ElastiCache node to the inventory and index, as long as + it is addressable ''' + + # Only want available nodes unless all_elasticache_nodes is True + if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': + return + + # Select the best destination address + dest = node['Endpoint']['Address'] + + if not dest: + # Skip nodes we cannot address (e.g. 
private VPC subnet) + return + + node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) + + # Add to index + self.index[dest] = [region, node_id] + + # Inventory: Group by node ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[node_id] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', node_id) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone + if self.group_by_availability_zone: + self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) + if self.nested_groups: + if self.group_by_region: + self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) + self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) + + # Inventory: Group by node type + if self.group_by_instance_type: + type_name = self.to_safe('type_' + cluster['CacheNodeType']) + self.push(self.inventory, type_name, dest) + if self.nested_groups: + self.push_group(self.inventory, 'types', type_name) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for ElastiCache) + + # Inventory: Group by security group + if self.group_by_security_group: + + # Check for the existence of the 'SecurityGroups' key and also if + # this key has some value. When the cluster is not placed in a SG + # the query can return None here and cause an error. + if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: + for security_group in cluster['SecurityGroups']: + key = self.to_safe("security_group_" + security_group['SecurityGroupId']) + self.push(self.inventory, key, dest) + if self.nested_groups: + self.push_group(self.inventory, 'security_groups', key) + + # Inventory: Group by engine + if self.group_by_elasticache_engine: + self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) + + # Inventory: Group by parameter group (done at cluster level) + + # Inventory: Group by replication group (done at cluster level) + + # Inventory: Group by ElastiCache Cluster + if self.group_by_elasticache_cluster: + self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) + + # Global Tag: all ElastiCache nodes + self.push(self.inventory, 'elasticache_nodes', dest) + + host_info = self.get_host_info_dict_from_describe_dict(node) + + if dest in self.inventory["_meta"]["hostvars"]: + self.inventory["_meta"]["hostvars"][dest].update(host_info) + else: + self.inventory["_meta"]["hostvars"][dest] = host_info + + def add_elasticache_replication_group(self, replication_group, region): + ''' Adds an ElastiCache replication group to the inventory and index ''' + + # Only want available clusters unless all_elasticache_replication_groups is True + if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': + return + + # Select the best destination address (PrimaryEndpoint) + dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] + + if not dest: + # Skip clusters we cannot address (e.g. 
private VPC subnet) + return + + # Add to index + self.index[dest] = [region, replication_group['ReplicationGroupId']] + + # Inventory: Group by ID (always a group of 1) + if self.group_by_instance_id: + self.inventory[replication_group['ReplicationGroupId']] = [dest] + if self.nested_groups: + self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) + + # Inventory: Group by region + if self.group_by_region: + self.push(self.inventory, region, dest) + if self.nested_groups: + self.push_group(self.inventory, 'regions', region) + + # Inventory: Group by availability zone (doesn't apply to replication groups) + + # Inventory: Group by node type (doesn't apply to replication groups) + + # Inventory: Group by VPC (information not available in the current + # AWS API version for replication groups + + # Inventory: Group by security group (doesn't apply to replication groups) + # Check this value in cluster level + + # Inventory: Group by engine (replication groups are always Redis) + if self.group_by_elasticache_engine: + self.push(self.inventory, 'elasticache_redis', dest) + if self.nested_groups: + self.push_group(self.inventory, 'elasticache_engines', 'redis') + + # Global Tag: all ElastiCache clusters + self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) + + host_info = self.get_host_info_dict_from_describe_dict(replication_group) + + self.inventory["_meta"]["hostvars"][dest] = host_info + + def get_route53_records(self): + ''' Get and store the map of resource records to domain names that + point to them. ''' + + r53_conn = route53.Route53Connection() + all_zones = r53_conn.get_zones() + + route53_zones = [ zone for zone in all_zones if zone.name[:-1] + not in self.route53_excluded_zones ] + + self.route53_records = {} + + for zone in route53_zones: + rrsets = r53_conn.get_all_rrsets(zone.id) + + for record_set in rrsets: + record_name = record_set.name + + if record_name.endswith('.'): + record_name = record_name[:-1] + + for resource in record_set.resource_records: + self.route53_records.setdefault(resource, set()) + self.route53_records[resource].add(record_name) + + + def get_instance_route53_names(self, instance): + ''' Check if an instance is referenced in the records we have from + Route53. If it is, return the list of domain names pointing to said + instance. If nothing points to it, return an empty list. 
''' + + instance_attributes = [ 'public_dns_name', 'private_dns_name', + 'ip_address', 'private_ip_address' ] + + name_list = set() + + for attrib in instance_attributes: + try: + value = getattr(instance, attrib) + except AttributeError: + continue + + if value in self.route53_records: + name_list.update(self.route53_records[value]) + + return list(name_list) + + def get_host_info_dict_from_instance(self, instance): + instance_vars = {} + for key in vars(instance): + value = getattr(instance, key) + key = self.to_safe('ec2_' + key) + + # Handle complex types + # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 + if key == 'ec2__state': + instance_vars['ec2_state'] = instance.state or '' + instance_vars['ec2_state_code'] = instance.state_code + elif key == 'ec2__previous_state': + instance_vars['ec2_previous_state'] = instance.previous_state or '' + instance_vars['ec2_previous_state_code'] = instance.previous_state_code + elif type(value) in [int, bool]: + instance_vars[key] = value + elif isinstance(value, six.string_types): + instance_vars[key] = value.strip() + elif type(value) == type(None): + instance_vars[key] = '' + elif key == 'ec2_region': + instance_vars[key] = value.name + elif key == 'ec2__placement': + instance_vars['ec2_placement'] = value.zone + elif key == 'ec2_tags': + for k, v in value.items(): + if self.expand_csv_tags and ',' in v: + v = map(lambda x: x.strip(), v.split(',')) + key = self.to_safe('ec2_tag_' + k) + instance_vars[key] = v + elif key == 'ec2_groups': + group_ids = [] + group_names = [] + for group in value: + group_ids.append(group.id) + group_names.append(group.name) + instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) + instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) + else: + pass + # TODO Product codes if someone finds them useful + #print key + #print type(value) + #print value + + return instance_vars + + def get_host_info_dict_from_describe_dict(self, describe_dict): + ''' Parses the dictionary returned by the API call into a flat list + of parameters. This method should be used only when 'describe' is + used directly because Boto doesn't provide specific classes. ''' + + # I really don't agree with prefixing everything with 'ec2' + # because EC2, RDS and ElastiCache are different services. + # I'm just following the pattern used until now to not break any + # compatibility. 
+ + host_info = {} + for key in describe_dict: + value = describe_dict[key] + key = self.to_safe('ec2_' + self.uncammelize(key)) + + # Handle complex types + + # Target: Memcached Cache Clusters + if key == 'ec2_configuration_endpoint' and value: + host_info['ec2_configuration_endpoint_address'] = value['Address'] + host_info['ec2_configuration_endpoint_port'] = value['Port'] + + # Target: Cache Nodes and Redis Cache Clusters (single node) + if key == 'ec2_endpoint' and value: + host_info['ec2_endpoint_address'] = value['Address'] + host_info['ec2_endpoint_port'] = value['Port'] + + # Target: Redis Replication Groups + if key == 'ec2_node_groups' and value: + host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] + host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] + replica_count = 0 + for node in value[0]['NodeGroupMembers']: + if node['CurrentRole'] == 'primary': + host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] + host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] + host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] + elif node['CurrentRole'] == 'replica': + host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] + host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] + host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] + replica_count += 1 + + # Target: Redis Replication Groups + if key == 'ec2_member_clusters' and value: + host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) + + # Target: All Cache Clusters + elif key == 'ec2_cache_parameter_group': + host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) + host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] + host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] + + # Target: Almost everything + elif key == 'ec2_security_groups': + + # Skip if SecurityGroups is None + # (it is possible to have the key defined but no value in it). 
+ if value is not None: + sg_ids = [] + for sg in value: + sg_ids.append(sg['SecurityGroupId']) + host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) + + # Target: Everything + # Preserve booleans and integers + elif type(value) in [int, bool]: + host_info[key] = value + + # Target: Everything + # Sanitize string values + elif isinstance(value, six.string_types): + host_info[key] = value.strip() + + # Target: Everything + # Replace None by an empty string + elif type(value) == type(None): + host_info[key] = '' + + else: + # Remove non-processed complex types + pass + + return host_info + + def get_host_info(self): + ''' Get variables about a specific host ''' + + if len(self.index) == 0: + # Need to load index from cache + self.load_index_from_cache() + + if not self.args.host in self.index: + # try updating the cache + self.do_api_calls_update_cache() + if not self.args.host in self.index: + # host might not exist anymore + return self.json_format_dict({}, True) + + (region, instance_id) = self.index[self.args.host] + + instance = self.get_instance(region, instance_id) + return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) + + def push(self, my_dict, key, element): + ''' Push an element onto an array that may not have been defined in + the dict ''' + group_info = my_dict.setdefault(key, []) + if isinstance(group_info, dict): + host_list = group_info.setdefault('hosts', []) + host_list.append(element) + else: + group_info.append(element) + + def push_group(self, my_dict, key, element): + ''' Push a group as a child of another group. ''' + parent_group = my_dict.setdefault(key, {}) + if not isinstance(parent_group, dict): + parent_group = my_dict[key] = {'hosts': parent_group} + child_groups = parent_group.setdefault('children', []) + if element not in child_groups: + child_groups.append(element) + + def get_inventory_from_cache(self): + ''' Reads the inventory from the cache file and returns it as a JSON + object ''' + + cache = open(self.cache_path_cache, 'r') + json_inventory = cache.read() + return json_inventory + + + def load_index_from_cache(self): + ''' Reads the index from the cache file sets self.index ''' + + cache = open(self.cache_path_index, 'r') + json_index = cache.read() + self.index = json.loads(json_index) + + + def write_to_cache(self, data, filename): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + def uncammelize(self, key): + temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' + regex = "[^A-Za-z0-9\_" + if not self.replace_dash_in_groups: + regex += "\-" + return re.sub(regex + "]", "_", word) + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +Ec2Inventory() + diff --git a/contrib/inventory/fleet.py b/contrib/inventory/fleet.py new file mode 100755 index 00000000000000..788e1a5f511e6d --- /dev/null +++ b/contrib/inventory/fleet.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +""" +fleetctl base external inventory script. 
Automatically finds the IPs of the booted coreos instances and +returns it under the host group 'coreos' +""" + +# Copyright (C) 2014 Andrew Rothstein +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# +# Thanks to the vagrant.py inventory script for giving me the basic structure +# of this. +# + +import sys +import subprocess +import re +import string +from optparse import OptionParser +try: + import json +except: + import simplejson as json + +# Options +#------------------------------ + +parser = OptionParser(usage="%prog [options] --list | --host ") +parser.add_option('--list', default=False, dest="list", action="store_true", + help="Produce a JSON consumable grouping of servers in your fleet") +parser.add_option('--host', default=None, dest="host", + help="Generate additional host specific details for given host for Ansible") +(options, args) = parser.parse_args() + +# +# helper functions +# + +def get_ssh_config(): + configs = [] + for box in list_running_boxes(): + config = get_a_ssh_config(box) + configs.append(config) + return configs + +#list all the running instances in the fleet +def list_running_boxes(): + boxes = [] + for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'): + matcher = re.search("[^\s]+[\s]+([^\s]+).+", line) + if matcher and matcher.group(1) != "IP": + boxes.append(matcher.group(1)) + + return boxes + +def get_a_ssh_config(box_name): + config = {} + config['Host'] = box_name + config['ansible_ssh_user'] = 'core' + config['ansible_python_interpreter'] = '/opt/bin/python' + return config + +# List out servers that vagrant has running +#------------------------------ +if options.list: + ssh_config = get_ssh_config() + hosts = { 'coreos': []} + + for data in ssh_config: + hosts['coreos'].append(data['Host']) + + print(json.dumps(hosts)) + sys.exit(1) + +# Get out the host details +#------------------------------ +elif options.host: + result = {} + ssh_config = get_ssh_config() + + details = filter(lambda x: (x['Host'] == options.host), ssh_config) + if len(details) > 0: + #pass through the port, in case it's non standard. 
+ result = details[0] + result + + print(json.dumps(result)) + sys.exit(1) + + +# Print out help +#------------------------------ +else: + parser.print_help() + sys.exit(1) diff --git a/plugins/inventory/freeipa.py b/contrib/inventory/freeipa.py similarity index 90% rename from plugins/inventory/freeipa.py rename to contrib/inventory/freeipa.py index caf336239ccd8b..a2632621ca9589 100755 --- a/plugins/inventory/freeipa.py +++ b/contrib/inventory/freeipa.py @@ -13,7 +13,11 @@ def initialize(): api.bootstrap(context='cli') api.finalize() - api.Backend.xmlclient.connect() + try: + api.Backend.rpcclient.connect() + except AttributeError: + #FreeIPA < 4.0 compatibility + api.Backend.xmlclient.connect() return api @@ -37,7 +41,7 @@ def list_groups(api): inventory['_meta'] = {'hostvars': hostvars} inv_string = json.dumps(inventory, indent=1, sort_keys=True) - print inv_string + print(inv_string) return None @@ -65,7 +69,7 @@ def print_host(host): This function expects one string, this hostname to lookup variables for. ''' - print json.dumps({}) + print(json.dumps({})) return None diff --git a/plugins/inventory/gce.ini b/contrib/inventory/gce.ini similarity index 100% rename from plugins/inventory/gce.ini rename to contrib/inventory/gce.ini diff --git a/contrib/inventory/gce.py b/contrib/inventory/gce.py new file mode 100755 index 00000000000000..b13c194a6e743f --- /dev/null +++ b/contrib/inventory/gce.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python +# Copyright 2013 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +''' +GCE external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests +Google Compute Engine via the libcloud library. Full install/configuration +instructions for the gce* modules can be found in the comments of +ansible/test/gce_tests.py. + +When run against a specific host, this script returns the following variables +based on the data obtained from the libcloud Node object: + - gce_uuid + - gce_id + - gce_image + - gce_machine_type + - gce_private_ip + - gce_public_ip + - gce_name + - gce_description + - gce_status + - gce_zone + - gce_tags + - gce_metadata + - gce_network + +When run in --list mode, instances are grouped by the following categories: + - zone: + zone group name examples are us-central1-b, europe-west1-a, etc. + - instance tags: + An entry is created for each tag. For example, if you have two instances + with a common tag called 'foo', they will both be grouped together under + the 'tag_foo' name. + - network name: + the name of the network is appended to 'network_' (e.g. the 'default' + network will result in a group named 'network_default') + - machine type + types follow a pattern like n1-standard-4, g1-small, etc. + - running status: + group name prefixed with 'status_' (e.g. status_running, status_stopped,..) 
+ - image: + when using an ephemeral/scratch disk, this will be set to the image name + used when creating the instance (e.g. debian-7-wheezy-v20130816). when + your instance was created with a root persistent disk it will be set to + 'persistent_disk' since there is no current way to determine the image. + +Examples: + Execute uname on all instances in the us-central1-a zone + $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" + + Use the GCE inventory script to print out instance specific information + $ contrib/inventory/gce.py --host my_instance + +Author: Eric Johnson +Version: 0.0.1 +''' + +__requires__ = ['pycrypto>=2.6'] +try: + import pkg_resources +except ImportError: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. We don't + # fail here as there is code that better expresses the errors where the + # library is used. + pass + +USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" +USER_AGENT_VERSION="v1" + +import sys +import os +import argparse +import ConfigParser + +try: + import json +except ImportError: + import simplejson as json + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + _ = Provider.GCE +except: + print("GCE inventory script requires libcloud >= 0.13") + sys.exit(1) + + +class GceInventory(object): + def __init__(self): + # Read settings and parse CLI arguments + self.parse_cli_args() + self.driver = self.get_gce_driver() + + # Just display data for specific host + if self.args.host: + print(self.json_format_dict(self.node_to_dict( + self.get_instance(self.args.host)), + pretty=self.args.pretty)) + sys.exit(0) + + # Otherwise, assume user wants all instances grouped + print(self.json_format_dict(self.group_instances(), + pretty=self.args.pretty)) + sys.exit(0) + + def get_gce_driver(self): + """Determine the GCE authorization settings and return a + libcloud driver. + """ + gce_ini_default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "gce.ini") + gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + + # Create a ConfigParser. + # This provides empty defaults to each key, so that environment + # variable configuration (as opposed to INI configuration) is able + # to work. + config = ConfigParser.SafeConfigParser(defaults={ + 'gce_service_account_email_address': '', + 'gce_service_account_pem_file_path': '', + 'gce_project_id': '', + 'libcloud_secrets': '', + }) + if 'gce' not in config.sections(): + config.add_section('gce') + config.read(gce_ini_path) + + # Attempt to get GCE params from a configuration file, if one + # exists. 
+ secrets_path = config.get('gce', 'libcloud_secrets') + secrets_found = False + try: + import secrets + args = list(getattr(secrets, 'GCE_PARAMS', [])) + kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + secrets_found = True + except: + pass + + if not secrets_found and secrets_path: + if not secrets_path.endswith('secrets.py'): + err = "Must specify libcloud secrets file as " + err += "/absolute/path/to/secrets.py" + print(err) + sys.exit(1) + sys.path.append(os.path.dirname(secrets_path)) + try: + import secrets + args = list(getattr(secrets, 'GCE_PARAMS', [])) + kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + secrets_found = True + except: + pass + if not secrets_found: + args = [ + config.get('gce','gce_service_account_email_address'), + config.get('gce','gce_service_account_pem_file_path') + ] + kwargs = {'project': config.get('gce', 'gce_project_id')} + + # If the appropriate environment variables are set, they override + # other configuration; process those into our args and kwargs. + args[0] = os.environ.get('GCE_EMAIL', args[0]) + args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) + kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) + + # Retrieve and return the GCE driver. + gce = get_driver(Provider.GCE)(*args, **kwargs) + gce.connection.user_agent_append( + '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), + ) + return gce + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser( + description='Produce an Ansible Inventory file based on GCE') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, + help='Pretty format (default: False)') + self.args = parser.parse_args() + + + def node_to_dict(self, inst): + md = {} + + if inst is None: + return {} + + if inst.extra['metadata'].has_key('items'): + for entry in inst.extra['metadata']['items']: + md[entry['key']] = entry['value'] + + net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] + return { + 'gce_uuid': inst.uuid, + 'gce_id': inst.id, + 'gce_image': inst.image, + 'gce_machine_type': inst.size, + 'gce_private_ip': inst.private_ips[0], + 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, + 'gce_name': inst.name, + 'gce_description': inst.extra['description'], + 'gce_status': inst.extra['status'], + 'gce_zone': inst.extra['zone'].name, + 'gce_tags': inst.extra['tags'], + 'gce_metadata': md, + 'gce_network': net, + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] + } + + def get_instance(self, instance_name): + '''Gets details about a specific instance ''' + try: + return self.driver.ex_get_node(instance_name) + except Exception as e: + return None + + def group_instances(self): + '''Group all instances''' + groups = {} + meta = {} + meta["hostvars"] = {} + + for node in self.driver.list_nodes(): + name = node.name + + meta["hostvars"][name] = self.node_to_dict(node) + + zone = node.extra['zone'].name + if groups.has_key(zone): groups[zone].append(name) + else: groups[zone] = [name] + + tags = node.extra['tags'] + for t in tags: + if t.startswith('group-'): + tag = t[6:] + else: + tag = 'tag_%s' % t + if groups.has_key(tag): groups[tag].append(name) + else: groups[tag] = [name] + + net = 
node.extra['networkInterfaces'][0]['network'].split('/')[-1] + net = 'network_%s' % net + if groups.has_key(net): groups[net].append(name) + else: groups[net] = [name] + + machine_type = node.size + if groups.has_key(machine_type): groups[machine_type].append(name) + else: groups[machine_type] = [name] + + image = node.image and node.image or 'persistent_disk' + if groups.has_key(image): groups[image].append(name) + else: groups[image] = [name] + + status = node.extra['status'] + stat = 'status_%s' % status.lower() + if groups.has_key(stat): groups[stat].append(name) + else: groups[stat] = [name] + + groups["_meta"] = meta + + return groups + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +# Run the script +GceInventory() diff --git a/contrib/inventory/jail.py b/contrib/inventory/jail.py new file mode 100755 index 00000000000000..843812b33cab8e --- /dev/null +++ b/contrib/inventory/jail.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +# (c) 2013, Michael Scherer +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from subprocess import Popen,PIPE +import sys +import json + +result = {} +result['all'] = {} + +pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True) +result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()] +result['all']['vars'] = {} +result['all']['vars']['ansible_connection'] = 'jail' + +if len(sys.argv) == 2 and sys.argv[1] == '--list': + print(json.dumps(result)) +elif len(sys.argv) == 3 and sys.argv[1] == '--host': + print(json.dumps({'ansible_connection': 'jail'})) +else: + print("Need an argument, either --list or --host ") diff --git a/contrib/inventory/landscape.py b/contrib/inventory/landscape.py new file mode 100755 index 00000000000000..4b53171c34eb0f --- /dev/null +++ b/contrib/inventory/landscape.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python + +# (c) 2015, Marc Abramowitz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use nodes discovered by Canonical's +# Landscape (http://www.ubuntu.com/management/landscape-features). 
+# +# Requires the `landscape_api` Python module +# See: +# - https://landscape.canonical.com/static/doc/api/api-client-package.html +# - https://landscape.canonical.com/static/doc/api/python-api.html +# +# Environment variables +# --------------------- +# - `LANDSCAPE_API_URI` +# - `LANDSCAPE_API_KEY` +# - `LANDSCAPE_API_SECRET` +# - `LANDSCAPE_API_SSL_CA_FILE` (optional) + + +import argparse +import collections +import os +import sys + +from landscape_api.base import API, HTTPError + +try: + import json +except ImportError: + import simplejson as json + +_key = 'landscape' + + +class EnvironmentConfig(object): + uri = os.getenv('LANDSCAPE_API_URI') + access_key = os.getenv('LANDSCAPE_API_KEY') + secret_key = os.getenv('LANDSCAPE_API_SECRET') + ssl_ca_file = os.getenv('LANDSCAPE_API_SSL_CA_FILE') + + +def _landscape_client(): + env = EnvironmentConfig() + return API( + uri=env.uri, + access_key=env.access_key, + secret_key=env.secret_key, + ssl_ca_file=env.ssl_ca_file) + + +def get_landscape_members_data(): + return _landscape_client().get_computers() + + +def get_nodes(data): + return [node['hostname'] for node in data] + + +def get_groups(data): + groups = collections.defaultdict(list) + + for node in data: + for value in node['tags']: + groups[value].append(node['hostname']) + + return groups + + +def get_meta(data): + meta = {'hostvars': {}} + for node in data: + meta['hostvars'][node['hostname']] = {'tags': node['tags']} + return meta + + +def print_list(): + data = get_landscape_members_data() + nodes = get_nodes(data) + groups = get_groups(data) + meta = get_meta(data) + inventory_data = {_key: nodes, '_meta': meta} + inventory_data.update(groups) + print(json.dumps(inventory_data)) + + +def print_host(host): + data = get_landscape_members_data() + meta = get_meta(data) + print(json.dumps(meta['hostvars'][host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script reading from landscape cluster') + mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from landscape cluster' + mutex_group.add_argument('--list', action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/plugins/inventory/libcloud.ini b/contrib/inventory/libcloud.ini similarity index 100% rename from plugins/inventory/libcloud.ini rename to contrib/inventory/libcloud.ini diff --git a/contrib/inventory/libvirt_lxc.py b/contrib/inventory/libvirt_lxc.py new file mode 100755 index 00000000000000..cb34d473cdad7b --- /dev/null +++ b/contrib/inventory/libvirt_lxc.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +# (c) 2013, Michael Scherer +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from subprocess import Popen, PIPE
+import sys
+import json
+
+result = {}
+result['all'] = {}
+
+pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True)
+result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
+result['all']['vars'] = {}
+result['all']['vars']['ansible_connection'] = 'libvirt_lxc'
+
+if len(sys.argv) == 2 and sys.argv[1] == '--list':
+    print(json.dumps(result))
+elif len(sys.argv) == 3 and sys.argv[1] == '--host':
+    print(json.dumps({'ansible_connection': 'libvirt_lxc'}))
+else:
+    print("Need an argument, either --list or --host <host>")
diff --git a/plugins/inventory/linode.ini b/contrib/inventory/linode.ini
similarity index 100%
rename from plugins/inventory/linode.ini
rename to contrib/inventory/linode.ini
diff --git a/plugins/inventory/linode.py b/contrib/inventory/linode.py
similarity index 94%
rename from plugins/inventory/linode.py
rename to contrib/inventory/linode.py
index cbce5f8a693668..0aa7098b316095 100755
--- a/plugins/inventory/linode.py
+++ b/contrib/inventory/linode.py
@@ -101,7 +101,7 @@
             from chube.linode_obj import Linode
 
         sys.path = old_path
-    except Exception, e:
+    except Exception as e:
         raise Exception("could not import chube")
 
 load_chube_config()
@@ -139,7 +139,7 @@ def __init__(self):
         else:
             data_to_print = self.json_format_dict(self.inventory, True)
 
-        print data_to_print
+        print(data_to_print)
 
     def is_cache_valid(self):
         """Determines if the cache file has expired, or if it is still valid."""
@@ -184,20 +184,20 @@ def get_nodes(self):
         try:
             for node in Linode.search(status=Linode.STATUS_RUNNING):
                 self.add_node(node)
-        except chube_api.linode_api.ApiError, e:
-            print "Looks like Linode's API is down:"
-            print
-            print e
+        except chube_api.linode_api.ApiError as e:
+            print("Looks like Linode's API is down:")
+            print("")
+            print(e)
             sys.exit(1)
 
     def get_node(self, linode_id):
         """Gets details about a specific node."""
         try:
             return Linode.find(api_id=linode_id)
-        except chube_api.linode_api.ApiError, e:
-            print "Looks like Linode's API is down:"
-            print
-            print e
+        except chube_api.linode_api.ApiError as e:
+            print("Looks like Linode's API is down:")
+            print("")
+            print(e)
             sys.exit(1)
 
     def populate_datacenter_cache(self):
@@ -280,6 +280,11 @@ def get_host_info(self):
         node_vars["datacenter_city"] = self.get_datacenter_city(node)
         node_vars["public_ip"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]
 
+        # Set the SSH host information, so these inventory items can be used if
+        # their labels aren't FQDNs
+        node_vars['ansible_ssh_host'] = node_vars["public_ip"]
+        node_vars['ansible_host'] = node_vars["public_ip"]
+
         private_ips = [addr.address for addr in node.ipaddresses if not addr.is_public]
 
         if private_ips:
diff --git a/contrib/inventory/nagios_ndo.ini b/contrib/inventory/nagios_ndo.ini
new file mode 100644
index 00000000000000..1e133a29f39a1e
--- /dev/null
+++ b/contrib/inventory/nagios_ndo.ini
@@ -0,0 +1,10 @@
+# Ansible Nagios external inventory script settings
+#
+
+[ndo]
+# NDO database URI
+# Make sure that data is returned as strings and not bytes if using python 3.
+# See http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html
+# for supported databases and URI format.
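+# Any database supported by SQLAlchemy can be used; for instance, with the
+# PyMySQL driver (assumed installed separately, not shipped with this script)
+# the URI would look like:
+# database_uri=mysql+pymysql://user:passwd@hostname/ndo?charset=utf8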
+# Example for mysqlclient module : +database_uri=mysql+mysqldb://user:passwd@hostname/ndo?charset=utf8&use_unicode=1 diff --git a/contrib/inventory/nagios_ndo.py b/contrib/inventory/nagios_ndo.py new file mode 100755 index 00000000000000..49ec56392aab3f --- /dev/null +++ b/contrib/inventory/nagios_ndo.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python + +# (c) 2014, Jonathan Lestrelin +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +""" +Nagios NDO external inventory script. +======================================== + +Returns hosts and hostgroups from Nagios NDO. + +Configuration is read from `nagios_ndo.ini`. +""" + +import os +import argparse +try: + import configparser +except ImportError: + import ConfigParser + configparser = ConfigParser +import json + +try: + from sqlalchemy import text + from sqlalchemy.engine import create_engine +except ImportError: + print("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy") + exit(1) + +class NagiosNDOInventory(object): + + def read_settings(self): + config = configparser.SafeConfigParser() + config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini') + if config.has_option('ndo', 'database_uri'): + self.ndo_database_uri = config.get('ndo', 'database_uri') + + def read_cli(self): + parser = argparse.ArgumentParser() + parser.add_argument('--host', nargs=1) + parser.add_argument('--list', action='store_true') + self.options = parser.parse_args() + + def get_hosts(self): + engine = create_engine(self.ndo_database_uri) + connection = engine.connect() + select_hosts = text("SELECT display_name \ + FROM nagios_hosts") + select_hostgroups = text("SELECT alias \ + FROM nagios_hostgroups") + select_hostgroup_hosts = text("SELECT h.display_name \ + FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \ + WHERE hgm.hostgroup_id = hg.hostgroup_id \ + AND hgm.host_object_id = h.host_object_id \ + AND hg.alias =:hostgroup_alias") + + hosts = connection.execute(select_hosts) + self.result['all']['hosts'] = [host['display_name'] for host in hosts] + + for hostgroup in connection.execute(select_hostgroups): + hostgroup_alias = hostgroup['alias'] + self.result[hostgroup_alias] = {} + hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias) + self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts] + + def __init__(self): + + self.defaultgroup = 'group_all' + self.ndo_database_uri = None + self.options = None + + self.read_settings() + self.read_cli() + + self.result = {} + self.result['all'] = {} + self.result['all']['hosts'] = [] + self.result['_meta'] = {} + self.result['_meta']['hostvars'] = {} + + if self.ndo_database_uri: + self.get_hosts() + if self.options.host: + print(json.dumps({})) + elif self.options.list: + print(json.dumps(self.result)) + else: + print("usage: --list or --host HOSTNAME") + exit(1) + else: + print("Error: Database configuration is missing. 
See nagios_ndo.ini.") + exit(1) + +NagiosNDOInventory() diff --git a/contrib/inventory/nova.ini b/contrib/inventory/nova.ini new file mode 100644 index 00000000000000..c5cfeef8104efc --- /dev/null +++ b/contrib/inventory/nova.ini @@ -0,0 +1,48 @@ +# Ansible OpenStack external inventory script +# DEPRECATED: please use openstack.py inventory which is configured for +# auth using the os-client-config library and either clouds.yaml or standard +# openstack environment variables + +[openstack] + +#------------------------------------------------------------------------- +# Required settings +#------------------------------------------------------------------------- + +# API version +version = 2 + +# OpenStack nova username +username = + +# OpenStack nova api_key or password +api_key = + +# OpenStack nova auth_url +auth_url = + +# OpenStack nova project_id or tenant name +project_id = + +#------------------------------------------------------------------------- +# Optional settings +#------------------------------------------------------------------------- + +# Authentication system +# auth_system = keystone + +# Serverarm region name to use +# region_name = + +# Specify a preference for public or private IPs (public is default) +# prefer_private = False + +# What service type (required for newer nova client) +# service_type = compute + + +# TODO: Some other options +# insecure = +# endpoint_type = +# extensions = +# service_name = diff --git a/contrib/inventory/nova.py b/contrib/inventory/nova.py new file mode 100755 index 00000000000000..e8f3b9a62602f7 --- /dev/null +++ b/contrib/inventory/nova.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python + +# (c) 2012, Marco Vito Moscaritolo +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WARNING: This file is deprecated. New work should focus on the openstack.py +# inventory module, which properly handles multiple clouds as well as keystone +# v3 and keystone auth plugins + +import sys +import re +import os +import ConfigParser +from novaclient import client as nova_client +from six import iteritems + +try: + import json +except ImportError: + import simplejson as json + + +sys.stderr.write("WARNING: this inventory module is deprecated. 
please migrate usage to openstack.py\n") + +################################################### +# executed with no parameters, return the list of +# all groups and hosts + +NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini", + os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")), + "/etc/ansible/nova.ini"] + +NOVA_DEFAULTS = { + 'auth_system': None, + 'region_name': None, + 'service_type': 'compute', +} + + +def nova_load_config_file(): + p = ConfigParser.SafeConfigParser(NOVA_DEFAULTS) + + for path in NOVA_CONFIG_FILES: + if os.path.exists(path): + p.read(path) + return p + + return None + + +def get_fallback(config, value, section="openstack"): + """ + Get value from config object and return the value + or false + """ + try: + return config.get(section, value) + except ConfigParser.NoOptionError: + return False + + +def push(data, key, element): + """ + Assist in items to a dictionary of lists + """ + if (not element) or (not key): + return + + if key in data: + data[key].append(element) + else: + data[key] = [element] + + +def to_safe(word): + ''' + Converts 'bad' characters in a string to underscores so they can + be used as Ansible groups + ''' + return re.sub(r"[^A-Za-z0-9\-]", "_", word) + + +def get_ips(server, access_ip=True): + """ + Returns a list of the server's IPs, or the preferred + access IP + """ + private = [] + public = [] + address_list = [] + # Iterate through each servers network(s), get addresses and get type + addresses = getattr(server, 'addresses', {}) + if len(addresses) > 0: + for network in addresses.itervalues(): + for address in network: + if address.get('OS-EXT-IPS:type', False) == 'fixed': + private.append(address['addr']) + elif address.get('OS-EXT-IPS:type', False) == 'floating': + public.append(address['addr']) + + if not access_ip: + address_list.append(server.accessIPv4) + address_list.extend(private) + address_list.extend(public) + return address_list + + access_ip = None + # Append group to list + if server.accessIPv4: + access_ip = server.accessIPv4 + if (not access_ip) and public and not (private and prefer_private): + access_ip = public[0] + if private and not access_ip: + access_ip = private[0] + + return access_ip + + +def get_metadata(server): + """Returns dictionary of all host metadata""" + get_ips(server, False) + results = {} + for key in vars(server): + # Extract value + value = getattr(server, key) + + # Generate sanitized key + key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower() + + # Att value to instance result (exclude manager class) + #TODO: maybe use value.__class__ or similar inside of key_name + if key != 'os_manager': + results[key] = value + return results + +config = nova_load_config_file() +if not config: + sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES)) + +# Load up connections info based on config and then environment +# variables +username = (get_fallback(config, 'username') or + os.environ.get('OS_USERNAME', None)) +api_key = (get_fallback(config, 'api_key') or + os.environ.get('OS_PASSWORD', None)) +auth_url = (get_fallback(config, 'auth_url') or + os.environ.get('OS_AUTH_URL', None)) +project_id = (get_fallback(config, 'project_id') or + os.environ.get('OS_TENANT_NAME', None)) +region_name = (get_fallback(config, 'region_name') or + os.environ.get('OS_REGION_NAME', None)) +auth_system = (get_fallback(config, 'auth_system') or + os.environ.get('OS_AUTH_SYSTEM', None)) + +# Determine what type of IP is preferred to return +prefer_private = False +try: + prefer_private = 
config.getboolean('openstack', 'prefer_private') +except ConfigParser.NoOptionError: + pass + +client = nova_client.Client( + version=config.get('openstack', 'version'), + username=username, + api_key=api_key, + auth_url=auth_url, + region_name=region_name, + project_id=project_id, + auth_system=auth_system, + service_type=config.get('openstack', 'service_type'), +) + +# Default or added list option +if (len(sys.argv) == 2 and sys.argv[1] == '--list') or len(sys.argv) == 1: + groups = {'_meta': {'hostvars': {}}} + # Cycle on servers + for server in client.servers.list(): + access_ip = get_ips(server) + + # Push to name group of 1 + push(groups, server.name, access_ip) + + # Run through each metadata item and add instance to it + for key, value in iteritems(server.metadata): + composed_key = to_safe('tag_{0}_{1}'.format(key, value)) + push(groups, composed_key, access_ip) + + # Do special handling of group for backwards compat + # inventory groups + group = server.metadata['group'] if 'group' in server.metadata else 'undefined' + push(groups, group, access_ip) + + # Add vars to _meta key for performance optimization in + # Ansible 1.3+ + groups['_meta']['hostvars'][access_ip] = get_metadata(server) + + # Return server list + print(json.dumps(groups, sort_keys=True, indent=2)) + sys.exit(0) + +##################################################### +# executed with a hostname as a parameter, return the +# variables for that host + +elif len(sys.argv) == 3 and (sys.argv[1] == '--host'): + results = {} + ips = [] + for server in client.servers.list(): + if sys.argv[2] in (get_ips(server) or []): + results = get_metadata(server) + print(json.dumps(results, sort_keys=True, indent=2)) + sys.exit(0) + +else: + print("usage: --list ..OR.. --host ") + sys.exit(1) diff --git a/contrib/inventory/nsot.py b/contrib/inventory/nsot.py new file mode 100644 index 00000000000000..0ca1625df3773a --- /dev/null +++ b/contrib/inventory/nsot.py @@ -0,0 +1,341 @@ +#!/bin/env python + +''' +nsot +==== + +Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox + +Features +-------- + +* Define host groups in form of NSoT device attribute criteria + +* All parameters defined by the spec as of 2015-09-05 are supported. + + + ``--list``: Returns JSON hash of host groups -> hosts and top-level + ``_meta`` -> ``hostvars`` which correspond to all device attributes. + + Group vars can be specified in the YAML configuration, noted below. + + + ``--host ``: Returns JSON hash where every item is a device + attribute. + +* In addition to all attributes assigned to resource being returned, script + will also append ``site_id`` and ``id`` as facts to utilize. + + +Confguration +------------ + +Since it'd be annoying and failure prone to guess where you're configuration +file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it. + +This file should adhere to the YAML spec. All top-level variable must be +desired Ansible group-name hashed with single 'query' item to define the NSoT +attribute query. + +Queries follow the normal NSoT query syntax, `shown here`_ + +.. _shown here: https://github.com/dropbox/pynsot#set-queries + +.. code:: yaml + + routers: + query: 'deviceType=ROUTER' + vars: + a: b + c: d + + juniper_fw: + query: 'deviceType=FIREWALL manufacturer=JUNIPER' + + not_f10: + query: '-manufacturer=FORCE10' + +The inventory will automatically use your ``.pynsotrc`` like normal pynsot from +cli would, so make sure that's configured appropriately. + +.. 
note:: + + Attributes I'm showing above are influenced from ones that the Trigger + project likes. As is the spirit of NSoT, use whichever attributes work best + for your workflow. + +If config file is blank or absent, the following default groups will be +created: + +* ``routers``: deviceType=ROUTER +* ``switches``: deviceType=SWITCH +* ``firewalls``: deviceType=FIREWALL + +These are likely not useful for everyone so please use the configuration. :) + +.. note:: + + By default, resources will only be returned for what your default + site is set for in your ``~/.pynsotrc``. + + If you want to specify, add an extra key under the group for ``site: n``. + +Output Examples +--------------- + +Here are some examples shown from just calling the command directly:: + + $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.' + { + "routers": { + "hosts": [ + "test1.example.com" + ], + "vars": { + "cool_level": "very", + "group": "routers" + } + }, + "firewalls": { + "hosts": [ + "test2.example.com" + ], + "vars": { + "cool_level": "enough", + "group": "firewalls" + } + }, + "_meta": { + "hostvars": { + "test2.example.com": { + "make": "SRX", + "site_id": 1, + "id": 108 + }, + "test1.example.com": { + "make": "MX80", + "site_id": 1, + "id": 107 + } + } + }, + "rtr_and_fw": { + "hosts": [ + "test1.example.com", + "test2.example.com" + ], + "vars": {} + } + } + + + $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.' + { + "make": "MX80", + "site_id": 1, + "id": 107 + } + +''' + +from __future__ import print_function +import sys +import os +import pkg_resources +import argparse +import json +import yaml +from textwrap import dedent +from pynsot.client import get_api_client +from pynsot.app import HttpServerError +from click.exceptions import UsageError + + +def warning(*objs): + print("WARNING: ", *objs, file=sys.stderr) + + +class NSoTInventory(object): + '''NSoT Client object for gather inventory''' + + def __init__(self): + self.config = dict() + config_env = os.environ.get('NSOT_INVENTORY_CONFIG') + if config_env: + try: + config_file = os.path.abspath(config_env) + except IOError: # If file non-existent, use default config + self._config_default() + except Exception as e: + sys.exit('%s\n' % e) + + with open(config_file) as f: + try: + self.config.update(yaml.safe_load(f)) + except TypeError: # If empty file, use default config + warning('Empty config file') + self._config_default() + except Exception as e: + sys.exit('%s\n' % e) + else: # Use defaults if env var missing + self._config_default() + self.groups = self.config.keys() + self.client = get_api_client() + self._meta = {'hostvars': dict()} + + def _config_default(self): + default_yaml = ''' + --- + routers: + query: deviceType=ROUTER + switches: + query: deviceType=SWITCH + firewalls: + query: deviceType=FIREWALL + ''' + self.config = yaml.safe_load(dedent(default_yaml)) + + def do_list(self): + '''Direct callback for when ``--list`` is provided + + Relies on the configuration generated from init to run + _inventory_group() + ''' + inventory = dict() + for group, contents in self.config.iteritems(): + group_response = self._inventory_group(group, contents) + inventory.update(group_response) + inventory.update({'_meta': self._meta}) + return json.dumps(inventory) + + def do_host(self, host): + return json.dumps(self._hostvars(host)) + + def _hostvars(self, host): + '''Return dictionary of all device attributes + + Depending on number of devices in NSoT, could be rather slow since this + has to request every 
device resource to filter through + ''' + device = [i for i in self.client.devices.get()['data']['devices'] + if host in i['hostname']][0] + attributes = device['attributes'] + attributes.update({'site_id': device['site_id'], 'id': device['id']}) + return attributes + + def _inventory_group(self, group, contents): + '''Takes a group and returns inventory for it as dict + + :param group: Group name + :type group: str + :param contents: The contents of the group's YAML config + :type contents: dict + + contents param should look like:: + + { + 'query': 'xx', + 'vars': + 'a': 'b' + } + + Will return something like:: + + { group: { + hosts: [], + vars: {}, + } + ''' + query = contents.get('query') + hostvars = contents.get('vars', dict()) + site = contents.get('site', dict()) + obj = {group: dict()} + obj[group]['hosts'] = [] + obj[group]['vars'] = hostvars + try: + assert isinstance(query, basestring) + except: + sys.exit('ERR: Group queries must be a single string\n' + ' Group: %s\n' + ' Query: %s\n' % (group, query) + ) + try: + if site: + site = self.client.sites(site) + devices = site.devices.query.get(query=query) + else: + devices = self.client.devices.query.get(query=query) + except HttpServerError as e: + if '500' in str(e.response): + _site = 'Correct site id?' + _attr = 'Queried attributes actually exist?' + questions = _site + '\n' + _attr + sys.exit('ERR: 500 from server.\n%s' % questions) + else: + raise + except UsageError: + sys.exit('ERR: Could not connect to server. Running?') + + # Would do a list comprehension here, but would like to save code/time + # and also acquire attributes in this step + for host in devices['data']['devices']: + # Iterate through each device that matches query, assign hostname + # to the group's hosts array and then use this single iteration as + # a chance to update self._meta which will be used in the final + # return + hostname = host['hostname'] + obj[group]['hosts'].append(hostname) + attributes = host['attributes'] + attributes.update({'site_id': host['site_id'], 'id': host['id']}) + self._meta['hostvars'].update({hostname: attributes}) + + return obj + + +def parse_args(): + desc = __doc__.splitlines()[4] # Just to avoid being redundant + + # Establish parser with options and error out if no action provided + parser = argparse.ArgumentParser( + description=desc, + conflict_handler='resolve', + ) + + # Arguments + # + # Currently accepting (--list | -l) and (--host | -h) + # These must not be allowed together + parser.add_argument( + '--list', '-l', + help='Print JSON object containing hosts to STDOUT', + action='store_true', + dest='list_', # Avoiding syntax highlighting for list + ) + + parser.add_argument( + '--host', '-h', + help='Print JSON object containing hostvars for ', + action='store', + ) + args = parser.parse_args() + + if not args.list_ and not args.host: # Require at least one option + parser.exit(status=1, message='No action requested') + + if args.list_ and args.host: # Do not allow multiple options + parser.exit(status=1, message='Too many actions requested') + + return args + + +def main(): + '''Set up argument handling and callback routing''' + args = parse_args() + client = NSoTInventory() + + # Callback condition + if args.list_: + print(client.do_list()) + elif args.host: + print(client.do_host(args.host)) + +if __name__ == '__main__': + main() diff --git a/contrib/inventory/nsot.yaml b/contrib/inventory/nsot.yaml new file mode 100644 index 00000000000000..ebddbc823464f8 --- /dev/null +++ b/contrib/inventory/nsot.yaml @@ 
-0,0 +1,22 @@ +--- +juniper_routers: + query: 'deviceType=ROUTER manufacturer=JUNIPER' + vars: + group: juniper_routers + netconf: true + os: junos + +cisco_asa: + query: 'manufacturer=CISCO deviceType=FIREWALL' + vars: + group: cisco_asa + routed_vpn: false + stateful: true + +old_cisco_asa: + query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+' + vars: + old_nat: true + +not_f10: + query: '-manufacturer=FORCE10' diff --git a/plugins/inventory/openshift.py b/contrib/inventory/openshift.py similarity index 76% rename from plugins/inventory/openshift.py rename to contrib/inventory/openshift.py index c6acb6ff8c4c6e..67d37a7330a15e 100755 --- a/plugins/inventory/openshift.py +++ b/contrib/inventory/openshift.py @@ -28,7 +28,6 @@ author: Michael Scherer ''' -import urllib2 try: import json except ImportError: @@ -39,6 +38,8 @@ import ConfigParser import StringIO +from ansible.module_utils.urls import open_url + configparser = None @@ -61,39 +62,26 @@ def get_config(env_var, config_var): if not result: result = get_from_rhc_config(config_var) if not result: - print "failed=True msg='missing %s'" % env_var + print("failed=True msg='missing %s'" % env_var) sys.exit(1) return result -def get_json_from_api(url): - req = urllib2.Request(url, None, {'Accept': 'application/json; version=1.5'}) - response = urllib2.urlopen(req) +def get_json_from_api(url, username, password): + headers = {'Accept': 'application/json; version=1.5'} + response = open_url(url, headers=headers, url_username=username, url_password=password) return json.loads(response.read())['data'] -def passwd_setup(top_level_url, username, password): - # create a password manager - password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm() - password_mgr.add_password(None, top_level_url, username, password) - - handler = urllib2.HTTPBasicAuthHandler(password_mgr) - opener = urllib2.build_opener(handler) - - urllib2.install_opener(opener) - - username = get_config('ANSIBLE_OPENSHIFT_USERNAME', 'default_rhlogin') password = get_config('ANSIBLE_OPENSHIFT_PASSWORD', 'password') broker_url = 'https://%s/broker/rest/' % get_config('ANSIBLE_OPENSHIFT_BROKER', 'libra_server') -passwd_setup(broker_url, username, password) - -response = get_json_from_api(broker_url + '/domains') +response = get_json_from_api(broker_url + '/domains', username, password) response = get_json_from_api("%s/domains/%s/applications" % - (broker_url, response[0]['id'])) + (broker_url, response[0]['id']), username, password) result = {} for app in response: @@ -109,8 +97,8 @@ def passwd_setup(top_level_url, username, password): result[app_name]['vars']['ansible_ssh_user'] = user if len(sys.argv) == 2 and sys.argv[1] == '--list': - print json.dumps(result) + print(json.dumps(result)) elif len(sys.argv) == 3 and sys.argv[1] == '--host': - print json.dumps({}) + print(json.dumps({})) else: - print "Need an argument, either --list or --host " + print("Need an argument, either --list or --host ") diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py new file mode 100755 index 00000000000000..b82a042c29e16e --- /dev/null +++ b/contrib/inventory/openstack.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python + +# Copyright (c) 2012, Marco Vito Moscaritolo +# Copyright (c) 2013, Jesse Keating +# Copyright (c) 2015, Hewlett-Packard Development Company, L.P. 
+# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +# The OpenStack Inventory module uses os-client-config for configuration. +# https://github.com/stackforge/os-client-config +# This means it will either: +# - Respect normal OS_* environment variables like other OpenStack tools +# - Read values from a clouds.yaml file. +# If you want to configure via clouds.yaml, you can put the file in: +# - Current directory +# - ~/.config/openstack/clouds.yaml +# - /etc/openstack/clouds.yaml +# - /etc/ansible/openstack.yml +# The clouds.yaml file can contain entries for multiple clouds and multiple +# regions of those clouds. If it does, this inventory module will connect to +# all of them and present them as one contiguous inventory. +# +# See the adjacent openstack.yml file for an example config file +# There are two ansible inventory specific options that can be set in +# the inventory section. +# expand_hostvars controls whether or not the inventory will make extra API +# calls to fill out additional information about each server +# use_hostnames changes the behavior from registering every host with its UUID +# and making a group of its hostname to only doing this if the +# hostname in question has more than one server + +import argparse +import collections +import os +import sys +import time + +try: + import json +except: + import simplejson as json + +import os_client_config +import shade +import shade.inventory + +CONFIG_FILES = ['/etc/ansible/openstack.yaml'] + + +def get_groups_from_server(server_vars, namegroup=True): + groups = [] + + region = server_vars['region'] + cloud = server_vars['cloud'] + metadata = server_vars.get('metadata', {}) + + # Create a group for the cloud + groups.append(cloud) + + # Create a group on region + groups.append(region) + + # And one by cloud_region + groups.append("%s_%s" % (cloud, region)) + + # Check if group metadata key in servers' metadata + if 'group' in metadata: + groups.append(metadata['group']) + + for extra_group in metadata.get('groups', '').split(','): + if extra_group: + groups.append(extra_group) + + groups.append('instance-%s' % server_vars['id']) + if namegroup: + groups.append(server_vars['name']) + + for key in ('flavor', 'image'): + if 'name' in server_vars[key]: + groups.append('%s-%s' % (key, server_vars[key]['name'])) + + for key, value in iter(metadata.items()): + groups.append('meta-%s_%s' % (key, value)) + + az = server_vars.get('az', None) + if az: + # Make groups for az, region_az and cloud_region_az + groups.append(az) + groups.append('%s_%s' % (region, az)) + groups.append('%s_%s_%s' % (cloud, region, az)) + return groups + + +def get_host_groups(inventory, refresh=False): + (cache_file, cache_expiration_time) = get_cache_settings() + if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): + groups = to_json(get_host_groups_from_cloud(inventory)) + open(cache_file, 'w').write(groups) + else: + groups = open(cache_file, 'r').read() + return groups + + +def 
get_host_groups_from_cloud(inventory): + groups = collections.defaultdict(list) + firstpass = collections.defaultdict(list) + hostvars = {} + list_args = {} + if hasattr(inventory, 'extra_config'): + use_hostnames = inventory.extra_config['use_hostnames'] + list_args['expand'] = inventory.extra_config['expand_hostvars'] + else: + use_hostnames = False + + for server in inventory.list_hosts(**list_args): + + if 'interface_ip' not in server: + continue + firstpass[server['name']].append(server) + for name, servers in firstpass.items(): + if len(servers) == 1 and use_hostnames: + server = servers[0] + hostvars[name] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + for group in get_groups_from_server(server, namegroup=False): + groups[group].append(server['name']) + else: + for server in servers: + server_id = server['id'] + hostvars[server_id] = dict( + ansible_ssh_host=server['interface_ip'], + openstack=server) + for group in get_groups_from_server(server, namegroup=True): + groups[group].append(server_id) + groups['_meta'] = {'hostvars': hostvars} + return groups + + +def is_cache_stale(cache_file, cache_expiration_time, refresh=False): + ''' Determines if cache file has expired, or if it is still valid ''' + if refresh: + return True + if os.path.isfile(cache_file): + mod_time = os.path.getmtime(cache_file) + current_time = time.time() + if (mod_time + cache_expiration_time) > current_time: + return False + return True + + +def get_cache_settings(): + config = os_client_config.config.OpenStackConfig( + config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) + # For inventory-wide caching + cache_expiration_time = config.get_cache_expiration_time() + cache_path = config.get_cache_path() + if not os.path.exists(cache_path): + os.makedirs(cache_path) + cache_file = os.path.join(cache_path, 'ansible-inventory.cache') + return (cache_file, cache_expiration_time) + + +def to_json(in_dict): + return json.dumps(in_dict, sort_keys=True, indent=2) + + +def parse_args(): + parser = argparse.ArgumentParser(description='OpenStack Inventory Module') + parser.add_argument('--private', + action='store_true', + help='Use private address for ansible host') + parser.add_argument('--refresh', action='store_true', + help='Refresh cached information') + parser.add_argument('--debug', action='store_true', default=False, + help='Enable debug output') + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true', + help='List active servers') + group.add_argument('--host', help='List details about the specific host') + + return parser.parse_args() + + +def main(): + args = parse_args() + try: + config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES + shade.simple_logging(debug=args.debug) + inventory_args = dict( + refresh=args.refresh, + config_files=config_files, + private=args.private, + ) + if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): + inventory_args.update(dict( + config_key='ansible', + config_defaults={ + 'use_hostnames': False, + 'expand_hostvars': True, + } + )) + + inventory = shade.inventory.OpenStackInventory(**inventory_args) + + if args.list: + output = get_host_groups(inventory, refresh=args.refresh) + elif args.host: + output = to_json(inventory.get_host(args.host)) + print(output) + except shade.OpenStackCloudException as e: + sys.stderr.write('%s\n' % e.message) + sys.exit(1) + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/plugins/inventory/openstack.yml 
b/contrib/inventory/openstack.yml similarity index 92% rename from plugins/inventory/openstack.yml rename to contrib/inventory/openstack.yml index a99bb020580754..1520e2937ec966 100644 --- a/plugins/inventory/openstack.yml +++ b/contrib/inventory/openstack.yml @@ -26,3 +26,6 @@ clouds: username: stack password: stack project_name: stack +ansible: + use_hostnames: True + expand_hostvars: False diff --git a/contrib/inventory/openvz.py b/contrib/inventory/openvz.py new file mode 100755 index 00000000000000..68d51c8a42e81f --- /dev/null +++ b/contrib/inventory/openvz.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# openvz.py +# +# Copyright 2014 jordonr +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# +# Inspired by libvirt_lxc.py inventory script +# https://github.com/ansible/ansible/blob/e5ef0eca03cbb6c8950c06dc50d0ca22aa8902f4/plugins/inventory/libvirt_lxc.py +# +# Groups are determined by the description field of openvz guests +# multiple groups can be separated by commas: webserver,dbserver + +from subprocess import Popen,PIPE +import sys +import json + + +#List openvz hosts +vzhosts = ['vzhost1','vzhost2','vzhost3'] +#Add openvz hosts to the inventory and Add "_meta" trick +inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}} +#default group, when description not defined +default_group = ['vzguest'] + +def get_guests(): + #Loop through vzhosts + for h in vzhosts: + #SSH to vzhost and get the list of guests in json + pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True) + + #Load Json info of guests + json_data = json.loads(pipe.stdout.read()) + + #loop through guests + for j in json_data: + #Add information to host vars + inventory['_meta']['hostvars'][j['hostname']] = {'ctid': j['ctid'], 'veid': j['veid'], 'vpsid': j['vpsid'], 'private_path': j['private'], 'root_path': j['root'], 'ip': j['ip']} + + #determine group from guest description + if j['description'] is not None: + groups = j['description'].split(",") + else: + groups = default_group + + #add guest to inventory + for g in groups: + if g not in inventory: + inventory[g] = {'hosts': []} + + inventory[g]['hosts'].append(j['hostname']) + + return inventory + + +if len(sys.argv) == 2 and sys.argv[1] == '--list': + inv_json = get_guests() + print(json.dumps(inv_json, sort_keys=True)) +elif len(sys.argv) == 3 and sys.argv[1] == '--host': + print(json.dumps({})) +else: + print("Need an argument, either --list or --host ") diff --git a/contrib/inventory/ovirt.ini b/contrib/inventory/ovirt.ini new file mode 100644 index 00000000000000..a52f9d63ff5ba6 --- /dev/null +++ b/contrib/inventory/ovirt.ini @@ -0,0 +1,33 @@ +# Copyright 2013 Google Inc. 
+# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +# Author: Josha Inglis based on the gce.ini by Eric Johnson + +[ovirt] +# ovirt Service Account configuration information can be stored in the +# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already +# exist in your PYTHONPATH and be picked up automatically with an import +# statement in the inventory script. However, you can specify an absolute +# path to the secrets.py file with 'libcloud_secrets' parameter. +ovirt_api_secrets = + +# If you are not going to use a 'secrets.py' file, you can set the necessary +# authorization parameters here. +ovirt_url = +ovirt_username = +ovirt_password = diff --git a/contrib/inventory/ovirt.py b/contrib/inventory/ovirt.py new file mode 100755 index 00000000000000..23646fa2068d50 --- /dev/null +++ b/contrib/inventory/ovirt.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python +# Copyright 2015 IIX Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +""" +ovirt external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests to +oVirt via the ovirt-engine-sdk-python library. + +When run against a specific host, this script returns the following variables +based on the data obtained from the ovirt_sdk Node object: + - ovirt_uuid + - ovirt_id + - ovirt_image + - ovirt_machine_type + - ovirt_ips + - ovirt_name + - ovirt_description + - ovirt_status + - ovirt_zone + - ovirt_tags + - ovirt_stats + +When run in --list mode, instances are grouped by the following categories: + + - zone: + zone group name. + - instance tags: + An entry is created for each tag. For example, if you have two instances + with a common tag called 'foo', they will both be grouped together under + the 'tag_foo' name. + - network name: + the name of the network is appended to 'network_' (e.g. the 'default' + network will result in a group named 'network_default') + - running status: + group name prefixed with 'status_' (e.g. status_up, status_down,..) 
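+
+A sketch of the resulting --list output shape (the cluster, tag, and host
+names below are hypothetical, and the hostvars entry is abridged):
+
+  {
+    "Default": ["vm01"],
+    "status_up": ["vm01"],
+    "tag_web": ["vm01"],
+    "_meta": {"hostvars": {"vm01": {"ovirt_name": "vm01", "ovirt_status": "up"}}}
+  }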
+ +Examples: + Execute uname on all instances in the us-central1-a zone + $ ansible -i ovirt.py us-central1-a -m shell -a "/bin/uname -a" + + Use the ovirt inventory script to print out instance specific information + $ contrib/inventory/ovirt.py --host my_instance + +Author: Josha Inglis based on the gce.py by Eric Johnson +Version: 0.0.1 +""" + +USER_AGENT_PRODUCT = "Ansible-ovirt_inventory_plugin" +USER_AGENT_VERSION = "v1" + +import sys +import os +import argparse +import ConfigParser +from collections import defaultdict + +try: + import json +except ImportError: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import simplejson as json + +try: + # noinspection PyUnresolvedReferences + from ovirtsdk.api import API + # noinspection PyUnresolvedReferences + from ovirtsdk.xml import params +except ImportError: + print("ovirt inventory script requires ovirt-engine-sdk-python") + sys.exit(1) + + +class OVirtInventory(object): + def __init__(self): + # Read settings and parse CLI arguments + self.args = self.parse_cli_args() + self.driver = self.get_ovirt_driver() + + # Just display data for specific host + if self.args.host: + print(self.json_format_dict( + self.node_to_dict(self.get_instance(self.args.host)), + pretty=self.args.pretty + )) + sys.exit(0) + + # Otherwise, assume user wants all instances grouped + print( + self.json_format_dict( + data=self.group_instances(), + pretty=self.args.pretty + ) + ) + sys.exit(0) + + @staticmethod + def get_ovirt_driver(): + """ + Determine the ovirt authorization settings and return a ovirt_sdk driver. + + :rtype : ovirtsdk.api.API + """ + kwargs = {} + + ovirt_ini_default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "ovirt.ini") + ovirt_ini_path = os.environ.get('OVIRT_INI_PATH', ovirt_ini_default_path) + + # Create a ConfigParser. + # This provides empty defaults to each key, so that environment + # variable configuration (as opposed to INI configuration) is able + # to work. + config = ConfigParser.SafeConfigParser(defaults={ + 'ovirt_url': '', + 'ovirt_username': '', + 'ovirt_password': '', + 'ovirt_api_secrets': '', + }) + if 'ovirt' not in config.sections(): + config.add_section('ovirt') + config.read(ovirt_ini_path) + + # Attempt to get ovirt params from a configuration file, if one + # exists. + secrets_path = config.get('ovirt', 'ovirt_api_secrets') + secrets_found = False + try: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import secrets + + kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) + secrets_found = True + except ImportError: + pass + + if not secrets_found and secrets_path: + if not secrets_path.endswith('secrets.py'): + err = "Must specify ovirt_sdk secrets file as /absolute/path/to/secrets.py" + print(err) + sys.exit(1) + sys.path.append(os.path.dirname(secrets_path)) + try: + # noinspection PyUnresolvedReferences,PyPackageRequirements + import secrets + + kwargs = getattr(secrets, 'OVIRT_KEYWORD_PARAMS', {}) + except ImportError: + pass + if not secrets_found: + kwargs = { + 'url': config.get('ovirt', 'ovirt_url'), + 'username': config.get('ovirt', 'ovirt_username'), + 'password': config.get('ovirt', 'ovirt_password'), + } + + # If the appropriate environment variables are set, they override + # other configuration; process those into our args and kwargs. + kwargs['url'] = os.environ.get('OVIRT_URL') + kwargs['username'] = os.environ.get('OVIRT_EMAIL') + kwargs['password'] = os.environ.get('OVIRT_PASS') + + # Retrieve and return the ovirt driver. 
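+        # Note: as written, the environment lookups above replace any
+        # INI-derived values with None whenever the variables are unset;
+        # a guarded override (a sketch using the same keys) would be:
+        #
+        #     for env_var, key in (('OVIRT_URL', 'url'),
+        #                          ('OVIRT_EMAIL', 'username'),
+        #                          ('OVIRT_PASS', 'password')):
+        #         if os.environ.get(env_var):
+        #             kwargs[key] = os.environ[env_var]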
+ return API(insecure=True, **kwargs) + + @staticmethod + def parse_cli_args(): + """ + Command line argument processing + + :rtype : argparse.Namespace + """ + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on ovirt') + parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') + parser.add_argument('--host', action='store', help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, help='Pretty format (default: False)') + return parser.parse_args() + + def node_to_dict(self, inst): + """ + :type inst: params.VM + """ + if inst is None: + return {} + + inst.get_custom_properties() + ips = [ip.get_address() for ip in inst.get_guest_info().get_ips().get_ip()] \ + if inst.get_guest_info() is not None else [] + stats = {} + for stat in inst.get_statistics().list(): + stats[stat.get_name()] = stat.get_values().get_value()[0].get_datum() + + return { + 'ovirt_uuid': inst.get_id(), + 'ovirt_id': inst.get_id(), + 'ovirt_image': inst.get_os().get_type(), + 'ovirt_machine_type': inst.get_instance_type(), + 'ovirt_ips': ips, + 'ovirt_name': inst.get_name(), + 'ovirt_description': inst.get_description(), + 'ovirt_status': inst.get_status().get_state(), + 'ovirt_zone': inst.get_cluster().get_id(), + 'ovirt_tags': self.get_tags(inst), + 'ovirt_stats': stats, + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': ips[0] if len(ips) > 0 else None + } + + @staticmethod + def get_tags(inst): + """ + :type inst: params.VM + """ + return [x.get_name() for x in inst.get_tags().list()] + + # noinspection PyBroadException,PyUnusedLocal + def get_instance(self, instance_name): + """Gets details about a specific instance """ + try: + return self.driver.vms.get(name=instance_name) + except Exception as e: + return None + + def group_instances(self): + """Group all instances""" + groups = defaultdict(list) + meta = {"hostvars": {}} + + for node in self.driver.vms.list(): + assert isinstance(node, params.VM) + name = node.get_name() + + meta["hostvars"][name] = self.node_to_dict(node) + + zone = node.get_cluster().get_name() + groups[zone].append(name) + + tags = self.get_tags(node) + for t in tags: + tag = 'tag_%s' % t + groups[tag].append(name) + + nets = [x.get_name() for x in node.get_nics().list()] + for net in nets: + net = 'network_%s' % net + groups[net].append(name) + + status = node.get_status().get_state() + stat = 'status_%s' % status.lower() + if stat in groups: + groups[stat].append(name) + else: + groups[stat] = [name] + + groups["_meta"] = meta + + return groups + + @staticmethod + def json_format_dict(data, pretty=False): + """ Converts a dict to a JSON object and dumps it as a formatted + string """ + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + +# Run the script +OVirtInventory() diff --git a/contrib/inventory/proxmox.py b/contrib/inventory/proxmox.py new file mode 100755 index 00000000000000..c0ffb0b16c64c3 --- /dev/null +++ b/contrib/inventory/proxmox.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python + +# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Updated 2016 by Matt Harris +# +# Added support for Proxmox VE 4.x +# Added support for using the Notes field of a VM to define groups and variables: +# A well-formatted JSON object in the Notes field will be added to the _meta +# section for that VM. In addition, the "groups" key of this JSON object may be +# used to specify group membership: +# +# { "groups": ["utility", "databases"], "a": false, "b": true } + +import urllib +try: + import json +except ImportError: + import simplejson as json +import os +import sys +from optparse import OptionParser + +from six import iteritems + +from ansible.module_utils.urls import open_url + +class ProxmoxNodeList(list): + def get_names(self): + return [node['node'] for node in self] + +class ProxmoxVM(dict): + def get_variables(self): + variables = {} + for key, value in iteritems(self): + variables['proxmox_' + key] = value + return variables + +class ProxmoxVMList(list): + def __init__(self, data=[]): + for item in data: + self.append(ProxmoxVM(item)) + + def get_names(self): + return [vm['name'] for vm in self if vm['template'] != 1] + + def get_by_name(self, name): + results = [vm for vm in self if vm['name'] == name] + return results[0] if len(results) > 0 else None + + def get_variables(self): + variables = {} + for vm in self: + variables[vm['name']] = vm.get_variables() + + return variables + +class ProxmoxPoolList(list): + def get_names(self): + return [pool['poolid'] for pool in self] + +class ProxmoxPool(dict): + def get_members_name(self): + return [member['name'] for member in self['members'] if member['template'] != 1] + +class ProxmoxAPI(object): + def __init__(self, options): + self.options = options + self.credentials = None + + if not options.url: + raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).') + elif not options.username: + raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).') + elif not options.password: + raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).') + + def auth(self): + request_path = '{}api2/json/access/ticket'.format(self.options.url) + + request_params = urllib.urlencode({ + 'username': self.options.username, + 'password': self.options.password, + }) + + data = json.load(open_url(request_path, data=request_params)) + + self.credentials = { + 'ticket': data['data']['ticket'], + 'CSRFPreventionToken': data['data']['CSRFPreventionToken'], + } + + def get(self, url, data=None): + request_path = '{}{}'.format(self.options.url, url) + + headers = {'Cookie': 'PVEAuthCookie={}'.format(self.credentials['ticket'])} + request = open_url(request_path, data=data, headers=headers) + + response = json.load(request) + return response['data'] + + def nodes(self): + return ProxmoxNodeList(self.get('api2/json/nodes')) + + def vms_by_type(self, node, type): + return ProxmoxVMList(self.get('api2/json/nodes/{}/{}'.format(node, type))) + + def vm_description_by_type(self, node, vm, type): + return self.get('api2/json/nodes/{}/{}/{}/config'.format(node, type, vm)) + + def node_qemu(self, node): + return self.vms_by_type(node, 'qemu') + + def node_qemu_description(self, node, vm): + return 
self.vm_description_by_type(node, vm, 'qemu') + + def node_lxc(self, node): + return self.vms_by_type(node, 'lxc') + + def node_lxc_description(self, node, vm): + return self.vm_description_by_type(node, vm, 'lxc') + + def pools(self): + return ProxmoxPoolList(self.get('api2/json/pools')) + + def pool(self, poolid): + return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid))) + +def main_list(options): + results = { + 'all': { + 'hosts': [], + }, + '_meta': { + 'hostvars': {}, + } + } + + proxmox_api = ProxmoxAPI(options) + proxmox_api.auth() + + for node in proxmox_api.nodes().get_names(): + qemu_list = proxmox_api.node_qemu(node) + results['all']['hosts'] += qemu_list.get_names() + results['_meta']['hostvars'].update(qemu_list.get_variables()) + lxc_list = proxmox_api.node_lxc(node) + results['all']['hosts'] += lxc_list.get_names() + results['_meta']['hostvars'].update(lxc_list.get_variables()) + + for vm in results['_meta']['hostvars']: + vmid = results['_meta']['hostvars'][vm]['proxmox_vmid'] + try: + type = results['_meta']['hostvars'][vm]['proxmox_type'] + except KeyError: + type = 'qemu' + try: + description = proxmox_api.vm_description_by_type(node, vmid, type)['description'] + except KeyError: + description = None + + try: + metadata = json.loads(description) + except TypeError: + metadata = {} + except ValueError: + metadata = { + 'notes': description + } + + if 'groups' in metadata: + # print metadata + for group in metadata['groups']: + if group not in results: + results[group] = { + 'hosts': [] + } + results[group]['hosts'] += [vm] + + results['_meta']['hostvars'][vm].update(metadata) + + # pools + for pool in proxmox_api.pools().get_names(): + results[pool] = { + 'hosts': proxmox_api.pool(pool).get_members_name(), + } + + return results + +def main_host(options): + proxmox_api = ProxmoxAPI(options) + proxmox_api.auth() + + for node in proxmox_api.nodes().get_names(): + qemu_list = proxmox_api.node_qemu(node) + qemu = qemu_list.get_by_name(options.host) + if qemu: + return qemu.get_variables() + + return {} + +def main(): + parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') + parser.add_option('--list', action="store_true", default=False, dest="list") + parser.add_option('--host', dest="host") + parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url') + parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username') + parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password') + parser.add_option('--pretty', action="store_true", default=False, dest='pretty') + (options, args) = parser.parse_args() + + if options.list: + data = main_list(options) + elif options.host: + data = main_host(options) + else: + parser.print_help() + sys.exit(1) + + indent = None + if options.pretty: + indent = 2 + + print(json.dumps(data, indent=indent)) + +if __name__ == '__main__': + main() diff --git a/plugins/inventory/rax.ini b/contrib/inventory/rax.ini similarity index 85% rename from plugins/inventory/rax.ini rename to contrib/inventory/rax.ini index 5a269e16a3ac1c..15948e7b2e66ac 100644 --- a/plugins/inventory/rax.ini +++ b/contrib/inventory/rax.ini @@ -55,3 +55,12 @@ # will be ignored, and 4 will be used. Accepts a comma separated list, # the first found wins. # access_ip_version = 4 + +# Environment Variable: RAX_CACHE_MAX_AGE +# Default: 600 +# +# A configuration the changes the behavior or the inventory cache. 
+# Inventory listing performed before this value will be returned from +# the cache instead of making a full request for all inventory. Setting +# this value to 0 will force a full request. +# cache_max_age = 600 \ No newline at end of file diff --git a/contrib/inventory/rax.py b/contrib/inventory/rax.py new file mode 100755 index 00000000000000..4ac6b0f47e9e3c --- /dev/null +++ b/contrib/inventory/rax.py @@ -0,0 +1,455 @@ +#!/usr/bin/env python + +# (c) 2013, Jesse Keating , +# Matt Martz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +""" +Rackspace Cloud Inventory + +Authors: + Jesse Keating , + Matt Martz + + +Description: + Generates inventory that Ansible can understand by making API request to + Rackspace Public Cloud API + + When run against a specific host, this script returns variables similar to: + rax_os-ext-sts_task_state + rax_addresses + rax_links + rax_image + rax_os-ext-sts_vm_state + rax_flavor + rax_id + rax_rax-bandwidth_bandwidth + rax_user_id + rax_os-dcf_diskconfig + rax_accessipv4 + rax_accessipv6 + rax_progress + rax_os-ext-sts_power_state + rax_metadata + rax_status + rax_updated + rax_hostid + rax_name + rax_created + rax_tenant_id + rax_loaded + +Configuration: + rax.py can be configured using a rax.ini file or via environment + variables. The rax.ini file should live in the same directory along side + this script. + + The section header for configuration values related to this + inventory plugin is [rax] + + [rax] + creds_file = ~/.rackspace_cloud_credentials + regions = IAD,ORD,DFW + env = prod + meta_prefix = meta + access_network = public + access_ip_version = 4 + + Each of these configurations also has a corresponding environment variable. + An environment variable will override a configuration file value. + + creds_file: + Environment Variable: RAX_CREDS_FILE + + An optional configuration that points to a pyrax-compatible credentials + file. + + If not supplied, rax.py will look for a credentials file + at ~/.rackspace_cloud_credentials. It uses the Rackspace Python SDK, + and therefore requires a file formatted per the SDK's specifications. + + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + + regions: + Environment Variable: RAX_REGION + + An optional environment variable to narrow inventory search + scope. If used, needs a value like ORD, DFW, SYD (a Rackspace + datacenter) and optionally accepts a comma-separated list. + + environment: + Environment Variable: RAX_ENV + + A configuration that will use an environment as configured in + ~/.pyrax.cfg, see + https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md + + meta_prefix: + Environment Variable: RAX_META_PREFIX + Default: meta + + A configuration that changes the prefix used for meta key/value groups. 
+ For compatibility with ec2.py set to "tag" + + access_network: + Environment Variable: RAX_ACCESS_NETWORK + Default: public + + A configuration that will tell the inventory script to use a specific + server network to determine the ansible_ssh_host value. If no address + is found, ansible_ssh_host will not be set. Accepts a comma-separated + list of network names, the first found wins. + + access_ip_version: + Environment Variable: RAX_ACCESS_IP_VERSION + Default: 4 + + A configuration related to "access_network" that will attempt to + determine the ansible_ssh_host value for either IPv4 or IPv6. If no + address is found, ansible_ssh_host will not be set. + Acceptable values are: 4 or 6. Values other than 4 or 6 + will be ignored, and 4 will be used. Accepts a comma-separated list, + the first found wins. + +Examples: + List server instances + $ RAX_CREDS_FILE=~/.raxpub rax.py --list + + List servers in ORD datacenter only + $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD rax.py --list + + List servers in ORD and DFW datacenters + $ RAX_CREDS_FILE=~/.raxpub RAX_REGION=ORD,DFW rax.py --list + + Get server details for server named "server.example.com" + $ RAX_CREDS_FILE=~/.raxpub rax.py --host server.example.com + + Use the instance private IP to connect (instead of public IP) + $ RAX_CREDS_FILE=~/.raxpub RAX_ACCESS_NETWORK=private rax.py --list +""" + +import os +import re +import sys +import argparse +import warnings +import collections +import ConfigParser + +from six import iteritems + +from ansible.constants import get_config, mk_boolean + +try: + import json +except ImportError: + import simplejson as json + +try: + import pyrax + from pyrax.utils import slugify +except ImportError: + print('pyrax is required for this module') + sys.exit(1) + +from time import time + + +NON_CALLABLES = (basestring, bool, dict, int, list, type(None)) + + +def load_config_file(): + p = ConfigParser.ConfigParser() + config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'rax.ini') + try: + p.read(config_file) + except ConfigParser.Error: + return None + else: + return p +p = load_config_file() + + +def rax_slugify(value): + return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_')) + + +def to_dict(obj): + instance = {} + for key in dir(obj): + value = getattr(obj, key) + if isinstance(value, NON_CALLABLES) and not key.startswith('_'): + key = rax_slugify(key) + instance[key] = value + + return instance + + +def host(regions, hostname): + hostvars = {} + + for region in regions: + # Connect to the region + cs = pyrax.connect_to_cloudservers(region=region) + for server in cs.servers.list(): + if server.name == hostname: + for key, value in to_dict(server).items(): + hostvars[key] = value + + # And finally, add an IP address + hostvars['ansible_ssh_host'] = server.accessIPv4 + print(json.dumps(hostvars, sort_keys=True, indent=4)) + + +def _list_into_cache(regions): + groups = collections.defaultdict(list) + hostvars = collections.defaultdict(dict) + images = {} + cbs_attachments = collections.defaultdict(dict) + + prefix = get_config(p, 'rax', 'meta_prefix', 'RAX_META_PREFIX', 'meta') + + networks = get_config(p, 'rax', 'access_network', 'RAX_ACCESS_NETWORK', + 'public', islist=True) + try: + ip_versions = map(int, get_config(p, 'rax', 'access_ip_version', + 'RAX_ACCESS_IP_VERSION', 4, + islist=True)) + except: + ip_versions = [4] + else: + ip_versions = [v for v in ip_versions if v in [4, 6]] + if not ip_versions: + ip_versions = [4] + + # Go through all the regions looking for 
servers + for region in regions: + # Connect to the region + cs = pyrax.connect_to_cloudservers(region=region) + if cs is None: + warnings.warn( + 'Connecting to Rackspace region "%s" has caused Pyrax to ' + 'return None. Is this a valid region?' % region, + RuntimeWarning) + continue + for server in cs.servers.list(): + # Create a group on region + groups[region].append(server.name) + + # Check if group metadata key in servers' metadata + group = server.metadata.get('group') + if group: + groups[group].append(server.name) + + for extra_group in server.metadata.get('groups', '').split(','): + if extra_group: + groups[extra_group].append(server.name) + + # Add host metadata + for key, value in to_dict(server).items(): + hostvars[server.name][key] = value + + hostvars[server.name]['rax_region'] = region + + for key, value in iteritems(server.metadata): + groups['%s_%s_%s' % (prefix, key, value)].append(server.name) + + groups['instance-%s' % server.id].append(server.name) + groups['flavor-%s' % server.flavor['id']].append(server.name) + + # Handle boot from volume + if not server.image: + if not cbs_attachments[region]: + cbs = pyrax.connect_to_cloud_blockstorage(region) + for vol in cbs.list(): + if mk_boolean(vol.bootable): + for attachment in vol.attachments: + metadata = vol.volume_image_metadata + server_id = attachment['server_id'] + cbs_attachments[region][server_id] = { + 'id': metadata['image_id'], + 'name': slugify(metadata['image_name']) + } + image = cbs_attachments[region].get(server.id) + if image: + server.image = {'id': image['id']} + hostvars[server.name]['rax_image'] = server.image + hostvars[server.name]['rax_boot_source'] = 'volume' + images[image['id']] = image['name'] + else: + hostvars[server.name]['rax_boot_source'] = 'local' + + try: + imagegroup = 'image-%s' % images[server.image['id']] + groups[imagegroup].append(server.name) + groups['image-%s' % server.image['id']].append(server.name) + except KeyError: + try: + image = cs.images.get(server.image['id']) + except cs.exceptions.NotFound: + groups['image-%s' % server.image['id']].append(server.name) + else: + images[image.id] = image.human_id + groups['image-%s' % image.human_id].append(server.name) + groups['image-%s' % server.image['id']].append(server.name) + + # And finally, add an IP address + ansible_ssh_host = None + # use accessIPv[46] instead of looping address for 'public' + for network_name in networks: + if ansible_ssh_host: + break + if network_name == 'public': + for version_name in ip_versions: + if ansible_ssh_host: + break + if version_name == 6 and server.accessIPv6: + ansible_ssh_host = server.accessIPv6 + elif server.accessIPv4: + ansible_ssh_host = server.accessIPv4 + if not ansible_ssh_host: + addresses = server.addresses.get(network_name, []) + for address in addresses: + for version_name in ip_versions: + if ansible_ssh_host: + break + if address.get('version') == version_name: + ansible_ssh_host = address.get('addr') + break + if ansible_ssh_host: + hostvars[server.name]['ansible_ssh_host'] = ansible_ssh_host + + if hostvars: + groups['_meta'] = {'hostvars': hostvars} + + with open(get_cache_file_path(regions), 'w') as cache_file: + json.dump(groups, cache_file) + + +def get_cache_file_path(regions): + regions_str = '.'.join([reg.strip().lower() for reg in regions]) + ansible_tmp_path = os.path.join(os.path.expanduser("~"), '.ansible', 'tmp') + if not os.path.exists(ansible_tmp_path): + os.makedirs(ansible_tmp_path) + return os.path.join(ansible_tmp_path, + 'ansible-rax-%s-%s.cache' % ( + 
pyrax.identity.username, regions_str)) + + +def _list(regions, refresh_cache=True): + cache_max_age = int(get_config(p, 'rax', 'cache_max_age', + 'RAX_CACHE_MAX_AGE', 600)) + + if (not os.path.exists(get_cache_file_path(regions)) or + refresh_cache or + (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age): + # Cache file doesn't exist or older than 10m or refresh cache requested + _list_into_cache(regions) + + with open(get_cache_file_path(regions), 'r') as cache_file: + groups = json.load(cache_file) + print(json.dumps(groups, sort_keys=True, indent=4)) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Ansible Rackspace Cloud ' + 'inventory module') + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('--list', action='store_true', + help='List active servers') + group.add_argument('--host', help='List details about the specific host') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help=('Force refresh of cache, making API requests to' + 'RackSpace (default: False - use cache files)')) + return parser.parse_args() + + +def setup(): + default_creds_file = os.path.expanduser('~/.rackspace_cloud_credentials') + + env = get_config(p, 'rax', 'environment', 'RAX_ENV', None) + if env: + pyrax.set_environment(env) + + keyring_username = pyrax.get_setting('keyring_username') + + # Attempt to grab credentials from environment first + creds_file = get_config(p, 'rax', 'creds_file', + 'RAX_CREDS_FILE', None) + if creds_file is not None: + creds_file = os.path.expanduser(creds_file) + else: + # But if that fails, use the default location of + # ~/.rackspace_cloud_credentials + if os.path.isfile(default_creds_file): + creds_file = default_creds_file + elif not keyring_username: + sys.stderr.write('No value in environment variable %s and/or no ' + 'credentials file at %s\n' + % ('RAX_CREDS_FILE', default_creds_file)) + sys.exit(1) + + identity_type = pyrax.get_setting('identity_type') + pyrax.set_setting('identity_type', identity_type or 'rackspace') + + region = pyrax.get_setting('region') + + try: + if keyring_username: + pyrax.keyring_auth(keyring_username, region=region) + else: + pyrax.set_credential_file(creds_file, region=region) + except Exception as e: + sys.stderr.write("%s: %s\n" % (e, e.message)) + sys.exit(1) + + regions = [] + if region: + regions.append(region) + else: + region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all', + islist=True) + for region in region_list: + region = region.strip().upper() + if region == 'ALL': + regions = pyrax.regions + break + elif region not in pyrax.regions: + sys.stderr.write('Unsupported region %s' % region) + sys.exit(1) + elif region not in regions: + regions.append(region) + + return regions + + +def main(): + args = parse_args() + regions = setup() + if args.list: + _list(regions, refresh_cache=args.refresh_cache) + elif args.host: + host(regions, args.host) + sys.exit(0) + + +if __name__ == '__main__': + main() diff --git a/contrib/inventory/rudder.ini b/contrib/inventory/rudder.ini new file mode 100644 index 00000000000000..376674bb9028b9 --- /dev/null +++ b/contrib/inventory/rudder.ini @@ -0,0 +1,35 @@ +# Rudder external inventory script settings +# + +[rudder] + +# Your Rudder server API URL, typically: +# https://rudder.local/rudder/api +uri = https://rudder.local/rudder/api + +# By default, Rudder uses a self-signed certificate. Set this to True +# to disable certificate validation. 
+disable_ssl_certificate_validation = True + +# Your Rudder API token, created in the Web interface. +token = aaabbbccc + +# Rudder API version to use, use "latest" for lastest available +# version. +version = latest + +# Property to use as group name in the output. +# Can generally be "id" or "displayName". +group_name = displayName + +# Fail if there are two groups with the same name or two hosts with the +# same hostname in the output. +fail_if_name_collision = True + +# We cache the results of Rudder API in a local file +cache_path = /tmp/ansible-rudder.cache + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. +# Set to 0 to disable cache. +cache_max_age = 500 diff --git a/contrib/inventory/rudder.py b/contrib/inventory/rudder.py new file mode 100755 index 00000000000000..5cf16c9761c830 --- /dev/null +++ b/contrib/inventory/rudder.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python + +# Copyright (c) 2015, Normation SAS +# +# Inspired by the EC2 inventory plugin: +# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +''' +Rudder external inventory script +================================= + +Generates inventory that Ansible can understand by making API request to +a Rudder server. This script is compatible with Rudder 2.10 or later. + +The output JSON includes all your Rudder groups, containing the hostnames of +their nodes. Groups and nodes have a variable called rudder_group_id and +rudder_node_id, which is the Rudder internal id of the item, allowing to identify +them uniquely. Hosts variables also include your node properties, which are +key => value properties set by the API and specific to each node. + +This script assumes there is an rudder.ini file alongside it. 
To specify a +different path to rudder.ini, define the RUDDER_INI_PATH environment variable: + + export RUDDER_INI_PATH=/path/to/my_rudder.ini + +You have to configure your Rudder server information, either in rudder.ini or +by overriding it with environment variables: + + export RUDDER_API_VERSION='latest' + export RUDDER_API_TOKEN='my_token' + export RUDDER_API_URI='https://rudder.local/rudder/api' +''' + + +import sys +import os +import re +import argparse +import six +import httplib2 as http +from time import time +from six.moves import configparser + +try: + from urlparse import urlparse +except ImportError: + from urllib.parse import urlparse + +try: + import json +except ImportError: + import simplejson as json + + +class RudderInventory(object): + def __init__(self): + ''' Main execution path ''' + + # Empty inventory by default + self.inventory = {} + + # Read settings and parse CLI arguments + self.read_settings() + self.parse_cli_args() + + # Create connection + self.conn = http.Http(disable_ssl_certificate_validation=self.disable_ssl_validation) + + # Cache + if self.args.refresh_cache: + self.update_cache() + elif not self.is_cache_valid(): + self.update_cache() + else: + self.load_cache() + + data_to_print = {} + + if self.args.host: + data_to_print = self.get_host_info(self.args.host) + elif self.args.list: + data_to_print = self.get_list_info() + + print(self.json_format_dict(data_to_print, True)) + + def read_settings(self): + ''' Reads the settings from the rudder.ini file ''' + if six.PY2: + config = configparser.SafeConfigParser() + else: + config = configparser.ConfigParser() + rudder_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rudder.ini') + rudder_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('RUDDER_INI_PATH', rudder_default_ini_path))) + config.read(rudder_ini_path) + + self.token = os.environ.get('RUDDER_API_TOKEN', config.get('rudder', 'token')) + self.version = os.environ.get('RUDDER_API_VERSION', config.get('rudder', 'version')) + self.uri = os.environ.get('RUDDER_API_URI', config.get('rudder', 'uri')) + + self.disable_ssl_validation = config.getboolean('rudder', 'disable_ssl_certificate_validation') + self.group_name = config.get('rudder', 'group_name') + self.fail_if_name_collision = config.getboolean('rudder', 'fail_if_name_collision') + + self.cache_path = config.get('rudder', 'cache_path') + self.cache_max_age = config.getint('rudder', 'cache_max_age') + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Rudder inventory') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all the variables about a specific instance') + parser.add_argument('--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests to Rudder (default: False - use cache files)') + self.args = parser.parse_args() + + def is_cache_valid(self): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if os.path.isfile(self.cache_path): + mod_time = os.path.getmtime(self.cache_path) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + return True + + return False + + def load_cache(self): + ''' Reads the cache from the cache file sets self.cache ''' + + cache = open(self.cache_path, 'r') + json_cache = cache.read() + + 
try: + self.inventory = json.loads(json_cache) + except ValueError as e: + self.fail_with_error('Could not parse JSON response from local cache', 'parsing local cache') + + def write_cache(self): + ''' Writes data in JSON format to a file ''' + + json_data = self.json_format_dict(self.inventory, True) + cache = open(self.cache_path, 'w') + cache.write(json_data) + cache.close() + + def get_nodes(self): + ''' Gets the nodes list from Rudder ''' + + path = '/nodes?select=nodeAndPolicyServer' + result = self.api_call(path) + + nodes = {} + + for node in result['data']['nodes']: + nodes[node['id']] = {} + nodes[node['id']]['hostname'] = node['hostname'] + if 'properties' in node: + nodes[node['id']]['properties'] = node['properties'] + else: + nodes[node['id']]['properties'] = [] + + return nodes + + def get_groups(self): + ''' Gets the groups list from Rudder ''' + + path = '/groups' + result = self.api_call(path) + + groups = {} + + for group in result['data']['groups']: + groups[group['id']] = {'hosts': group['nodeIds'], 'name': self.to_safe(group[self.group_name])} + + return groups + + def update_cache(self): + ''' Fetches the inventory information from Rudder and creates the inventory ''' + + nodes = self.get_nodes() + groups = self.get_groups() + + inventory = {} + + for group in groups: + # Check for name collision + if self.fail_if_name_collision: + if groups[group]['name'] in inventory: + self.fail_with_error('Name collision on groups: "%s" appears twice' % groups[group]['name'], 'creating groups') + # Add group to inventory + inventory[groups[group]['name']] = {} + inventory[groups[group]['name']]['hosts'] = [] + inventory[groups[group]['name']]['vars'] = {} + inventory[groups[group]['name']]['vars']['rudder_group_id'] = group + for node in groups[group]['hosts']: + # Add node to group + inventory[groups[group]['name']]['hosts'].append(nodes[node]['hostname']) + + properties = {} + + for node in nodes: + # Check for name collision + if self.fail_if_name_collision: + if nodes[node]['hostname'] in properties: + self.fail_with_error('Name collision on hosts: "%s" appears twice' % nodes[node]['hostname'], 'creating hosts') + # Add node properties to inventory + properties[nodes[node]['hostname']] = {} + properties[nodes[node]['hostname']]['rudder_node_id'] = node + for node_property in nodes[node]['properties']: + properties[nodes[node]['hostname']][self.to_safe(node_property['name'])] = node_property['value'] + + inventory['_meta'] = {} + inventory['_meta']['hostvars'] = properties + + self.inventory = inventory + + if self.cache_max_age > 0: + self.write_cache() + + def get_list_info(self): + ''' Gets inventory information from local cache ''' + + return self.inventory + + def get_host_info(self, hostname): + ''' Gets information about a specific host from local cache ''' + + if hostname in self.inventory['_meta']['hostvars']: + return self.inventory['_meta']['hostvars'][hostname] + else: + return {} + + def api_call(self, path): + ''' Performs an API request ''' + + headers = { + 'X-API-Token': self.token, + 'X-API-Version': self.version, + 'Content-Type': 'application/json;charset=utf-8' + } + + target = urlparse(self.uri + path) + method = 'GET' + body = '' + + try: + response, content = self.conn.request(target.geturl(), method, body, headers) + except: + self.fail_with_error('Error connecting to Rudder server') + + try: + data = json.loads(content) + except ValueError as e: + self.fail_with_error('Could not parse JSON response from Rudder API', 'reading API response') + + return 
data + + def fail_with_error(self, err_msg, err_operation=None): + ''' Logs an error to std err for ansible-playbook to consume and exit ''' + if err_operation: + err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( + err_msg=err_msg, err_operation=err_operation) + sys.stderr.write(err_msg) + sys.exit(1) + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + def to_safe(self, word): + ''' Converts 'bad' characters in a string to underscores so they can be + used as Ansible variable names ''' + + return re.sub('[^A-Za-z0-9\_]', '_', word) + +# Run the script +RudderInventory() diff --git a/contrib/inventory/serf.py b/contrib/inventory/serf.py new file mode 100755 index 00000000000000..e1340da92df596 --- /dev/null +++ b/contrib/inventory/serf.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python + +# (c) 2015, Marc Abramowitz +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use nodes discovered by Serf +# (https://serfdom.io/). +# +# Requires the `serfclient` Python module from +# https://pypi.python.org/pypi/serfclient +# +# Environment variables +# --------------------- +# - `SERF_RPC_ADDR` +# - `SERF_RPC_AUTH` +# +# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr + +import argparse +import collections +import os +import sys + +# https://pypi.python.org/pypi/serfclient +from serfclient import SerfClient, EnvironmentConfig + +try: + import json +except ImportError: + import simplejson as json + +_key = 'serf' + + +def _serf_client(): + env = EnvironmentConfig() + return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key) + + +def get_serf_members_data(): + return _serf_client().members().body['Members'] + + +def get_nodes(data): + return [node['Name'] for node in data] + + +def get_groups(data): + groups = collections.defaultdict(list) + + for node in data: + for key, value in node['Tags'].items(): + groups[value].append(node['Name']) + + return groups + + +def get_meta(data): + meta = {'hostvars': {}} + for node in data: + meta['hostvars'][node['Name']] = node['Tags'] + return meta + + +def print_list(): + data = get_serf_members_data() + nodes = get_nodes(data) + groups = get_groups(data) + meta = get_meta(data) + inventory_data = {_key: nodes, '_meta': meta} + inventory_data.update(groups) + print(json.dumps(inventory_data)) + + +def print_host(host): + data = get_serf_members_data() + meta = get_meta(data) + print(json.dumps(meta['hostvars'][host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script reading from serf cluster') + mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from serf cluster' + mutex_group.add_argument('--list', 
action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/plugins/inventory/softlayer.py b/contrib/inventory/softlayer.py similarity index 96% rename from plugins/inventory/softlayer.py rename to contrib/inventory/softlayer.py index ef8a2f6a7409a6..d9d11a5571fef5 100755 --- a/plugins/inventory/softlayer.py +++ b/contrib/inventory/softlayer.py @@ -53,10 +53,10 @@ def __init__(self): if self.args.list: self.get_all_servers() - print self.json_format_dict(self.inventory, True) + print(self.json_format_dict(self.inventory, True)) elif self.args.host: - self.get_virtual_servers(client) - print self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True) + self.get_virtual_servers() + print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True)) def to_safe(self, word): '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups''' diff --git a/contrib/inventory/spacewalk.ini b/contrib/inventory/spacewalk.ini new file mode 100644 index 00000000000000..5433c4221b2f08 --- /dev/null +++ b/contrib/inventory/spacewalk.ini @@ -0,0 +1,16 @@ +# Put this ini-file in the same directory as spacewalk.py +# Command line options have precedence over options defined in here. + +[spacewalk] +# To limit the script on one organization in spacewalk, uncomment org_number +# and fill in the organization ID: +# org_number=2 + +# To prefix the group names with the organization ID set prefix_org_name=true. +# This is convenient when org_number is not set and you have the same group names +# in multiple organizations within spacewalk +# The prefix is "org_number-" +prefix_org_name=false + +# Default cache_age for files created with spacewalk-report is 300sec. +cache_age=300 diff --git a/contrib/inventory/spacewalk.py b/contrib/inventory/spacewalk.py new file mode 100755 index 00000000000000..fb0a152ecab497 --- /dev/null +++ b/contrib/inventory/spacewalk.py @@ -0,0 +1,235 @@ +#!/bin/env python + +""" +Spacewalk external inventory script +================================= + +Ansible has a feature where instead of reading from /etc/ansible/hosts +as a text file, it can query external programs to obtain the list +of hosts, groups the hosts are in, and even variables to assign to each host. + +To use this, copy this file over /etc/ansible/hosts and chmod +x the file. +This, more or less, allows you to keep one central database containing +info about all of your managed instances. + +This script is dependent upon the spacealk-reports package being installed +on the same machine. It is basically a CSV-to-JSON converter from the +output of "spacewalk-report system-groups-systems|inventory". + +Tested with Ansible 1.9.2 and spacewalk 2.3 +""" +# +# Author:: Jon Miller +# Copyright:: Copyright (c) 2013, Jon Miller +# +# Extended for support of multiple organizations and +# adding the "_meta" dictionary to --list output by +# Bernhard Lichtinger 2015 +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or (at +# your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +from __future__ import print_function + +import sys +import os +import time +from optparse import OptionParser +import subprocess +import ConfigParser + +from six import iteritems + +try: + import json +except: + import simplejson as json + +base_dir = os.path.dirname(os.path.realpath(__file__)) +SW_REPORT = '/usr/bin/spacewalk-report' +CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports") +CACHE_AGE = 300 # 5min +INI_FILE = os.path.join(base_dir, "spacewalk.ini") + + # Sanity check +if not os.path.exists(SW_REPORT): + print('Error: %s is required for operation.' % (SW_REPORT), file=sys.stderr) + sys.exit(1) + +# Pre-startup work +if not os.path.exists(CACHE_DIR): + os.mkdir(CACHE_DIR) + os.chmod(CACHE_DIR, 2775) + +# Helper functions +#------------------------------ + +def spacewalk_report(name): + """Yield a dictionary form of each CSV output produced by the specified + spacewalk-report + """ + cache_filename = os.path.join(CACHE_DIR, name) + if not os.path.exists(cache_filename) or \ + (time.time() - os.stat(cache_filename).st_mtime) > CACHE_AGE: + # Update the cache + fh = open(cache_filename, 'w') + p = subprocess.Popen([SW_REPORT, name], stdout=fh) + p.wait() + fh.close() + + lines = open(cache_filename, 'r').readlines() + keys = lines[0].strip().split(',') + # add 'spacewalk_' prefix to the keys + keys = [ 'spacewalk_' + key for key in keys ] + for line in lines[1:]: + values = line.strip().split(',') + if len(keys) == len(values): + yield dict(zip(keys, values)) + + +# Options +#------------------------------ + +parser = OptionParser(usage="%prog [options] --list | --host ") +parser.add_option('--list', default=False, dest="list", action="store_true", + help="Produce a JSON consumable grouping of servers for Ansible") +parser.add_option('--host', default=None, dest="host", + help="Generate additional host specific details for given host for Ansible") +parser.add_option('-H', '--human', dest="human", + default=False, action="store_true", + help="Produce a friendlier version of either server list or host detail") +parser.add_option('-o', '--org', default=None, dest="org_number", + help="Limit to spacewalk organization number") +parser.add_option('-p', default=False, dest="prefix_org_name", action="store_true", + help="Prefix the group name with the organization number") +(options, args) = parser.parse_args() + + +# read spacewalk.ini if present +#------------------------------ +if os.path.exists(INI_FILE): + config = ConfigParser.SafeConfigParser() + config.read(INI_FILE) + if config.has_option('spacewalk' , 'cache_age'): + CACHE_AGE = config.get('spacewalk' , 'cache_age') + if not options.org_number and config.has_option('spacewalk' , 'org_number'): + options.org_number = config.get('spacewalk' , 'org_number') + if not options.prefix_org_name and config.has_option('spacewalk' , 'prefix_org_name'): + options.prefix_org_name = config.getboolean('spacewalk' , 'prefix_org_name') + + +# Generate dictionary for mapping group_id to org_id +#------------------------------ +org_groups = {} +try: + for group in spacewalk_report('system-groups'): + org_groups[group['spacewalk_group_id']] = group['spacewalk_org_id'] + +except 
(OSError) as e: + print('Problem executing the command "%s system-groups": %s' % + (SW_REPORT, str(e)), file=sys.stderr) + sys.exit(2) + + +# List out the known server from Spacewalk +#------------------------------ +if options.list: + + # to build the "_meta"-Group with hostvars first create dictionary for later use + host_vars = {} + try: + for item in spacewalk_report('inventory'): + host_vars[ item['spacewalk_profile_name'] ] = dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in item.items() ) + + except (OSError) as e: + print('Problem executing the command "%s inventory": %s' % + (SW_REPORT, str(e)), file=sys.stderr) + sys.exit(2) + + groups = {} + meta = { "hostvars" : {} } + try: + for system in spacewalk_report('system-groups-systems'): + # first get org_id of system + org_id = org_groups[ system['spacewalk_group_id'] ] + + # shall we add the org_id as prefix to the group name: + if options.prefix_org_name: + prefix = org_id + "-" + group_name = prefix + system['spacewalk_group_name'] + else: + group_name = system['spacewalk_group_name'] + + # if we are limited to one organization: + if options.org_number: + if org_id == options.org_number: + if group_name not in groups: + groups[group_name] = set() + + groups[group_name].add(system['spacewalk_server_name']) + if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]: + meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ] + # or we list all groups and systems: + else: + if group_name not in groups: + groups[group_name] = set() + + groups[group_name].add(system['spacewalk_server_name']) + if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]: + meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ] + + except (OSError) as e: + print('Problem executing the command "%s system-groups-systems": %s' % + (SW_REPORT, str(e)), file=sys.stderr) + sys.exit(2) + + if options.human: + for group, systems in iteritems(groups): + print('[%s]\n%s\n' % (group, '\n'.join(systems))) + else: + final = dict( [ (k, list(s)) for k, s in iteritems(groups) ] ) + final["_meta"] = meta + print(json.dumps( final )) + #print(json.dumps(groups)) + sys.exit(0) + + +# Return a details information concerning the spacewalk server +#------------------------------ +elif options.host: + + host_details = {} + try: + for system in spacewalk_report('inventory'): + if system['spacewalk_hostname'] == options.host: + host_details = system + break + + except (OSError) as e: + print('Problem executing the command "%s inventory": %s' % + (SW_REPORT, str(e)), file=sys.stderr) + sys.exit(2) + + if options.human: + print('Host: %s' % options.host) + for k, v in iteritems(host_details): + print(' %s: %s' % (k, '\n '.join(v.split(';')))) + else: + print( json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) ) ) + sys.exit(0) + +else: + + parser.print_help() + sys.exit(1) diff --git a/contrib/inventory/ssh_config.py b/contrib/inventory/ssh_config.py new file mode 100755 index 00000000000000..3ff7eb9658d0e6 --- /dev/null +++ b/contrib/inventory/ssh_config.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python + +# (c) 2014, Tomas Karasek +# +# This file is part of Ansible. 
+# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# Dynamic inventory script which lets you use aliases from ~/.ssh/config. +# +# There were some issues with various Paramiko versions. I took a deeper look +# and tested heavily. Now, ansible parses this alright with Paramiko versions +# 1.7.2 to 1.15.2. +# +# It prints inventory based on parsed ~/.ssh/config. You can refer to hosts +# with their alias, rather than with the IP or hostname. It takes advantage +# of the ansible_ssh_{host,port,user,private_key_file}. +# +# If you have in your .ssh/config: +# Host git +# HostName git.domain.org +# User tkarasek +# IdentityFile /home/tomk/keys/thekey +# +# You can do +# $ ansible git -m ping +# +# Example invocation: +# ssh_config.py --list +# ssh_config.py --host + +import argparse +import os.path +import sys +import paramiko + +try: + import json +except ImportError: + import simplejson as json + +SSH_CONF = '~/.ssh/config' + +_key = 'ssh_config' + +_ssh_to_ansible = [('user', 'ansible_ssh_user'), + ('hostname', 'ansible_ssh_host'), + ('identityfile', 'ansible_ssh_private_key_file'), + ('port', 'ansible_ssh_port')] + + +def get_config(): + if not os.path.isfile(os.path.expanduser(SSH_CONF)): + return {} + with open(os.path.expanduser(SSH_CONF)) as f: + cfg = paramiko.SSHConfig() + cfg.parse(f) + ret_dict = {} + for d in cfg._config: + if type(d['host']) is list: + alias = d['host'][0] + else: + alias = d['host'] + if ('?' in alias) or ('*' in alias): + continue + _copy = dict(d) + del _copy['host'] + if 'config' in _copy: + ret_dict[alias] = _copy['config'] + else: + ret_dict[alias] = _copy + return ret_dict + + +def print_list(): + cfg = get_config() + meta = {'hostvars': {}} + for alias, attributes in cfg.items(): + tmp_dict = {} + for ssh_opt, ans_opt in _ssh_to_ansible: + if ssh_opt in attributes: + # If the attribute is a list, just take the first element. + # Private key is returned in a list for some reason. 
+ attr = attributes[ssh_opt] + if type(attr) is list: + attr = attr[0] + tmp_dict[ans_opt] = attr + if tmp_dict: + meta['hostvars'][alias] = tmp_dict + + print(json.dumps({_key: list(set(meta['hostvars'].keys())), '_meta': meta})) + + +def print_host(host): + cfg = get_config() + print(json.dumps(cfg[host])) + + +def get_args(args_list): + parser = argparse.ArgumentParser( + description='ansible inventory script parsing .ssh/config') + mutex_group = parser.add_mutually_exclusive_group(required=True) + help_list = 'list all hosts from .ssh/config inventory' + mutex_group.add_argument('--list', action='store_true', help=help_list) + help_host = 'display variables for a host' + mutex_group.add_argument('--host', help=help_host) + return parser.parse_args(args_list) + + +def main(args_list): + + args = get_args(args_list) + if args.list: + print_list() + if args.host: + print_host(args.host) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/contrib/inventory/vagrant.py b/contrib/inventory/vagrant.py new file mode 100755 index 00000000000000..e7ba0dbe5878b2 --- /dev/null +++ b/contrib/inventory/vagrant.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +""" +Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and +returns it under the host group 'vagrant' + +Example Vagrant configuration using this script: + + config.vm.provision :ansible do |ansible| + ansible.playbook = "./provision/your_playbook.yml" + ansible.inventory_file = "./provision/inventory/vagrant.py" + ansible.verbose = true + end +""" + +# Copyright (C) 2013 Mark Mandel +# 2015 Igor Khomyakov +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# +# Thanks to the spacewalk.py inventory script for giving me the basic structure +# of this. +# + +import sys +import os.path +import subprocess +import re +from paramiko import SSHConfig +from cStringIO import StringIO +from optparse import OptionParser +from collections import defaultdict +try: + import json +except: + import simplejson as json + +_group = 'vagrant' # a default group +_ssh_to_ansible = [('user', 'ansible_ssh_user'), + ('hostname', 'ansible_ssh_host'), + ('identityfile', 'ansible_ssh_private_key_file'), + ('port', 'ansible_ssh_port')] + +# Options +# ------------------------------ + +parser = OptionParser(usage="%prog [options] --list | --host ") +parser.add_option('--list', default=False, dest="list", action="store_true", + help="Produce a JSON consumable grouping of Vagrant servers for Ansible") +parser.add_option('--host', default=None, dest="host", + help="Generate additional host specific details for given host for Ansible") +(options, args) = parser.parse_args() + +# +# helper functions +# + + +# get all the ssh configs for all boxes in an array of dictionaries. 
+def get_ssh_config(): + return {k: get_a_ssh_config(k) for k in list_running_boxes()} + + +# list all the running boxes +def list_running_boxes(): + output = subprocess.check_output(["vagrant", "status"]).split('\n') + + boxes = [] + + for line in output: + matcher = re.search("([^\s]+)[\s]+running \(.+", line) + if matcher: + boxes.append(matcher.group(1)) + + return boxes + + +# get the ssh config for a single box +def get_a_ssh_config(box_name): + """Gives back a map of all the machine's ssh configurations""" + + output = subprocess.check_output(["vagrant", "ssh-config", box_name]) + config = SSHConfig() + config.parse(StringIO(output)) + host_config = config.lookup(box_name) + + # man 5 ssh_config: + # > It is possible to have multiple identity files ... + # > all these identities will be tried in sequence. + for id in host_config['identityfile']: + if os.path.isfile(id): + host_config['identityfile'] = id + + return {v: host_config[k] for k, v in _ssh_to_ansible} + +# List out servers that vagrant has running +# ------------------------------ +if options.list: + ssh_config = get_ssh_config() + meta = defaultdict(dict) + + for host in ssh_config: + meta['hostvars'][host] = ssh_config[host] + + print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta})) + sys.exit(0) + +# Get out the host details +# ------------------------------ +elif options.host: + print(json.dumps(get_a_ssh_config(options.host))) + sys.exit(0) + +# Print out help +# ------------------------------ +else: + parser.print_help() + sys.exit(0) diff --git a/plugins/inventory/vbox.py b/contrib/inventory/vbox.py similarity index 92% rename from plugins/inventory/vbox.py rename to contrib/inventory/vbox.py index ff31785d7e3de0..bd926b38e9188b 100755 --- a/plugins/inventory/vbox.py +++ b/contrib/inventory/vbox.py @@ -23,6 +23,11 @@ except ImportError: import simplejson as json +class SetEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, set): + return list(obj) + return json.JSONEncoder.default(self, obj) VBOX="VBoxManage" @@ -110,5 +115,4 @@ def get_hosts(host=None): else: inventory = get_hosts() - import pprint - print pprint.pprint(inventory) + sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder)) diff --git a/plugins/inventory/vmware.ini b/contrib/inventory/vmware.ini similarity index 91% rename from plugins/inventory/vmware.ini rename to contrib/inventory/vmware.ini index 964be18c14e203..5097735fd0e18d 100644 --- a/plugins/inventory/vmware.ini +++ b/contrib/inventory/vmware.ini @@ -23,6 +23,10 @@ guests_only = True # caching will be disabled. #cache_dir = ~/.cache/ansible +# Specify a prefix filter. Any VMs with names beginning with this string will +# not be returned. +# prefix_filter = test_ + [auth] # Specify hostname or IP address of vCenter/ESXi server. A port may be diff --git a/contrib/inventory/vmware.py b/contrib/inventory/vmware.py new file mode 100755 index 00000000000000..8f723a638dde2e --- /dev/null +++ b/contrib/inventory/vmware.py @@ -0,0 +1,436 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +''' +VMware Inventory Script +======================= + +Retrieve information about virtual machines from a vCenter server or +standalone ESX host. When `group_by=false` (in the INI file), host systems +are also returned in addition to VMs. + +This script will attempt to read configuration from an INI file with the same +base filename if present, or `vmware.ini` if not. 
It is possible to create +symlinks to the inventory script to support multiple configurations, e.g.: + +* `vmware.py` (this script) +* `vmware.ini` (default configuration, will be read by `vmware.py`) +* `vmware_test.py` (symlink to `vmware.py`) +* `vmware_test.ini` (test configuration, will be read by `vmware_test.py`) +* `vmware_other.py` (symlink to `vmware.py`, will read `vmware.ini` since no + `vmware_other.ini` exists) + +The path to an INI file may also be specified via the `VMWARE_INI` environment +variable, in which case the filename matching rules above will not apply. + +Host and authentication parameters may be specified via the `VMWARE_HOST`, +`VMWARE_USER` and `VMWARE_PASSWORD` environment variables; these options will +take precedence over options present in the INI file. An INI file is not +required if these options are specified using environment variables. +''' + +from __future__ import print_function + +import collections +import json +import logging +import optparse +import os +import sys +import time +import ConfigParser + +from six import text_type + +# Disable logging message trigged by pSphere/suds. +try: + from logging import NullHandler +except ImportError: + from logging import Handler + class NullHandler(Handler): + def emit(self, record): + pass +logging.getLogger('psphere').addHandler(NullHandler()) +logging.getLogger('suds').addHandler(NullHandler()) + +from psphere.client import Client +from psphere.errors import ObjectNotFoundError +from psphere.managedobjects import HostSystem, VirtualMachine, ManagedObject, Network +from suds.sudsobject import Object as SudsObject + + +class VMwareInventory(object): + + def __init__(self, guests_only=None): + self.config = ConfigParser.SafeConfigParser() + if os.environ.get('VMWARE_INI', ''): + config_files = [os.environ['VMWARE_INI']] + else: + config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini'] + for config_file in config_files: + if os.path.exists(config_file): + self.config.read(config_file) + break + + # Retrieve only guest VMs, or include host systems? + if guests_only is not None: + self.guests_only = guests_only + elif self.config.has_option('defaults', 'guests_only'): + self.guests_only = self.config.getboolean('defaults', 'guests_only') + else: + self.guests_only = True + + # Read authentication information from VMware environment variables + # (if set), otherwise from INI file. + auth_host = os.environ.get('VMWARE_HOST') + if not auth_host and self.config.has_option('auth', 'host'): + auth_host = self.config.get('auth', 'host') + auth_user = os.environ.get('VMWARE_USER') + if not auth_user and self.config.has_option('auth', 'user'): + auth_user = self.config.get('auth', 'user') + auth_password = os.environ.get('VMWARE_PASSWORD') + if not auth_password and self.config.has_option('auth', 'password'): + auth_password = self.config.get('auth', 'password') + + # Create the VMware client connection. + self.client = Client(auth_host, auth_user, auth_password) + + def _put_cache(self, name, value): + ''' + Saves the value to cache with the name given. + ''' + if self.config.has_option('defaults', 'cache_dir'): + cache_dir = os.path.expanduser(self.config.get('defaults', 'cache_dir')) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + cache_file = os.path.join(cache_dir, name) + with open(cache_file, 'w') as cache: + json.dump(value, cache) + + def _get_cache(self, name, default=None): + ''' + Retrieves the value from cache for the given name. 
+ ''' + if self.config.has_option('defaults', 'cache_dir'): + cache_dir = self.config.get('defaults', 'cache_dir') + cache_file = os.path.join(cache_dir, name) + if os.path.exists(cache_file): + if self.config.has_option('defaults', 'cache_max_age'): + cache_max_age = self.config.getint('defaults', 'cache_max_age') + else: + cache_max_age = 0 + cache_stat = os.stat(cache_file) + if (cache_stat.st_mtime + cache_max_age) >= time.time(): + with open(cache_file) as cache: + return json.load(cache) + return default + + def _flatten_dict(self, d, parent_key='', sep='_'): + ''' + Flatten nested dicts by combining keys with a separator. Lists with + only string items are included as is; any other lists are discarded. + ''' + items = [] + for k, v in d.items(): + if k.startswith('_'): + continue + new_key = parent_key + sep + k if parent_key else k + if isinstance(v, collections.MutableMapping): + items.extend(self._flatten_dict(v, new_key, sep).items()) + elif isinstance(v, (list, tuple)): + if all([isinstance(x, basestring) for x in v]): + items.append((new_key, v)) + else: + items.append((new_key, v)) + return dict(items) + + def _get_obj_info(self, obj, depth=99, seen=None): + ''' + Recursively build a data structure for the given pSphere object (depth + only applies to ManagedObject instances). + ''' + seen = seen or set() + if isinstance(obj, ManagedObject): + try: + obj_unicode = text_type(getattr(obj, 'name')) + except AttributeError: + obj_unicode = () + if obj in seen: + return obj_unicode + seen.add(obj) + if depth <= 0: + return obj_unicode + d = {} + for attr in dir(obj): + if attr.startswith('_'): + continue + try: + val = getattr(obj, attr) + obj_info = self._get_obj_info(val, depth - 1, seen) + if obj_info != (): + d[attr] = obj_info + except Exception as e: + pass + return d + elif isinstance(obj, SudsObject): + d = {} + for key, val in iter(obj): + obj_info = self._get_obj_info(val, depth, seen) + if obj_info != (): + d[key] = obj_info + return d + elif isinstance(obj, (list, tuple)): + l = [] + for val in iter(obj): + obj_info = self._get_obj_info(val, depth, seen) + if obj_info != (): + l.append(obj_info) + return l + elif isinstance(obj, (type(None), bool, int, long, float, basestring)): + return obj + else: + return () + + def _get_host_info(self, host, prefix='vmware'): + ''' + Return a flattened dict with info about the given host system. + ''' + host_info = { + 'name': host.name, + } + for attr in ('datastore', 'network', 'vm'): + try: + value = getattr(host, attr) + host_info['%ss' % attr] = self._get_obj_info(value, depth=0) + except AttributeError: + host_info['%ss' % attr] = [] + for k, v in self._get_obj_info(host.summary, depth=0).items(): + if isinstance(v, collections.MutableMapping): + for k2, v2 in v.items(): + host_info[k2] = v2 + elif k != 'host': + host_info[k] = v + try: + host_info['ipAddress'] = host.config.network.vnic[0].spec.ip.ipAddress + except Exception as e: + print(e, file=sys.stderr) + host_info = self._flatten_dict(host_info, prefix) + if ('%s_ipAddress' % prefix) in host_info: + host_info['ansible_ssh_host'] = host_info['%s_ipAddress' % prefix] + return host_info + + def _get_vm_info(self, vm, prefix='vmware'): + ''' + Return a flattened dict with info about the given virtual machine. 
+ ''' + vm_info = { + 'name': vm.name, + } + for attr in ('datastore', 'network'): + try: + value = getattr(vm, attr) + vm_info['%ss' % attr] = self._get_obj_info(value, depth=0) + except AttributeError: + vm_info['%ss' % attr] = [] + try: + vm_info['resourcePool'] = self._get_obj_info(vm.resourcePool, depth=0) + except AttributeError: + vm_info['resourcePool'] = '' + try: + vm_info['guestState'] = vm.guest.guestState + except AttributeError: + vm_info['guestState'] = '' + for k, v in self._get_obj_info(vm.summary, depth=0).items(): + if isinstance(v, collections.MutableMapping): + for k2, v2 in v.items(): + if k2 == 'host': + k2 = 'hostSystem' + vm_info[k2] = v2 + elif k != 'vm': + vm_info[k] = v + vm_info = self._flatten_dict(vm_info, prefix) + if ('%s_ipAddress' % prefix) in vm_info: + vm_info['ansible_ssh_host'] = vm_info['%s_ipAddress' % prefix] + return vm_info + + def _add_host(self, inv, parent_group, host_name): + ''' + Add the host to the parent group in the given inventory. + ''' + p_group = inv.setdefault(parent_group, []) + if isinstance(p_group, dict): + group_hosts = p_group.setdefault('hosts', []) + else: + group_hosts = p_group + if host_name not in group_hosts: + group_hosts.append(host_name) + + def _add_child(self, inv, parent_group, child_group): + ''' + Add a child group to a parent group in the given inventory. + ''' + if parent_group != 'all': + p_group = inv.setdefault(parent_group, {}) + if not isinstance(p_group, dict): + inv[parent_group] = {'hosts': p_group} + p_group = inv[parent_group] + group_children = p_group.setdefault('children', []) + if child_group not in group_children: + group_children.append(child_group) + inv.setdefault(child_group, []) + + def get_inventory(self, meta_hostvars=True): + ''' + Reads the inventory from cache or VMware API via pSphere. + ''' + # Use different cache names for guests only vs. all hosts. + if self.guests_only: + cache_name = '__inventory_guests__' + else: + cache_name = '__inventory_all__' + + inv = self._get_cache(cache_name, None) + if inv is not None: + return inv + + inv = {'all': {'hosts': []}} + if meta_hostvars: + inv['_meta'] = {'hostvars': {}} + + default_group = os.path.basename(sys.argv[0]).rstrip('.py') + + if not self.guests_only: + if self.config.has_option('defaults', 'hw_group'): + hw_group = self.config.get('defaults', 'hw_group') + else: + hw_group = default_group + '_hw' + + if self.config.has_option('defaults', 'vm_group'): + vm_group = self.config.get('defaults', 'vm_group') + else: + vm_group = default_group + '_vm' + + if self.config.has_option('defaults', 'prefix_filter'): + prefix_filter = self.config.get('defaults', 'prefix_filter') + else: + prefix_filter = None + + # Loop through physical hosts: + for host in HostSystem.all(self.client): + + if not self.guests_only: + self._add_host(inv, 'all', host.name) + self._add_host(inv, hw_group, host.name) + host_info = self._get_host_info(host) + if meta_hostvars: + inv['_meta']['hostvars'][host.name] = host_info + self._put_cache(host.name, host_info) + + # Loop through all VMs on physical host. + for vm in host.vm: + if prefix_filter: + if vm.name.startswith( prefix_filter ): + continue + self._add_host(inv, 'all', vm.name) + self._add_host(inv, vm_group, vm.name) + vm_info = self._get_vm_info(vm) + if meta_hostvars: + inv['_meta']['hostvars'][vm.name] = vm_info + self._put_cache(vm.name, vm_info) + + # Group by resource pool. 
+ vm_resourcePool = vm_info.get('vmware_resourcePool', None) + if vm_resourcePool: + self._add_child(inv, vm_group, 'resource_pools') + self._add_child(inv, 'resource_pools', vm_resourcePool) + self._add_host(inv, vm_resourcePool, vm.name) + + # Group by datastore. + for vm_datastore in vm_info.get('vmware_datastores', []): + self._add_child(inv, vm_group, 'datastores') + self._add_child(inv, 'datastores', vm_datastore) + self._add_host(inv, vm_datastore, vm.name) + + # Group by network. + for vm_network in vm_info.get('vmware_networks', []): + self._add_child(inv, vm_group, 'networks') + self._add_child(inv, 'networks', vm_network) + self._add_host(inv, vm_network, vm.name) + + # Group by guest OS. + vm_guestId = vm_info.get('vmware_guestId', None) + if vm_guestId: + self._add_child(inv, vm_group, 'guests') + self._add_child(inv, 'guests', vm_guestId) + self._add_host(inv, vm_guestId, vm.name) + + # Group all VM templates. + vm_template = vm_info.get('vmware_template', False) + if vm_template: + self._add_child(inv, vm_group, 'templates') + self._add_host(inv, 'templates', vm.name) + + self._put_cache(cache_name, inv) + return inv + + def get_host(self, hostname): + ''' + Read info about a specific host or VM from cache or VMware API. + ''' + inv = self._get_cache(hostname, None) + if inv is not None: + return inv + + if not self.guests_only: + try: + host = HostSystem.get(self.client, name=hostname) + inv = self._get_host_info(host) + except ObjectNotFoundError: + pass + + if inv is None: + try: + vm = VirtualMachine.get(self.client, name=hostname) + inv = self._get_vm_info(vm) + except ObjectNotFoundError: + pass + + if inv is not None: + self._put_cache(hostname, inv) + return inv or {} + + +def main(): + parser = optparse.OptionParser() + parser.add_option('--list', action='store_true', dest='list', + default=False, help='Output inventory groups and hosts') + parser.add_option('--host', dest='host', default=None, metavar='HOST', + help='Output variables only for the given hostname') + # Additional options for use when running the script standalone, but never + # used by Ansible. + parser.add_option('--pretty', action='store_true', dest='pretty', + default=False, help='Output nicely-formatted JSON') + parser.add_option('--include-host-systems', action='store_true', + dest='include_host_systems', default=False, + help='Include host systems in addition to VMs') + parser.add_option('--no-meta-hostvars', action='store_false', + dest='meta_hostvars', default=True, + help='Exclude [\'_meta\'][\'hostvars\'] with --list') + options, args = parser.parse_args() + + if options.include_host_systems: + vmware_inventory = VMwareInventory(guests_only=False) + else: + vmware_inventory = VMwareInventory() + if options.host is not None: + inventory = vmware_inventory.get_host(options.host) + else: + inventory = vmware_inventory.get_inventory(options.meta_hostvars) + + json_kwargs = {} + if options.pretty: + json_kwargs.update({'indent': 4, 'sort_keys': True}) + json.dump(inventory, sys.stdout, **json_kwargs) + + +if __name__ == '__main__': + main() diff --git a/plugins/inventory/windows_azure.ini b/contrib/inventory/windows_azure.ini similarity index 91% rename from plugins/inventory/windows_azure.ini rename to contrib/inventory/windows_azure.ini index 133a5e5ff63e5d..c37f79c6292a47 100644 --- a/plugins/inventory/windows_azure.ini +++ b/contrib/inventory/windows_azure.ini @@ -13,8 +13,9 @@ # API calls to Windows Azure may be slow. For this reason, we cache the results # of an API call. 
Set this to the path you want cache files to be written to. -# One file will be written to this directory: +# Two files will be written to this directory: # - ansible-azure.cache +# - ansible-azure.index # cache_path = /tmp diff --git a/contrib/inventory/windows_azure.py b/contrib/inventory/windows_azure.py new file mode 100755 index 00000000000000..d566b0c4d3171e --- /dev/null +++ b/contrib/inventory/windows_azure.py @@ -0,0 +1,294 @@ +#!/usr/bin/env python + +''' +Windows Azure external inventory script +======================================= + +Generates inventory that Ansible can understand by making API request to +Windows Azure using the azure python library. + +NOTE: This script assumes Ansible is being executed where azure is already +installed. + + pip install azure + +Adapted from the ansible Linode plugin by Dan Slimmon. +''' + +# (c) 2013, John Whitbeck +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +###################################################################### + +# Standard imports +import re +import sys +import argparse +import os +from urlparse import urlparse +from time import time +try: + import json +except ImportError: + import simplejson as json + +try: + import azure + from azure import WindowsAzureError + from azure.servicemanagement import ServiceManagementService +except ImportError as e: + print("failed=True msg='`azure` library required for this script'") + sys.exit(1) + + +# Imports for ansible +import ConfigParser + +class AzureInventory(object): + def __init__(self): + """Main execution path.""" + # Inventory grouped by display group + self.inventory = {} + # Index of deployment name -> host + self.index = {} + self.host_metadata = {} + + # Cache setting defaults. + # These can be overridden in settings (see `read_settings`). + cache_dir = os.path.expanduser('~') + self.cache_path_cache = os.path.join(cache_dir, '.ansible-azure.cache') + self.cache_path_index = os.path.join(cache_dir, '.ansible-azure.index') + self.cache_max_age = 0 + + # Read settings and parse CLI arguments + self.read_settings() + self.read_environment() + self.parse_cli_args() + + # Initialize Azure ServiceManagementService + self.sms = ServiceManagementService(self.subscription_id, self.cert_path) + + # Cache + if self.args.refresh_cache: + self.do_api_calls_update_cache() + elif not self.is_cache_valid(): + self.do_api_calls_update_cache() + + if self.args.list_images: + data_to_print = self.json_format_dict(self.get_images(), True) + elif self.args.list or self.args.host: + # Display list of nodes for inventory + if len(self.inventory) == 0: + data = json.loads(self.get_inventory_from_cache()) + else: + data = self.inventory + + if self.args.host: + data_to_print = self.get_host(self.args.host) + else: + # Add the `['_meta']['hostvars']` information. 
+ hostvars = {} + if len(data) > 0: + for host in set([h for hosts in data.values() for h in hosts if h]): + hostvars[host] = self.get_host(host, jsonify=False) + data['_meta'] = {'hostvars': hostvars} + + # JSONify the data. + data_to_print = self.json_format_dict(data, pretty=True) + print(data_to_print) + + def get_host(self, hostname, jsonify=True): + """Return information about the given hostname, based on what + the Windows Azure API provides. + """ + if hostname not in self.host_metadata: + return "No host found: %s" % json.dumps(self.host_metadata) + if jsonify: + return json.dumps(self.host_metadata[hostname]) + return self.host_metadata[hostname] + + def get_images(self): + images = [] + for image in self.sms.list_os_images(): + if str(image.label).lower().find(self.args.list_images.lower()) >= 0: + images.append(vars(image)) + return json.loads(json.dumps(images, default=lambda o: o.__dict__)) + + def is_cache_valid(self): + """Determines if the cache file has expired, or if it is still valid.""" + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + self.cache_max_age) > current_time: + if os.path.isfile(self.cache_path_index): + return True + return False + + def read_settings(self): + """Reads the settings from the .ini file.""" + config = ConfigParser.SafeConfigParser() + config.read(os.path.dirname(os.path.realpath(__file__)) + '/windows_azure.ini') + + # Credentials related + if config.has_option('azure', 'subscription_id'): + self.subscription_id = config.get('azure', 'subscription_id') + if config.has_option('azure', 'cert_path'): + self.cert_path = config.get('azure', 'cert_path') + + # Cache related + if config.has_option('azure', 'cache_path'): + cache_path = os.path.expandvars(os.path.expanduser(config.get('azure', 'cache_path'))) + self.cache_path_cache = os.path.join(cache_path, 'ansible-azure.cache') + self.cache_path_index = os.path.join(cache_path, 'ansible-azure.index') + if config.has_option('azure', 'cache_max_age'): + self.cache_max_age = config.getint('azure', 'cache_max_age') + + def read_environment(self): + ''' Reads the settings from environment variables ''' + # Credentials + if os.getenv("AZURE_SUBSCRIPTION_ID"): + self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID") + if os.getenv("AZURE_CERT_PATH"): + self.cert_path = os.getenv("AZURE_CERT_PATH") + + def parse_cli_args(self): + """Command line argument processing""" + parser = argparse.ArgumentParser( + description='Produce an Ansible Inventory file based on Azure', + ) + parser.add_argument('--list', action='store_true', default=True, + help='List nodes (default: True)') + parser.add_argument('--list-images', action='store', + help='Get all available images.') + parser.add_argument('--refresh-cache', + action='store_true', default=False, + help='Force refresh of thecache by making API requests to Azure ' + '(default: False - use cache files)', + ) + parser.add_argument('--host', action='store', + help='Get all information about an instance.') + self.args = parser.parse_args() + + def do_api_calls_update_cache(self): + """Do API calls, and save data in cache files.""" + self.add_cloud_services() + self.write_to_cache(self.inventory, self.cache_path_cache) + self.write_to_cache(self.index, self.cache_path_index) + + def add_cloud_services(self): + """Makes an Azure API call to get the list of cloud services.""" + try: + for cloud_service in self.sms.list_hosted_services(): + self.add_deployments(cloud_service) + 
except WindowsAzureError as e: + print("Looks like Azure's API is down:") + print("") + print(e) + sys.exit(1) + + def add_deployments(self, cloud_service): + """Makes an Azure API call to get the list of virtual machines + associated with a cloud service. + """ + try: + for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments: + self.add_deployment(cloud_service, deployment) + except WindowsAzureError as e: + print("Looks like Azure's API is down:") + print("") + print(e) + sys.exit(1) + + def add_deployment(self, cloud_service, deployment): + """Adds a deployment to the inventory and index""" + for role in deployment.role_instance_list.role_instances: + try: + # Default port 22 unless port found with name 'SSH' + port = '22' + for ie in role.instance_endpoints.instance_endpoints: + if ie.name == 'SSH': + port = ie.public_port + break + except AttributeError as e: + pass + finally: + self.add_instance(role.instance_name, deployment, port, cloud_service, role.instance_status) + + def add_instance(self, hostname, deployment, ssh_port, cloud_service, status): + """Adds an instance to the inventory and index""" + + dest = urlparse(deployment.url).hostname + + # Add to index + self.index[hostname] = deployment.name + + self.host_metadata[hostname] = dict(ansible_ssh_host=dest, + ansible_ssh_port=int(ssh_port), + instance_status=status, + private_id=deployment.private_id) + + # List of all azure deployments + self.push(self.inventory, "azure", hostname) + + # Inventory: Group by service name + self.push(self.inventory, self.to_safe(cloud_service.service_name), hostname) + + if int(ssh_port) == 22: + self.push(self.inventory, "Cloud_services", hostname) + + # Inventory: Group by region + self.push(self.inventory, self.to_safe(cloud_service.hosted_service_properties.location), hostname) + + def push(self, my_dict, key, element): + """Pushed an element onto an array that may not have been defined in the dict.""" + if key in my_dict: + my_dict[key].append(element); + else: + my_dict[key] = [element] + + def get_inventory_from_cache(self): + """Reads the inventory from the cache file and returns it as a JSON object.""" + cache = open(self.cache_path_cache, 'r') + json_inventory = cache.read() + return json_inventory + + def load_index_from_cache(self): + """Reads the index from the cache file and sets self.index.""" + cache = open(self.cache_path_index, 'r') + json_index = cache.read() + self.index = json.loads(json_index) + + def write_to_cache(self, data, filename): + """Writes data in JSON format to a file.""" + json_data = self.json_format_dict(data, True) + cache = open(filename, 'w') + cache.write(json_data) + cache.close() + + def to_safe(self, word): + """Escapes any characters that would be invalid in an ansible group name.""" + return re.sub("[^A-Za-z0-9\-]", "_", word) + + def json_format_dict(self, data, pretty=False): + """Converts a dict to a JSON object and dumps it as a formatted string.""" + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + + +AzureInventory() diff --git a/plugins/inventory/zabbix.ini b/contrib/inventory/zabbix.ini similarity index 100% rename from plugins/inventory/zabbix.ini rename to contrib/inventory/zabbix.ini diff --git a/plugins/inventory/zabbix.py b/contrib/inventory/zabbix.py similarity index 80% rename from plugins/inventory/zabbix.py rename to contrib/inventory/zabbix.py index 2bc1e2e1ccc8d2..28b643bca63027 100755 --- 
a/plugins/inventory/zabbix.py +++ b/contrib/inventory/zabbix.py @@ -30,6 +30,8 @@ Tested with Zabbix Server 2.0.6. """ +from __future__ import print_function + import os, sys import argparse import ConfigParser @@ -37,7 +39,8 @@ try: from zabbix_api import ZabbixAPI except: - print >> sys.stderr, "Error: Zabbix API library must be installed: pip install zabbix-api." + print("Error: Zabbix API library must be installed: pip install zabbix-api.", + file=sys.stderr) sys.exit(1) try: @@ -49,7 +52,11 @@ class ZabbixInventory(object): def read_settings(self): config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini') + conf_path = './zabbix.ini' + if not os.path.exists(conf_path): + conf_path = os.path.dirname(os.path.realpath(__file__)) + '/zabbix.ini' + if os.path.exists(conf_path): + config.read(conf_path) # server if config.has_option('zabbix', 'server'): self.zabbix_server = config.get('zabbix', 'server') @@ -109,24 +116,24 @@ def __init__(self): try: api = ZabbixAPI(server=self.zabbix_server) api.login(user=self.zabbix_username, password=self.zabbix_password) - except BaseException, e: - print >> sys.stderr, "Error: Could not login to Zabbix server. Check your zabbix.ini." + except BaseException as e: + print("Error: Could not login to Zabbix server. Check your zabbix.ini.", file=sys.stderr) sys.exit(1) if self.options.host: data = self.get_host(api, self.options.host) - print json.dumps(data, indent=2) + print(json.dumps(data, indent=2)) elif self.options.list: data = self.get_list(api) - print json.dumps(data, indent=2) + print(json.dumps(data, indent=2)) else: - print >> sys.stderr, "usage: --list ..OR.. --host " + print("usage: --list ..OR.. --host ", file=sys.stderr) sys.exit(1) else: - print >> sys.stderr, "Error: Configuration of server and credentials are required. See zabbix.ini." + print("Error: Configuration of server and credentials are required. See zabbix.ini.", file=sys.stderr) sys.exit(1) ZabbixInventory() diff --git a/contrib/inventory/zone.py b/contrib/inventory/zone.py new file mode 100755 index 00000000000000..2c71056cc50119 --- /dev/null +++ b/contrib/inventory/zone.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python + +# (c) 2015, Dagobert Michelsen +# +# This file is part of Ansible, +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +from subprocess import Popen,PIPE +import sys +import json + +result = {} +result['all'] = {} + +pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True) +result['all']['hosts'] = [] +for l in pipe.stdout.readlines(): + # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared + s = l.split(':') + if s[1] != 'global': + result['all']['hosts'].append(s[1]) + +result['all']['vars'] = {} +result['all']['vars']['ansible_connection'] = 'zone' + +if len(sys.argv) == 2 and sys.argv[1] == '--list': + print(json.dumps(result)) +elif len(sys.argv) == 3 and sys.argv[1] == '--host': + print(json.dumps({'ansible_connection': 'zone'})) +else: + print("Need an argument, either --list or --host ") diff --git a/docs/man/man1/ansible-doc.1 b/docs/man/man1/ansible-doc.1 deleted file mode 100644 index 2d5068d0d3a46f..00000000000000 --- a/docs/man/man1/ansible-doc.1 +++ /dev/null @@ -1,66 +0,0 @@ -'\" t -.\" Title: ansible-doc -.\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 -.\" Manual: System administration commands -.\" Source: Ansible 1.9 -.\" Language: English -.\" -.TH "ANSIBLE\-DOC" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" -.\" ----------------------------------------------------------------- -.\" * Define some portability stuff -.\" ----------------------------------------------------------------- -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.\" http://bugs.debian.org/507673 -.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.ie \n(.g .ds Aq \(aq -.el .ds Aq ' -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.SH "NAME" -ansible-doc \- show documentation on Ansible modules -.SH "SYNOPSIS" -.sp -ansible\-doc [\-M module_path] [\-l] [\-s] [module\&...] -.SH "DESCRIPTION" -.sp -\fBansible\-doc\fR displays information on modules installed in Ansible libraries\&. It displays a terse listing of modules and their short descriptions, provides a printout of their DOCUMENTATION strings, and it can create a short "snippet" which can be pasted into a playbook\&. -.SH "OPTIONS" -.PP -\fB\-M\fR \fIdirectory\fR, \fB\-\-module\-path=\fR\fIdirectory\fR -.RS 4 -Add an additional directory to the default path for finding module libraries\&. -.RE -.PP -\fB\-s\fR, \fB\-\-snippet=\fR -.RS 4 -Produce a snippet which can be copied into a playbook for modification, like a kind of task template\&. -.RE -.PP -\fB\-l\fR, \fB\-\-list=\fR -.RS 4 -Produce a terse listing of modules and a short description of each\&. -.RE -.SH "AUTHOR" -.sp -ansible\-doc was originally written by Jan\-Piet Mens\&. See the AUTHORS file for a complete list of contributors\&. -.SH "COPYRIGHT" -.sp -Copyright \(co 2012, Jan\-Piet Mens -.sp -Ansible is released under the terms of the GPLv3 License\&. -.SH "SEE ALSO" -.sp -\fBansible\-playbook\fR(1), \fBansible\fR(1), \fBansible\-pull\fR(1) -.sp -Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. 
IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible diff --git a/docs/man/man1/ansible-doc.1.asciidoc.in b/docs/man/man1/ansible-doc.1.asciidoc.in index 46d4c2fd26c1ac..f46db17b60d170 100644 --- a/docs/man/man1/ansible-doc.1.asciidoc.in +++ b/docs/man/man1/ansible-doc.1.asciidoc.in @@ -28,9 +28,11 @@ playbook. OPTIONS ------- -*-M* 'directory', *--module-path=*'directory':: +*-M* 'DIRECTORY', *--module-path=*'DIRECTORY':: -Add an additional directory to the default path for finding module libraries. +the 'DIRECTORY' search path to load modules from. The default is +'/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY +environment variable. *-s*, *--snippet=*:: @@ -41,6 +43,24 @@ a kind of task template. Produce a terse listing of modules and a short description of each. + +ENVIRONMENT +----------- + +ANSIBLE_LIBRARY -- Override the default ansible module library path + + +FILES +----- + +/usr/share/ansible/ -- Default module library + +/etc/ansible/ansible.cfg -- Config file, used if present + +~/.ansible.cfg -- User config file, overrides the default config if present + + + AUTHOR ------ @@ -59,7 +79,7 @@ Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- -*ansible-playbook*(1), *ansible*(1), *ansible-pull*(1) +*ansible-playbook*(1), *ansible*(1), *ansible-pull*(1), *ansible-vault*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: . IRC and mailing list info can be found diff --git a/docs/man/man1/ansible-galaxy.1 b/docs/man/man1/ansible-galaxy.1 deleted file mode 100644 index eac74b6a85dd46..00000000000000 --- a/docs/man/man1/ansible-galaxy.1 +++ /dev/null @@ -1,185 +0,0 @@ -'\" t -.\" Title: ansible-galaxy -.\" Author: [see the "AUTHOR" section] -.\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 -.\" Manual: System administration commands -.\" Source: Ansible 1.9 -.\" Language: English -.\" -.TH "ANSIBLE\-GALAXY" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" -.\" ----------------------------------------------------------------- -.\" * Define some portability stuff -.\" ----------------------------------------------------------------- -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.\" http://bugs.debian.org/507673 -.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.ie \n(.g .ds Aq \(aq -.el .ds Aq ' -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.SH "NAME" -ansible-galaxy \- manage roles using galaxy\&.ansible\&.com -.SH "SYNOPSIS" -.sp -ansible\-galaxy [init|info|install|list|remove] [\-\-help] [options] \&... -.SH "DESCRIPTION" -.sp -\fBAnsible Galaxy\fR is a shared repository for Ansible roles (added in ansible version 1\&.2)\&. The ansible\-galaxy command can be used to manage these roles, or by creating a skeleton framework for roles you\(cqd like to upload to Galaxy\&. -.SH "COMMON OPTIONS" -.PP -\fB\-h\fR, \fB\-\-help\fR -.RS 4 -Show a help message related to the given sub\-command\&. 
-.RE -.SH "INSTALL" -.sp -The \fBinstall\fR sub\-command is used to install roles\&. -.SS "USAGE" -.sp -$ ansible\-galaxy install [options] [\-r FILE | role_name(s)[,version] | tar_file(s)] -.sp -Roles can be installed in several different ways: -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -A username\&.rolename[,version] \- this will install a single role\&. The Galaxy API will be contacted to provide the information about the role, and the corresponding \&.tar\&.gz will be downloaded from -\fBgithub\&.com\fR\&. If the version is omitted, the most recent version available will be installed\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -A file name, using -\fB\-r\fR -\- this will install multiple roles listed one per line\&. The format of each line is the same as above: username\&.rolename[,version] -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -A \&.tar\&.gz of a valid role you\(cqve downloaded directly from -\fBgithub\&.com\fR\&. This is mainly useful when the system running Ansible does not have access to the Galaxy API, for instance when behind a firewall or proxy\&. -.RE -.SS "OPTIONS" -.PP -\fB\-f\fR, \fB\-\-force\fR -.RS 4 -Force overwriting an existing role\&. -.RE -.PP -\fB\-i\fR, \fB\-\-ignore\-errors\fR -.RS 4 -Ignore errors and continue with the next specified role\&. -.RE -.PP -\fB\-n\fR, \fB\-\-no\-deps\fR -.RS 4 -Don\(cqt download roles listed as dependencies\&. -.RE -.PP -\fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR -.RS 4 -The path to the directory containing your roles\&. The default is the -\fBroles_path\fR -configured in your -\fBansible\&.cfg\fR -file (/etc/ansible/roles if not configured) -.RE -.PP -\fB\-r\fR \fIROLE_FILE\fR, \fB\-\-role\-file=\fR\fIROLE_FILE\fR -.RS 4 -A file containing a list of roles to be imported, as specified above\&. This option cannot be used if a rolename or \&.tar\&.gz have been specified\&. -.RE -.SH "REMOVE" -.sp -The \fBremove\fR sub\-command is used to remove one or more roles\&. -.SS "USAGE" -.sp -$ ansible\-galaxy remove role1 role2 \&... -.SS "OPTIONS" -.PP -\fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR -.RS 4 -The path to the directory containing your roles\&. The default is the -\fBroles_path\fR -configured in your -\fBansible\&.cfg\fR -file (/etc/ansible/roles if not configured) -.RE -.SH "INIT" -.sp -The \fBinit\fR command is used to create an empty role suitable for uploading to https://galaxy\&.ansible\&.com (or for roles in general)\&. -.SS "USAGE" -.sp -$ ansible\-galaxy init [options] role_name -.SS "OPTIONS" -.PP -\fB\-f\fR, \fB\-\-force\fR -.RS 4 -Force overwriting an existing role\&. -.RE -.PP -\fB\-p\fR \fIINIT_PATH\fR, \fB\-\-init\-path=\fR\fIINIT_PATH\fR -.RS 4 -The path in which the skeleton role will be created\&.The default is the current working directory\&. -.RE -.PP -\fB\-\-offline\fR -.RS 4 -Don\(cqt query the galaxy API when creating roles -.RE -.SH "LIST" -.sp -The \fBlist\fR sub\-command is used to show what roles are currently instaled\&. You can specify a role name, and if installed only that role will be shown\&. -.SS "USAGE" -.sp -$ ansible\-galaxy list [role_name] -.SS "OPTIONS" -.PP -\fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR -.RS 4 -The path to the directory containing your roles\&. 
The default is the -\fBroles_path\fR -configured in your -\fBansible\&.cfg\fR -file (/etc/ansible/roles if not configured) -.RE -.SH "AUTHOR" -.sp -Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. -.SH "COPYRIGHT" -.sp -Copyright \(co 2014, Michael DeHaan -.sp -Ansible is released under the terms of the GPLv3 License\&. -.SH "SEE ALSO" -.sp -\fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1) -.sp -Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in index 3d59e317063c15..9ffe65e45a7c07 100644 --- a/docs/man/man1/ansible-galaxy.1.asciidoc.in +++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in @@ -12,16 +12,15 @@ ansible-galaxy - manage roles using galaxy.ansible.com SYNOPSIS -------- -ansible-galaxy [init|info|install|list|remove] [--help] [options] ... +ansible-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [--help] [options] ... DESCRIPTION ----------- -*Ansible Galaxy* is a shared repository for Ansible roles (added in -ansible version 1.2). The ansible-galaxy command can be used to manage -these roles, or by creating a skeleton framework for roles you'd like -to upload to Galaxy. +*Ansible Galaxy* is a shared repository for Ansible roles. +The ansible-galaxy command can be used to manage these roles, +or for creating a skeleton framework for roles you'd like to upload to Galaxy. COMMON OPTIONS -------------- @@ -30,7 +29,6 @@ COMMON OPTIONS Show a help message related to the given sub-command. - INSTALL ------- @@ -43,16 +41,16 @@ $ ansible-galaxy install [options] [-r FILE | role_name(s)[,version] | tar_file( Roles can be installed in several different ways: -* A username.rolename[,version] - this will install a single role. The Galaxy - API will be contacted to provide the information about the role, and the - corresponding .tar.gz will be downloaded from *github.com*. If the version +* A username.rolename[,version] - this will install a single role. The Galaxy + API will be contacted to provide the information about the role, and the + corresponding .tar.gz will be downloaded from *github.com*. If the version is omitted, the most recent version available will be installed. * A file name, using *-r* - this will install multiple roles listed one per line. The format of each line is the same as above: username.rolename[,version] * A .tar.gz of a valid role you've downloaded directly from *github.com*. This - is mainly useful when the system running Ansible does not have access to + is mainly useful when the system running Ansible does not have access to the Galaxy API, for instance when behind a firewall or proxy. @@ -146,6 +144,204 @@ The path to the directory containing your roles. The default is the *roles_path* configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) +SEARCH +------ + +The *search* sub-command returns a filtered list of roles found on the remote +server. + + +USAGE +~~~~~ + +$ ansible-galaxy search [options] [searchterm1 searchterm2] + + +OPTIONS +~~~~~~~ +*--galaxy-tags*:: + +Provide a comma separated list of Galaxy Tags on which to filter. + +*--platforms*:: + +Provide a comma separated list of Platforms on which to filter. 
+ +*--author*:: + +Specify the username of a Galaxy contributor on which to filter. + +*-c*, *--ignore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +INFO +---- + +The *info* sub-command shows detailed information for a specific role. +Details returned about the role included information from the local copy +as well as information from galaxy.ansible.com. + +USAGE +~~~~~ + +$ ansible-galaxy info [options] role_name[, version] + +OPTIONS +~~~~~~~ + +*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH':: + +The path to the directory containing your roles. The default is the *roles_path* +configured in your *ansible.cfg* file (/etc/ansible/roles if not configured) + +*-c*, *--ignore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +LOGIN +----- + +The *login* sub-command is used to authenticate with galaxy.ansible.com. +Authentication is required to use the import, delete and setup commands. +It will authenticate the user, retrieve a token from Galaxy, and store it +in the user's home directory. + +USAGE +~~~~~ + +$ ansible-galaxy login [options] + +The *login* sub-command prompts for a *GitHub* username and password. It does +NOT send your password to Galaxy. It actually authenticates with GitHub and +creates a personal access token. It then sends the personal access token to +Galaxy, which in turn verifies that you are you and returns a Galaxy access +token. After authentication completes the *GitHub* personal access token is +destroyed. + +If you do not wish to use your GitHub password, or if you have two-factor +authentication enabled with GitHub, use the *--github-token* option to pass a +personal access token that you create. Log into GitHub, go to Settings and +click on Personal Access Token to create a token. + +OPTIONS +~~~~~~~ + +*-c*, *--ignore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +*--github-token*:: + +Authenticate using a *GitHub* personal access token rather than a password. + + +IMPORT +------ + +Import a role from *GitHub* to galaxy.ansible.com. Requires the user first +authenticate with galaxy.ansible.com using the *login* subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy import [options] github_user github_repo + +OPTIONS +~~~~~~~ +*-c*, *--ignore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +*--branch*:: + +Provide a specific branch to import. When a branch is not specified the +branch found in meta/main.yml is used. If no branch is specified in +meta/main.yml, the repo's default branch (usually master) is used. + + +DELETE +------ + +The *delete* sub-command will delete a role from galaxy.ansible.com. Requires +the user first authenticate with galaxy.ansible.com using the *login* subcommand. + +USAGE +~~~~~ + +$ ansible-galaxy delete [options] github_user github_repo + +OPTIONS +~~~~~~~ + +*-c*, *--ignore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + + +SETUP +----- + +The *setup* sub-command creates an integration point for *Travis CI*, enabling +galaxy.ansible.com to receive notifications from *Travis* on build completion. +Requires the user first authenticate with galaxy.ansible.com using the *login* +subcommand. 
+ +USAGE +~~~~~ + +$ ansible-galaxy setup [options] source github_user github_repo secret + +* Use *travis* as the source value. In the future additional source values may + be added. + +* Provide your *Travis* user token as the secret. The token is not stored by + galaxy.ansible.com. A hash is created using github_user, github_repo + and your token. The hash value is what actually gets stored. + +OPTIONS +~~~~~~~ + +*-c*, *--ignore-certs*:: + +Ignore TLS certificate errors. + +*-s*, *--server*:: + +Override the default server https://galaxy.ansible.com. + +--list:: + +Show your configured integrations. Provids the ID of each integration +which can be used with the remove option. + +--remove:: + +Remove a specific integration. Provide the ID of the integration to +be removed. + AUTHOR ------ @@ -164,7 +360,7 @@ Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- -*ansible*(1), *ansible-pull*(1), *ansible-doc*(1) +*ansible*(1), *ansible-pull*(1), *ansible-doc*(1), *ansible-playbook*(1), *ansible-vault*(1) Extensive documentation is available in the documentation site: . IRC and mailing list info can be found diff --git a/docs/man/man1/ansible-playbook.1 b/docs/man/man1/ansible-playbook.1 deleted file mode 100644 index 085c5f79f1e11a..00000000000000 --- a/docs/man/man1/ansible-playbook.1 +++ /dev/null @@ -1,183 +0,0 @@ -'\" t -.\" Title: ansible-playbook -.\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 -.\" Manual: System administration commands -.\" Source: Ansible 1.9 -.\" Language: English -.\" -.TH "ANSIBLE\-PLAYBOOK" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" -.\" ----------------------------------------------------------------- -.\" * Define some portability stuff -.\" ----------------------------------------------------------------- -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.\" http://bugs.debian.org/507673 -.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.ie \n(.g .ds Aq \(aq -.el .ds Aq ' -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.SH "NAME" -ansible-playbook \- run an ansible playbook -.SH "SYNOPSIS" -.sp -ansible\-playbook \&... [options] -.SH "DESCRIPTION" -.sp -\fBAnsible playbooks\fR are a configuration and multinode deployment system\&. Ansible\-playbook is the tool used to run them\&. See the project home page (link below) for more information\&. -.SH "ARGUMENTS" -.PP -\fBfilename\&.yml\fR -.RS 4 -The names of one or more YAML format files to run as ansible playbooks\&. -.RE -.SH "OPTIONS" -.PP -\fB\-v\fR, \fB\-\-verbose\fR -.RS 4 -Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&. -.RE -.PP -\fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR -.RS 4 -The -\fIPATH\fR -to the inventory hosts file, which defaults to -\fI/etc/ansible/hosts\fR\&. -.RE -.PP -\fB\-M\fR \fIDIRECTORY\fR, \fB\-\-module\-path=\fR\fIDIRECTORY\fR -.RS 4 -The -\fIDIRECTORY\fR -search path to load modules from\&. 
The default is -\fI/usr/share/ansible\fR\&. This can also be set with the ANSIBLE_LIBRARY environment variable\&. -.RE -.PP -\fB\-e\fR \fIVARS\fR, \fB\-\-extra\-vars=\fR\fIVARS\fR -.RS 4 -Extra variables to inject into a playbook, in key=value key=value format or as quoted JSON (hashes and arrays)\&. -.RE -.PP -\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR -.RS 4 -Level of parallelism\&. -\fINUM\fR -is specified as an integer, the default is 5\&. -.RE -.PP -\fB\-k\fR, \fB\-\-ask\-pass\fR -.RS 4 -Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&. -.RE -.PP -\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR -.RS 4 -Prompt for the password to use for playbook plays that request sudo access, if any\&. -.RE -.PP -\fB\-U\fR, \fISUDO_USER\fR, \fB\-\-sudo\-user=\fR\fISUDO_USER\fR -.RS 4 -Desired sudo user (default=root)\&. -.RE -.PP -\fB\-t\fR, \fITAGS\fR, \fB\-\-tags=\fR\fITAGS\fR -.RS 4 -Only run plays and tasks tagged with these values\&. -.RE -.PP -\fB\-\-skip\-tags=\fR\fISKIP_TAGS\fR -.RS 4 -Only run plays and tasks whose tags do not match these values\&. -.RE -.PP -\fB\-\-syntax\-check\fR -.RS 4 -Look for syntax errors in the playbook, but don\(cqt run anything -.RE -.PP -\fB\-\-check\fR -.RS 4 -Do not make any changes on the remote system, but test resources to see what might have changed\&. Note this can not scan all possible resource types and is only a simulation\&. -.RE -.PP -\fB\-\-diff\fR -.RS 4 -When changing any templated files, show the unified diffs of how they changed\&. When used with \-\-check, shows how the files would have changed if \-\-check were not used\&. -.RE -.PP -\fB\-T\fR \fISECONDS\fR, \fB\-\-timeout=\fR\fISECONDS\fR -.RS 4 -Connection timeout to use when trying to talk to hosts, in -\fISECONDS\fR\&. -.RE -.PP -\fB\-s\fR, \fB\-\-sudo\fR -.RS 4 -Force all plays to use sudo, even if not marked as such\&. -.RE -.PP -\fB\-u\fR \fIUSERNAME\fR, \fB\-\-user=\fR\fIUSERNAME\fR -.RS 4 -Use this remote user name on playbook steps that do not indicate a user name to run as\&. -.RE -.PP -\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR -.RS 4 -Connection type to use\&. Possible options are -\fIparamiko\fR -(SSH), -\fIssh\fR, and -\fIlocal\fR\&. -\fIlocal\fR -is mostly useful for crontab or kickstarts\&. -.RE -.PP -\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR -.RS 4 -Further limits the selected host/group patterns\&. -.RE -.PP -\fB\-\-list\-hosts\fR -.RS 4 -Outputs a list of matching hosts; does not execute anything else\&. -.RE -.SH "ENVIRONMENT" -.sp -The following environment variables may be specified\&. -.sp -ANSIBLE_HOSTS \(em Override the default ansible hosts file -.sp -ANSIBLE_LIBRARY \(em Override the default ansible module library path -.SH "FILES" -.sp -/etc/ansible/hosts \(em Default inventory file -.sp -/usr/share/ansible/ \(em Default module library -.sp -/etc/ansible/ansible\&.cfg \(em Config file, used if present -.sp -~/\&.ansible\&.cfg \(em User config file, overrides the default config if present -.SH "AUTHOR" -.sp -Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. -.SH "COPYRIGHT" -.sp -Copyright \(co 2012, Michael DeHaan -.sp -Ansible is released under the terms of the GPLv3 License\&. -.SH "SEE ALSO" -.sp -\fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1) -.sp -Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. 
IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in index e6b6c680a76527..289e7917dddff0 100644 --- a/docs/man/man1/ansible-playbook.1.asciidoc.in +++ b/docs/man/man1/ansible-playbook.1.asciidoc.in @@ -34,16 +34,86 @@ The names of one or more YAML format files to run as ansible playbooks. OPTIONS ------- -*-v*, *--verbose*:: +*--ask-become-pass*:: -Verbose mode, more output from successful actions will be shown. Give -up to three times for more output. +Ask for privilege escalation password. + +*-k*, *--ask-pass*:: + +Prompt for the connection password, if it is needed for the transport used. +For example, using ssh and not having a key-based authentication with ssh-agent. + +*--ask-su-pass*:: + +Prompt for su password, used with --su (deprecated, use become). + +*-K*, *--ask-sudo-pass*:: + +Prompt for the password to use with --sudo, if any (deprecated, use become). + +*--ask-vault-pass*:: + +Prompt for vault password. + +*-C*, *--check*:: + +Do not make any changes on the remote system, but test resources to see what might +have changed. Note this can not scan all possible resource types and is only +a simulation. + +*-c* 'CONNECTION', *--connection=*'CONNECTION':: + +Connection type to use. Most common options are 'paramiko' (SSH), 'ssh', 'winrm' +and 'local'. 'local' is mostly useful for crontab or kickstarts. + +*-D*, *--diff*:: + +When changing any templated files, show the unified diffs of how they changed. When +used with --check, shows how the files would have changed if --check were not used. + +*-e* 'EXTRA_VARS', *--extra-vars=*'EXTRA_VARS':: + +Extra variables to inject into a playbook, in key=value key=value format or +as quoted YAML/JSON (hashes and arrays). To load variables from a file, specify +the file preceded by @ (e.g. @vars.yml). + +*--flush-cache*:: + +Clear the fact cache. + +*--force-handlers*:: + +Run handlers even if a task fails. + +*-f* 'NUM', *--forks=*'NUM':: + +Level of parallelism. 'NUM' is specified as an integer, the default is 5. + +*-h*, *--help*:: + +Show help page and exit *-i* 'PATH', *--inventory=*'PATH':: -The 'PATH' to the inventory hosts file, which defaults to -'/etc/ansible/hosts'. +The 'PATH' to the inventory, which defaults to '/etc/ansible/hosts'. +Alternatively, you can use a comma-separated list of hosts or a single host with a trailing comma 'host,'. + +*-l* 'SUBSET', *--limit=*'SUBSET':: + +Further limits the selected host/group patterns. +You can prefix it with '~' to indicate that the pattern in a regex. + +*--list-hosts*:: + +Outputs a list of matching hosts; does not execute anything else. + +*--list-tags*:: + +List all available tags; does not execute anything else. + +*--list-tasks*:: +List all tasks that would be executed; does not execute anything else. *-M* 'DIRECTORY', *--module-path=*'DIRECTORY':: @@ -51,35 +121,55 @@ The 'DIRECTORY' search path to load modules from. The default is '/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY environment variable. -*-e* 'VARS', *--extra-vars=*'VARS':: +*--private-key=*'PRIVATE_KEY_FILE':: -Extra variables to inject into a playbook, in key=value key=value format or -as quoted JSON (hashes and arrays). To load variables from a file, specify -the file preceded by @ (e.g. @vars.yml). 
+Use this file to authenticate the connection -*-f* 'NUM', *--forks=*'NUM':: +*--skip-tages=*'SKIP_TAGS':: -Level of parallelism. 'NUM' is specified as an integer, the default is 5. +Only run plays and tasks whose tags do not match these values. +*--start-at-task=*'START_AT':: -*-k*, *--ask-pass*:: +Start the playbook at the task matching this name. -Prompt for the SSH password instead of assuming key-based -authentication with ssh-agent. +*--step*:: +One-step-at-a-time: confirm each task before running. -*-K*, *--ask-sudo-pass*:: +*-S*, --su*:: -Prompt for the password to use for playbook plays that request sudo -access, if any. +Run operations with su (deprecated, use become) -*-U*, 'SUDO_USER', *--sudo-user=*'SUDO_USER':: +*-R SU-USER*, *--su-user=*'SU_USER':: -Desired sudo user (default=root). +run operations with su as this user (default=root) (deprecated, use become) -*-t*, 'TAGS', *--tags=*'TAGS':: +*-s*, *--sudo*:: -Only run plays and tasks tagged with these values. +Run the command as the user given by -u and sudo to root (deprecated, use become). + +*--ssh-common-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...'':: + +Add the specified arguments to any sftp/scp/ssh command-line. Useful to +set a ProxyCommand to use a jump host, but any arguments that are +accepted by all three programs may be specified. + +*--sftp-extra-args=*''-f ...'':: + +Add the specified arguments to any sftp command-line. + +*--scp-extra-args=*''-l ...'':: + +Add the specified arguments to any scp command-line. + +*--ssh-extra-args=*''-R ...'':: + +Add the specified arguments to any ssh command-line. + +*-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME':: + +Sudo to 'SUDO_USERNAME' deafult is root. (deprecated, use become). *--skip-tags=*'SKIP_TAGS':: @@ -89,54 +179,64 @@ Only run plays and tasks whose tags do not match these values. Look for syntax errors in the playbook, but don't run anything -*--check*:: - -Do not make any changes on the remote system, but test resources to see what might -have changed. Note this can not scan all possible resource types and is only -a simulation. - -*--diff*:: +*-t*, 'TAGS', *--tags=*'TAGS':: -When changing any templated files, show the unified diffs of how they changed. When -used with --check, shows how the files would have changed if --check were not used. +Only run plays and tasks tagged with these values. *-T* 'SECONDS', *--timeout=*'SECONDS':: Connection timeout to use when trying to talk to hosts, in 'SECONDS'. +*-u* 'USERNAME', *--user=*'USERNAME':: -*-s*, *--sudo*:: +Use this 'USERNAME' to login to the target host, instead of the current user. -Force all plays to use sudo, even if not marked as such. +*--vault-password-file=*'VAULT_PASSWORD_FILE':: +Vault password file. -*-u* 'USERNAME', *--user=*'USERNAME':: +*-v*, *--verbose*:: -Use this remote user name on playbook steps that do not indicate a -user name to run as. +Verbose mode, more output from successful actions will be shown. Give +up to three times for more output. -*-c* 'CONNECTION', *--connection=*'CONNECTION':: +*--version*:: -Connection type to use. Possible options are 'paramiko' (SSH), 'ssh', -and 'local'. 'local' is mostly useful for crontab or kickstarts. +Show program's version number and exit. -*-l* 'SUBSET', *--limit=*'SUBSET':: +EXIT STATUS +----------- -Further limits the selected host/group patterns. +*0* -- OK or no hosts matched -*--list-hosts*:: +*1* -- Error -Outputs a list of matching hosts; does not execute anything else. 
+*2* -- One or more hosts failed + +*3* -- One or more hosts were unreachable + +*4* -- Parser error + +*5* -- Bad or incomplete options + +*99* -- User interrupted execution + +*250* -- Unexpected error ENVIRONMENT ----------- The following environment variables may be specified. -ANSIBLE_HOSTS -- Override the default ansible hosts file +ANSIBLE_INVENTORY -- Override the default ansible inventory file ANSIBLE_LIBRARY -- Override the default ansible module library path +ANSIBLE_CONFIG -- Override the default ansible config file + +Many more are available for most options in ansible.cfg + + FILES ----- @@ -148,6 +248,7 @@ FILES ~/.ansible.cfg -- User config file, overrides the default config if present + AUTHOR ------ @@ -166,7 +267,7 @@ Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- -*ansible*(1), *ansible-pull*(1), *ansible-doc*(1) +*ansible*(1), *ansible-pull*(1), *ansible-doc*(1), *ansible-vault*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: . IRC and mailing list info can be found diff --git a/docs/man/man1/ansible-pull.1 b/docs/man/man1/ansible-pull.1 deleted file mode 100644 index a9b69788b47533..00000000000000 --- a/docs/man/man1/ansible-pull.1 +++ /dev/null @@ -1,106 +0,0 @@ -'\" t -.\" Title: ansible -.\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 -.\" Manual: System administration commands -.\" Source: Ansible 1.9 -.\" Language: English -.\" -.TH "ANSIBLE" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" -.\" ----------------------------------------------------------------- -.\" * Define some portability stuff -.\" ----------------------------------------------------------------- -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.\" http://bugs.debian.org/507673 -.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.ie \n(.g .ds Aq \(aq -.el .ds Aq ' -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.SH "NAME" -ansible-pull \- set up a remote copy of ansible on each managed node -.SH "SYNOPSIS" -.sp -ansible\-pull \-d DEST \-U URL [options] [ ] -.SH "DESCRIPTION" -.sp -\fBAnsible\fR is an extra\-simple tool/framework/API for doing \*(Aqremote things\*(Aq over SSH\&. -.sp -Use ansible\-pull to set up a remote copy of ansible on each managed node, each set to run via cron and update playbook source via a source repository\&. This inverts the default \fBpush\fR architecture of ansible into a \fBpull\fR architecture, which has near\-limitless scaling potential\&. -.sp -The setup playbook can be tuned to change the cron frequency, logging locations, and parameters to ansible\-pull\&. -.sp -This is useful both for extreme scale\-out as well as periodic remediation\&. Usage of the \fIfetch\fR module to retrieve logs from ansible\-pull runs would be an excellent way to gather and analyze remote logs from ansible\-pull\&. -.SH "OPTIONAL ARGUMENT" -.PP -\fBfilename\&.yml\fR -.RS 4 -The name of one the YAML format files to run as an ansible playbook\&. 
This can be a relative path within the checkout\&. If not provided, ansible\-pull will look for a playbook based on the host\(cqs fully\-qualified domain name, on the host hostname and finally a playbook named -\fBlocal\&.yml\fR\&. -.RE -.SH "OPTIONS" -.PP -\fB\-d\fR \fIDEST\fR, \fB\-\-directory=\fR\fIDEST\fR -.RS 4 -Directory to checkout repository into\&. If not provided, a subdirectory of ~/\&.ansible/pull/ will be used\&. -.RE -.PP -\fB\-U\fR \fIURL\fR, \fB\-\-url=\fR\fIURL\fR -.RS 4 -URL of the playbook repository to checkout\&. -.RE -.PP -\fB\-C\fR \fICHECKOUT\fR, \fB\-\-checkout=\fR\fICHECKOUT\fR -.RS 4 -Branch/Tag/Commit to checkout\&. If not provided, uses default behavior of module used to check out playbook repository\&. -.RE -.PP -\fB\-f\fR, \fB\-\-force\fR -.RS 4 -Force running of playbook even if unable to update playbook repository\&. This can be useful, for example, to enforce run\-time state when a network connection may not always be up or possible\&. -.RE -.PP -\fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR -.RS 4 -The -\fIPATH\fR -to the inventory hosts file\&. This can be a relative path within the checkout\&. -.RE -.PP -\fB\-\-purge\fR -.RS 4 -Purge the checkout after the playbook is run\&. -.RE -.PP -\fB\-m\fR \fINAME\fR, \fB\-\-module\-name=\fR\fINAME\fR -.RS 4 -Module used to checkout playbook repository\&. Defaults to git\&. -.RE -.PP -\fB\-o\fR, \fB\-\-only\-if\-changed\fR -.RS 4 -Run the playbook only if the repository has changed -.RE -.SH "AUTHOR" -.sp -Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. -.SH "COPYRIGHT" -.sp -Copyright \(co 2012, Michael DeHaan -.sp -Ansible is released under the terms of the GPLv3 License\&. -.SH "SEE ALSO" -.sp -\fBansible\fR(1), \fBansible\-playbook\fR(1), \fBansible\-doc\fR(1) -.sp -Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in index d75fc637946441..0afba2aeaac4fb 100644 --- a/docs/man/man1/ansible-pull.1.asciidoc.in +++ b/docs/man/man1/ansible-pull.1.asciidoc.in @@ -5,21 +5,21 @@ ansible(1) :man version: %VERSION% :man manual: System administration commands + NAME ---- -ansible-pull - set up a remote copy of ansible on each managed node +ansible-pull - pull playbooks from VCS server and run them using this machine as the target. SYNOPSIS -------- -ansible-pull -d DEST -U URL [options] [ ] +ansible-pull -U URL [options] [ ] DESCRIPTION ----------- -*Ansible* is an extra-simple tool/framework/API for doing \'remote things' over -SSH. +*Ansible* is an extra-simple tool/framework/API for doing \'remote things'. Use ansible-pull to set up a remote copy of ansible on each managed node, each set to run via cron and update playbook source via @@ -50,19 +50,44 @@ host hostname and finally a playbook named *local.yml*. OPTIONS ------- -*-d* 'DEST', *--directory=*'DEST':: +*--accept-host-key*:: -Directory to checkout repository into. If not provided, a subdirectory of -~/.ansible/pull/ will be used. +Adds the hostkey for the repo URL if not already added. -*-U* 'URL', *--url=*'URL':: +*--ask-become-pass*:: -URL of the playbook repository to checkout. +Ask for privilege escalation password. + +*-k*, *--ask-pass*:: + +Prompt for the connection password, if it is needed for the transport used. 
+For example, using ssh and not having a key-based authentication with ssh-agent. + +*--ask-su-pass*:: + +Prompt for su password, used with --su (deprecated, use become). + +*-K*, *--ask-sudo-pass*:: + +Prompt for the password to use with --sudo, if any (deprecated, use become). + +*--ask-vault-pass*:: + +Prompt for vault password. *-C* 'CHECKOUT', *--checkout=*'CHECKOUT':: -Branch/Tag/Commit to checkout. If not provided, uses default behavior -of module used to check out playbook repository. +Branch/Tag/Commit to checkout. If not provided, uses default behavior of module used to check out playbook repository. + +*-d* 'DEST', *--directory=*'DEST':: + +Directory to checkout repository into. If not provided, a subdirectory of ~/.ansible/pull/ will be used. + +*-e* 'EXTRA_VARS', *--extra-vars=*'EXTRA_VARS:: + +Extra variables to inject into a playbook, in key=value key=value format or +as quoted YAML/JSON (hashes and arrays). To load variables from a file, specify +the file preceded by @ (e.g. @vars.yml). *-f*, *--force*:: @@ -70,14 +95,22 @@ Force running of playbook even if unable to update playbook repository. This can be useful, for example, to enforce run-time state when a network connection may not always be up or possible. +*--full*:: + +Do a full clone of the repository. By default ansible-pull will do a shallow clone based on the last revision. + +*-h*, *--help*:: + +Show the help message and exit. + *-i* 'PATH', *--inventory=*'PATH':: -The 'PATH' to the inventory hosts file. This can be a relative path within -the checkout. +The 'PATH' to the inventory, which defaults to '/etc/ansible/hosts'. +Alternatively you can use a comma separated list of hosts or single host with traling comma 'host,'. -*--purge*:: +*--private-key=*'PRIVATE_KEY_FILE':: -Purge the checkout after the playbook is run. +Use this file to authenticate the connection. *-m* 'NAME', *--module-name=*'NAME':: @@ -85,27 +118,107 @@ Module used to checkout playbook repository. Defaults to git. *-o*, *--only-if-changed*:: -Run the playbook only if the repository has changed +Only run the playbook if the repository has been updated. + +*--purge*:: + +Purge the checkout after the playbook is run. + +*-s* 'SLEEP', *--sleep=*'SLEEP':: + +Sleep for random interval (between 0 and SLEEP number of seconds) before starting. This is a useful way ot disperse git requests. + +*--ssh-common-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...'':: + +Add the specified arguments to any sftp/scp/ssh command-line. Useful to +set a ProxyCommand to use a jump host, but any arguments that are +accepted by all three programs may be specified. + +*--sftp-extra-args=*''-f ...'':: + +Add the specified arguments to any sftp command-line. + +*--scp-extra-args=*''-l ...'':: + +Add the specified arguments to any scp command-line. + +*--ssh-extra-args=*''-R ...'':: + +Add the specified arguments to any ssh command-line. + +*-t* 'TAGS', *--tags=*'TAGS':: + +Only run plays and tasks tagged with these values. + +*-U* 'URL', *--url=*'URL':: + +URL of the playbook repository to checkout. + +*--vault-password-file=*'VAULT_PASSWORD_FILE':: + +Vault password file. + +*-v*, *--verbose*:: + +Pass -vvv to ansible-playbook. + + +INVENTORY +--------- + +Ansible stores the hosts it can potentially operate on in an inventory. +This can be an ini-like file, a script, directory or a list. +The ini syntax is one host per line. Groups headers are allowed and +are included on their own line, enclosed in square brackets that start the line. 
+ +Ranges of hosts are also supported. For more information and +additional options, see the documentation on http://docs.ansible.com/. + + +ENVIRONMENT +----------- + +The following environment variables may be specified. + +ANSIBLE_INVENTORY -- Override the default ansible inventory file + +ANSIBLE_LIBRARY -- Override the default ansible module library path + +ANSIBLE_CONFIG -- Override the default ansible config file + +Many more are available for most options in ansible.cfg + + +FILES +----- + +/etc/ansible/hosts -- Default inventory file + +/usr/share/ansible/ -- Default module library + +/etc/ansible/ansible.cfg -- Config file, used if present + +~/.ansible.cfg -- User config file, overrides the default config if present + AUTHOR ------ -Ansible was originally written by Michael DeHaan. See the AUTHORS file -for a complete list of contributors. +Ansible was originally written by Michael DeHaan. +See the AUTHORS file for a complete list of contributors. COPYRIGHT --------- Copyright © 2012, Michael DeHaan - Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- -*ansible*(1), *ansible-playbook*(1), *ansible-doc*(1) +*ansible*(1) *ansible-playbook*(1), *ansible-doc*(1), *ansible-vault*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: . IRC and mailing list info can be found diff --git a/docs/man/man1/ansible-vault.1 b/docs/man/man1/ansible-vault.1 deleted file mode 100644 index 286e642748d8fb..00000000000000 --- a/docs/man/man1/ansible-vault.1 +++ /dev/null @@ -1,103 +0,0 @@ -'\" t -.\" Title: ansible-vault -.\" Author: [see the "AUTHOR" section] -.\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 -.\" Manual: System administration commands -.\" Source: Ansible 1.9 -.\" Language: English -.\" -.TH "ANSIBLE\-VAULT" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" -.\" ----------------------------------------------------------------- -.\" * Define some portability stuff -.\" ----------------------------------------------------------------- -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.\" http://bugs.debian.org/507673 -.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.ie \n(.g .ds Aq \(aq -.el .ds Aq ' -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.SH "NAME" -ansible-vault \- manage encrypted YAML data\&. -.SH "SYNOPSIS" -.sp -ansible\-vault [create|decrypt|edit|encrypt|rekey] [\-\-help] [options] file_name -.SH "DESCRIPTION" -.sp -\fBansible\-vault\fR can encrypt any structured data file used by Ansible\&. This can include \fBgroup_vars/\fR or \fBhost_vars/\fR inventory variables, variables loaded by \fBinclude_vars\fR or \fBvars_files\fR, or variable files passed on the ansible\-playbook command line with \fB\-e @file\&.yml\fR or \fB\-e @file\&.json\fR\&. Role variables and defaults are also included! -.sp -Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault\&. 
If you\(cqd like to not betray what variables you are even using, you can go as far to keep an individual task file entirely encrypted\&. -.SH "COMMON OPTIONS" -.sp -The following options are available to all sub\-commands: -.PP -\fB\-\-vault\-password\-file=\fR\fIFILE\fR -.RS 4 -A file containing the vault password to be used during the encryption/decryption steps\&. Be sure to keep this file secured if it is used\&. -.RE -.PP -\fB\-h\fR, \fB\-\-help\fR -.RS 4 -Show a help message related to the given sub\-command\&. -.RE -.PP -\fB\-\-debug\fR -.RS 4 -Enable debugging output for troubleshooting\&. -.RE -.SH "CREATE" -.sp -\fB$ ansible\-vault create [options] FILE\fR -.sp -The \fBcreate\fR sub\-command is used to initialize a new encrypted file\&. -.sp -First you will be prompted for a password\&. The password used with vault currently must be the same for all files you wish to use together at the same time\&. -.sp -After providing a password, the tool will launch whatever editor you have defined with $EDITOR, and defaults to vim\&. Once you are done with the editor session, the file will be saved as encrypted data\&. -.sp -The default cipher is AES (which is shared\-secret based)\&. -.SH "EDIT" -.sp -\fB$ ansible\-vault edit [options] FILE\fR -.sp -The \fBedit\fR sub\-command is used to modify a file which was previously encrypted using ansible\-vault\&. -.sp -This command will decrypt the file to a temporary file and allow you to edit the file, saving it back when done and removing the temporary file\&. -.SH "REKEY" -.sp -*$ ansible\-vault rekey [options] FILE_1 [FILE_2, \&..., FILE_N] -.sp -The \fBrekey\fR command is used to change the password on a vault\-encrypted files\&. This command can update multiple files at once, and will prompt for both the old and new passwords before modifying any data\&. -.SH "ENCRYPT" -.sp -*$ ansible\-vault encrypt [options] FILE_1 [FILE_2, \&..., FILE_N] -.sp -The \fBencrypt\fR sub\-command is used to encrypt pre\-existing data files\&. As with the \fBrekey\fR command, you can specify multiple files in one command\&. -.SH "DECRYPT" -.sp -*$ ansible\-vault decrypt [options] FILE_1 [FILE_2, \&..., FILE_N] -.sp -The \fBdecrypt\fR sub\-command is used to remove all encryption from data files\&. The files will be stored as plain\-text YAML once again, so be sure that you do not run this command on data files with active passwords or other sensitive data\&. In most cases, users will want to use the \fBedit\fR sub\-command to modify the files securely\&. -.SH "AUTHOR" -.sp -Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. -.SH "COPYRIGHT" -.sp -Copyright \(co 2014, Michael DeHaan -.sp -Ansible is released under the terms of the GPLv3 License\&. -.SH "SEE ALSO" -.sp -\fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1) -.sp -Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible diff --git a/docs/man/man1/ansible-vault.1.asciidoc.in b/docs/man/man1/ansible-vault.1.asciidoc.in index daccd8772f4eef..3db033b3f71231 100644 --- a/docs/man/man1/ansible-vault.1.asciidoc.in +++ b/docs/man/man1/ansible-vault.1.asciidoc.in @@ -7,7 +7,7 @@ ansible-vault(1) NAME ---- -ansible-vault - manage encrypted YAML data. +ansible-vault - manage encrypted ansible vars files (YAML). 
SYNOPSIS @@ -18,15 +18,16 @@ ansible-vault [create|decrypt|edit|encrypt|rekey] [--help] [options] file_name DESCRIPTION ----------- -*ansible-vault* can encrypt any structured data file used by Ansible. This can include -*group_vars/* or *host_vars/* inventory variables, variables loaded by *include_vars* or -*vars_files*, or variable files passed on the ansible-playbook command line with -*-e @file.yml* or *-e @file.json*. Role variables and defaults are also included! +*ansible-vault* can encrypt any structured data file used by Ansible. +This can include *group_vars/* or *host_vars/* inventory variables, +variables loaded by *include_vars* or *vars_files*, or variable files +passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*. +Role variables and defaults are also included! -Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with -vault. If you’d like to not betray what variables you are even using, you can go as far to -keep an individual task file entirely encrypted. +Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault. +If you’d like to not betray what variables you are even using, you can go as far to keep an individual task file entirely encrypted. +The password used with vault currently must be the same for all files you wish to use together at the same time. COMMON OPTIONS -------------- @@ -36,28 +37,33 @@ The following options are available to all sub-commands: *--vault-password-file=*'FILE':: A file containing the vault password to be used during the encryption/decryption -steps. Be sure to keep this file secured if it is used. +steps. Be sure to keep this file secured if it is used. If the file is executable, +it will be run and its standard output will be used as the password. + +*--new-vault-password-file=*'FILE':: + +A file containing the new vault password to be used when rekeying a +file. Be sure to keep this file secured if it is used. If the file +is executable, it will be run and its standard output will be used as +the password. *-h*, *--help*:: Show a help message related to the given sub-command. -*--debug*:: -Enable debugging output for troubleshooting. +If '--valut-password-file' is not supplied ansib-vault will automatically prompt for passwords as required. + CREATE ------ *$ ansible-vault create [options] FILE* -The *create* sub-command is used to initialize a new encrypted file. - -First you will be prompted for a password. The password used with vault currently -must be the same for all files you wish to use together at the same time. +The *create* sub-command is used to initialize a new encrypted file. -After providing a password, the tool will launch whatever editor you have defined -with $EDITOR, and defaults to vim. Once you are done with the editor session, the +After providing a password, the tool will launch whatever editor you have defined +with $EDITOR, and defaults to vim. Once you are done with the editor session, the file will be saved as encrypted data. The default cipher is AES (which is shared-secret based). @@ -67,39 +73,62 @@ EDIT *$ ansible-vault edit [options] FILE* -The *edit* sub-command is used to modify a file which was previously encrypted -using ansible-vault. +The *edit* sub-command is used to modify a file which was previously encrypted using ansible-vault. + +This command will decrypt the file to a temporary file and allow you to edit the file, +saving it back when done and removing the temporary file. 
-This command will decrypt the file to a temporary file and allow you to edit the -file, saving it back when done and removing the temporary file. REKEY ----- -*$ ansible-vault rekey [options] FILE_1 [FILE_2, ..., FILE_N] +*$ ansible-vault rekey [options] FILE_1 [FILE_2, ..., FILE_N]* The *rekey* command is used to change the password on a vault-encrypted files. -This command can update multiple files at once, and will prompt for both the -old and new passwords before modifying any data. +This command can update multiple files at once. + ENCRYPT ------- -*$ ansible-vault encrypt [options] FILE_1 [FILE_2, ..., FILE_N] +*$ ansible-vault encrypt [options] FILE_1 [FILE_2, ..., FILE_N]* + +The *encrypt* sub-command is used to encrypt pre-existing data files. +As with the *rekey* command, you can specify multiple files in one command. + +The *encrypt* command accepts an *--output FILENAME* option to determine where +encrypted output is stored. With this option, input is read from the (at most one) +filename given on the command line; if no input file is given, input is read from stdin. +Either the input or the output file may be given as '-' for stdin and stdout respectively. +If neither input nor output file is given, the command acts as a filter, +reading plaintext from stdin and writing it to stdout. + +Thus any of the following invocations can be used: + +*$ ansible-vault encrypt* + +*$ ansible-vault encrypt --output OUTFILE* + +*$ ansible-vault encrypt INFILE --output OUTFILE* + +*$ echo secret|ansible-vault encrypt --output OUTFILE* -The *encrypt* sub-command is used to encrypt pre-existing data files. As with the -*rekey* command, you can specify multiple files in one command. +Reading from stdin and writing only encrypted output is a good way to prevent +sensitive data from ever hitting disk (either interactively or from a script). DECRYPT ------- -*$ ansible-vault decrypt [options] FILE_1 [FILE_2, ..., FILE_N] +*$ ansible-vault decrypt [options] FILE_1 [FILE_2, ..., FILE_N]* -The *decrypt* sub-command is used to remove all encryption from data files. The files -will be stored as plain-text YAML once again, so be sure that you do not run this -command on data files with active passwords or other sensitive data. In most cases, -users will want to use the *edit* sub-command to modify the files securely. +The *decrypt* sub-command is used to remove all encryption from data files. +The files will be stored as plain-text YAML once again, so be sure that you do not run this +command on data files with active passwords or other sensitive data. +In most cases, users will want to use the *edit* sub-command to modify the files securely. +As with *encrypt*, the *decrypt* subcommand also accepts the *--output FILENAME* +option to specify where plaintext output is stored, and stdin/stdout is handled +as described above. AUTHOR ------ @@ -119,7 +148,7 @@ Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- -*ansible*(1), *ansible-pull*(1), *ansible-doc*(1) +*ansible*(1), *ansible-pull*(1), *ansible-doc*(1), *ansible-playbook*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: . 
IRC and mailing list info can be found diff --git a/docs/man/man1/ansible.1 b/docs/man/man1/ansible.1 deleted file mode 100644 index eb2e8aaeeb2915..00000000000000 --- a/docs/man/man1/ansible.1 +++ /dev/null @@ -1,223 +0,0 @@ -'\" t -.\" Title: ansible -.\" Author: :doctype:manpage -.\" Generator: DocBook XSL Stylesheets v1.78.1 -.\" Date: 12/09/2014 -.\" Manual: System administration commands -.\" Source: Ansible 1.9 -.\" Language: English -.\" -.TH "ANSIBLE" "1" "12/09/2014" "Ansible 1\&.9" "System administration commands" -.\" ----------------------------------------------------------------- -.\" * Define some portability stuff -.\" ----------------------------------------------------------------- -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.\" http://bugs.debian.org/507673 -.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html -.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.ie \n(.g .ds Aq \(aq -.el .ds Aq ' -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.SH "NAME" -ansible \- run a command somewhere else -.SH "SYNOPSIS" -.sp -ansible [\-f forks] [\-m module_name] [\-a args] -.SH "DESCRIPTION" -.sp -\fBAnsible\fR is an extra\-simple tool/framework/API for doing \*(Aqremote things\*(Aq over SSH\&. -.SH "ARGUMENTS" -.PP -\fBhost\-pattern\fR -.RS 4 -A name of a group in the inventory file, a shell\-like glob selecting hosts in inventory file, or any combination of the two separated by semicolons\&. -.RE -.SH "OPTIONS" -.PP -\fB\-v\fR, \fB\-\-verbose\fR -.RS 4 -Verbose mode, more output from successful actions will be shown\&. Give up to three times for more output\&. -.RE -.PP -\fB\-i\fR \fIPATH\fR, \fB\-\-inventory=\fR\fIPATH\fR -.RS 4 -The -\fIPATH\fR -to the inventory hosts file, which defaults to -\fI/etc/ansible/hosts\fR\&. -.RE -.PP -\fB\-f\fR \fINUM\fR, \fB\-\-forks=\fR\fINUM\fR -.RS 4 -Level of parallelism\&. -\fINUM\fR -is specified as an integer, the default is 5\&. -.RE -.PP -\fB\-\-private\-key=\fR\fIPRIVATE_KEY_FILE\fR -.RS 4 -Use this file to authenticate the connection\&. -.RE -.PP -\fB\-m\fR \fINAME\fR, \fB\-\-module\-name=\fR\fINAME\fR -.RS 4 -Execute the module called -\fINAME\fR\&. -.RE -.PP -\fB\-M\fR \fIDIRECTORY\fR, \fB\-\-module\-path=\fR\fIDIRECTORY\fR -.RS 4 -The -\fIDIRECTORY\fR -search path to load modules from\&. The default is -\fI/usr/share/ansible\fR\&. This can also be set with the ANSIBLE_LIBRARY environment variable\&. -.RE -.PP -\fB\-a\fR \*(Aq\fIARGUMENTS\fR\*(Aq, \fB\-\-args=\fR\*(Aq\fIARGUMENTS\fR\*(Aq -.RS 4 -The -\fIARGUMENTS\fR -to pass to the module\&. -.RE -.PP -\fB\-k\fR, \fB\-\-ask\-pass\fR -.RS 4 -Prompt for the SSH password instead of assuming key\-based authentication with ssh\-agent\&. -.RE -.PP -\fB--ask-su-pass\fR -.RS 4 -Prompt for the su password instead of assuming key\-based authentication with ssh\-agent\&. -.RE -.PP -\fB\-K\fR, \fB\-\-ask\-sudo\-pass\fR -.RS 4 -Prompt for the password to use with \-\-sudo, if any\&. -.RE -.PP -\fB\-o\fR, \fB\-\-one\-line\fR -.RS 4 -Try to output everything on one line\&. 
-.RE -.PP -\fB\-s\fR, \fB\-\-sudo\fR -.RS 4 -Run the command as the user given by \-u and sudo to root. -.RE -.PP -\fB\-S\fR, \fB\-\-su\fR -.RS 4 -Run operations with su\&. -.RE -.PP -\fB\-t\fR \fIDIRECTORY\fR, \fB\-\-tree=\fR\fIDIRECTORY\fR -.RS 4 -Save contents in this output -\fIDIRECTORY\fR, with the results saved in a file named after each host\&. -.RE -.PP -\fB\-T\fR \fISECONDS\fR, \fB\-\-timeout=\fR\fISECONDS\fR -.RS 4 -Connection timeout to use when trying to talk to hosts, in -\fISECONDS\fR\&. -.RE -.PP -\fB\-B\fR \fINUM\fR, \fB\-\-background=\fR\fINUM\fR -.RS 4 -Run commands in the background, killing the task after -\fINUM\fR -seconds\&. -.RE -.PP -\fB\-P\fR \fINUM\fR, \fB\-\-poll=\fR\fINUM\fR -.RS 4 -Poll a background job every -\fINUM\fR -seconds\&. Requires -\fB\-B\fR\&. -.RE -.PP -\fB\-u\fR \fIUSERNAME\fR, \fB\-\-user=\fR\fIUSERNAME\fR -.RS 4 -Use this remote -\fIUSERNAME\fR -instead of the current user\&. -.RE -.PP -\fB\-U\fR \fISUDO_USERNAME\fR, \fB\-\-sudo\-user=\fR\fISUDO_USERNAME\fR -.RS 4 -Sudo to -\fISUDO_USERNAME\fR -instead of root\&. Implies \-\-sudo\&. -.RE -.PP -\fB\-c\fR \fICONNECTION\fR, \fB\-\-connection=\fR\fICONNECTION\fR -.RS 4 -Connection type to use\&. Possible options are -\fIparamiko\fR -(SSH), -\fIssh\fR, and -\fIlocal\fR\&. -\fIlocal\fR -is mostly useful for crontab or kickstarts\&. -.RE -.PP -\fB\-l\fR \fISUBSET\fR, \fB\-\-limit=\fR\fISUBSET\fR -.RS 4 -Further limits the selected host/group patterns\&. -.RE -.PP -\fB\-l\fR \fI~REGEX\fR, \fB\-\-limit=\fR\fI~REGEX\fR -.RS 4 -Further limits hosts with a regex pattern\&. -.RE -.PP -\fB\-\-list\-hosts\fR -.RS 4 -Outputs a list of matching hosts; does not execute anything else\&. -.RE -.SH "INVENTORY" -.sp -Ansible stores the hosts it can potentially operate on in an inventory file\&. The syntax is one host per line\&. Groups headers are allowed and are included on their own line, enclosed in square brackets that start the line\&. -.sp -Ranges of hosts are also supported\&. For more information and additional options, see the documentation on http://docs\&.ansible\&.com/\&. -.SH "FILES" -.sp -/etc/ansible/hosts \(em Default inventory file -.sp -/usr/share/ansible/ \(em Default module library -.sp -/etc/ansible/ansible\&.cfg \(em Config file, used if present -.sp -~/\&.ansible\&.cfg \(em User config file, overrides the default config if present -.SH "ENVIRONMENT" -.sp -The following environment variables may be specified\&. -.sp -ANSIBLE_HOSTS \(em Override the default ansible hosts file -.sp -ANSIBLE_LIBRARY \(em Override the default ansible module library path -.sp -ANSIBLE_CONFIG \(em Override the default ansible config file -.SH "AUTHOR" -.sp -Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&. -.SH "COPYRIGHT" -.sp -Copyright \(co 2012, Michael DeHaan -.sp -Ansible is released under the terms of the GPLv3 License\&. -.SH "SEE ALSO" -.sp -\fBansible\-playbook\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1) -.sp -Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. 
IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible diff --git a/docs/man/man1/ansible.1.asciidoc.in b/docs/man/man1/ansible.1.asciidoc.in index 5ac1e49404335c..4cabe6c1dce17d 100644 --- a/docs/man/man1/ansible.1.asciidoc.in +++ b/docs/man/man1/ansible.1.asciidoc.in @@ -7,19 +7,19 @@ ansible(1) NAME ---- -ansible - run a command somewhere else +ansible - run a task on a target host(s) SYNOPSIS -------- -ansible [-f forks] [-m module_name] [-a args] +ansible [-m module_name] [-a args] [options] DESCRIPTION ----------- -*Ansible* is an extra-simple tool/framework/API for doing \'remote things' over -SSH. +*Ansible* is an extra-simple tool/framework/API for doing \'remote things'. +This is the adhoc command that allows for a \'single task playbook' run. ARGUMENTS @@ -27,62 +27,144 @@ ARGUMENTS *host-pattern*:: -A name of a group in the inventory file, a shell-like glob selecting -hosts in inventory file, or any combination of the two separated by -semicolons. +A name of a group in the inventory, a shell-like glob selecting +hosts in inventory or any combination of the two separated by commas. OPTIONS ------- -*-v*, *--verbose*:: +*-a* \'_ARGUMENTS_', *--args=*\'_ARGUMENTS_':: -Verbose mode, more output from successful actions will be shown. Give -up to three times for more output. +The 'ARGUMENTS' to pass to the module. -*-i* 'PATH', *--inventory=*'PATH':: +*--ask-become-pass*:: + +Ask for privilege escalation password. + +*-k*, *--ask-pass*:: + +Prompt for the connection password, if it is needed for the transport used. +For example, using ssh and not having a key-based authentication with ssh-agent. + +*--ask-su-pass*:: + +Prompt for su password, used with --su (deprecated, use become). + +*-K*, *--ask-sudo-pass*:: + +Prompt for the password to use with --sudo, if any (deprecated, use become). + +*--ask-vault-pass*:: -The 'PATH' to the inventory hosts file, which defaults to '/etc/ansible/hosts'. +Prompt for vault password. +*-B* 'NUM', *--background=*'NUM':: + +Run commands in the background, killing the task after 'NUM' seconds. + +*--become-method=*'BECOME_METHOD':: + +Privilege escalation method to use (default=sudo), +valid choices: [ sudo | su | pbrun | pfexec | runas | doas ] + +*--become-user=*'BECOME_USER':: + +Run operations as this user (default=root). + +*-C*, *--check*:: + +Do not make any changes on the remote system, but test resources to see what might +have changed. Note this can not scan all possible resource types and is only +a simulation. + +*-c* 'CONNECTION', *--connection=*'CONNECTION':: + +Connection type to use. Most common options are 'paramiko' (SSH), 'ssh', 'winrm' +and 'local'. 'local' is mostly useful for crontab or kickstarts. + +*-e* 'EXTRA_VARS, *--extra-vars=*'EXTRA_VARS':: + +Extra variables to inject into a playbook, in key=value key=value format or +as quoted YAML/JSON (hashes and arrays). To load variables from a file, specify +the file preceded by @ (e.g. @vars.yml). *-f* 'NUM', *--forks=*'NUM':: Level of parallelism. 'NUM' is specified as an integer, the default is 5. -*--private-key=*'PRIVATE_KEY_FILE':: +*-h*, *--help*:: -Use this file to authenticate the connection. +Show help message and exit. + +*-i* 'PATH', *--inventory=*'PATH':: + +The 'PATH' to the inventory, which defaults to '/etc/ansible/hosts'. +Alternatively you can use a comma separated list of hosts or single host with traling comma 'host,'. 
+ +*-l* 'SUBSET', *--limit=*'SUBSET':: + +Further limits the selected host/group patterns. +You can prefix it with '~' to indicate that the patter in a regex. + +*--list-hosts*:: +Outputs a list of matching hosts; does not execute anything else. *-m* 'NAME', *--module-name=*'NAME':: Execute the module called 'NAME'. - *-M* 'DIRECTORY', *--module-path=*'DIRECTORY':: The 'DIRECTORY' search path to load modules from. The default is '/usr/share/ansible'. This can also be set with the ANSIBLE_LIBRARY environment variable. -*-a* \'_ARGUMENTS_', *--args=*\'_ARGUMENTS_':: +*-o*, *--one-line*:: -The 'ARGUMENTS' to pass to the module. +Try to output everything on one line. -*-k*, *--ask-pass*:: +*-P* 'NUM', *--poll=*'NUM':: -Prompt for the SSH password instead of assuming key-based authentication with ssh-agent. +Poll a background job every 'NUM' seconds. Requires *-B*. -*-K*, *--ask-sudo-pass*:: +*--private-key=*'PRIVATE_KEY_FILE':: -Prompt for the password to use with --sudo, if any +Use this file to authenticate the connection. -*-o*, *--one-line*:: +*-S*, *--su*:: -Try to output everything on one line. +Run operations with su (deprecated, use become). + +*-R* 'SU_USER', *--se-user=*'SUDO_USER':: + +Run operations with su as this user (default=root) (deprecated, use become). *-s*, *--sudo*:: -Run the command as the user given by -u and sudo to root. +Run the command as the user given by -u and sudo to root (deprecated, use become). + +*--ssh-common-args=*''-o ProxyCommand="ssh -W %h:%p ..." ...'':: + +Add the specified arguments to any sftp/scp/ssh command-line. Useful to +set a ProxyCommand to use a jump host, but any arguments that are +accepted by all three programs may be specified. + +*--sftp-extra-args=*''-f ...'':: + +Add the specified arguments to any sftp command-line. + +*--scp-extra-args=*''-l ...'':: + +Add the specified arguments to any scp command-line. + +*--ssh-extra-args=*''-R ...'':: + +Add the specified arguments to any ssh command-line. + +*-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME':: + +Sudo to 'SUDO_USERNAME' default is root. (deprecated, use become). *-t* 'DIRECTORY', *--tree=*'DIRECTORY':: @@ -93,49 +175,50 @@ file named after each host. Connection timeout to use when trying to talk to hosts, in 'SECONDS'. -*-B* 'NUM', *--background=*'NUM':: +*-u* 'USERNAME', *--user=*'USERNAME':: -Run commands in the background, killing the task after 'NUM' seconds. +Use this 'USERNAME' to login to the target host, instead of the current user. -*-P* 'NUM', *--poll=*'NUM':: +*--vault-password-file=*'VAULT_PASSWORD_FILE':: -Poll a background job every 'NUM' seconds. Requires *-B*. +A file containing the vault password to be used during the decryption of vault encrypted files. +Be sure to keep this file secured if it is used. If the file is executable, +it will be run and its standard output will be used as the password. -*-u* 'USERNAME', *--user=*'USERNAME':: +*-v*, *--verbose*:: -Use this remote 'USERNAME' instead of the current user. +Verbose mode, more output from successful actions will be shown. +Give up to three times for more output. -*-U* 'SUDO_USERNAME', *--sudo-user=*'SUDO_USERNAME':: +*--version*:: -Sudo to 'SUDO_USERNAME' instead of root. Implies --sudo. +Show program version number and exit. -*-c* 'CONNECTION', *--connection=*'CONNECTION':: +INVENTORY +--------- -Connection type to use. Possible options are 'paramiko' (SSH), 'ssh', -and 'local'. 'local' is mostly useful for crontab or kickstarts. +Ansible stores the hosts it can potentially operate on in an inventory. 
+This can be an ini-like file, a script, directory or a list. +The ini syntax is one host per line. Groups headers are allowed and +are included on their own line, enclosed in square brackets that start the line. -*-l* 'SUBSET', *--limit=*'SUBSET':: +Ranges of hosts are also supported. For more information and +additional options, see the documentation on http://docs.ansible.com/. -Further limits the selected host/group patterns. -*-l* '\~REGEX', *--limit=*'~REGEX':: +ENVIRONMENT +----------- -Further limits hosts with a regex pattern. +The following environment variables may be specified. -*--list-hosts*:: +ANSIBLE_INVENTORY -- Override the default ansible inventory file -Outputs a list of matching hosts; does not execute anything else. +ANSIBLE_LIBRARY -- Override the default ansible module library path -INVENTORY ---------- +ANSIBLE_CONFIG -- Override the default ansible config file -Ansible stores the hosts it can potentially operate on in an inventory -file. The syntax is one host per line. Groups headers are allowed and -are included on their own line, enclosed in square brackets that -start the line. +Many more are available for most options in ansible.cfg -Ranges of hosts are also supported. For more information and -additional options, see the documentation on http://docs.ansible.com/. FILES ----- @@ -148,37 +231,25 @@ FILES ~/.ansible.cfg -- User config file, overrides the default config if present -ENVIRONMENT ------------ - -The following environment variables may be specified. - -ANSIBLE_HOSTS -- Override the default ansible hosts file - -ANSIBLE_LIBRARY -- Override the default ansible module library path - -ANSIBLE_CONFIG -- Override the default ansible config file - AUTHOR ------ -Ansible was originally written by Michael DeHaan. See the AUTHORS file -for a complete list of contributors. +Ansible was originally written by Michael DeHaan. +See the AUTHORS file for a complete list of contributors. COPYRIGHT --------- Copyright © 2012, Michael DeHaan - Ansible is released under the terms of the GPLv3 License. SEE ALSO -------- -*ansible-playbook*(1), *ansible-pull*(1), *ansible-doc*(1) +*ansible-playbook*(1), *ansible-pull*(1), *ansible-doc*(1), *ansible-vault*(1), *ansible-galaxy*(1) Extensive documentation is available in the documentation site: . 
IRC and mailing list info can be found diff --git a/docsite/Makefile b/docsite/Makefile index 92129f78514672..2b87827c5979a7 100644 --- a/docsite/Makefile +++ b/docsite/Makefile @@ -20,6 +20,8 @@ viewdocs: clean staticmin htmldocs: staticmin ./build-site.py rst +webdocs: htmldocs + clean: -rm -rf htmlout -rm -f .buildinfo @@ -43,4 +45,4 @@ modules: $(FORMATTER) ../hacking/templates/rst.j2 PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../lib/ansible/modules -o rst/ staticmin: - cat _themes/srtd/static/css/theme.css | sed -e 's/^[ \t]*//g; s/[ \t]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css + cat _themes/srtd/static/css/theme.css | sed -e 's/^[ ]*//g; s/[ ]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css diff --git a/docsite/README.md b/docsite/README.md index 5ff774895cd19b..21985a8f6aa0c5 100644 --- a/docsite/README.md +++ b/docsite/README.md @@ -4,7 +4,7 @@ Homepage and documentation source for Ansible This project hosts the source behind [docs.ansible.com](http://docs.ansible.com/) Contributions to the documentation are welcome. To make changes, submit a pull request -that changes the reStructuredText files in the "rst/" directory only, and Michael can +that changes the reStructuredText files in the "rst/" directory only, and the core team can do a docs build and push the static files. If you wish to verify output from the markup diff --git a/docsite/_themes/srtd/footer.html b/docsite/_themes/srtd/footer.html index b6422f9a2dd331..dc1d70a4d1fb1e 100644 --- a/docsite/_themes/srtd/footer.html +++ b/docsite/_themes/srtd/footer.html @@ -12,14 +12,23 @@
+ +

- © Copyright 2015 Ansible, Inc.. + © Copyright 2016 Ansible, Inc.. {%- if last_updated %} {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %} {%- endif %}

-Ansible docs are generated from GitHub sources using Sphinx using a theme provided by Read the Docs. {% if pagename.endswith("_module") %}. Module documentation is not edited directly, but is generated from the source code for the modules. To submit an update to module docs, edit the 'DOCUMENTATION' metadata in the core and extras modules source repositories. {% endif %} +Ansible docs are generated from GitHub sources using Sphinx using a theme provided by Read the Docs. {% if pagename.endswith("_module") %}. Module documentation is not edited directly, but is generated from the source code for the modules. To submit an update to module docs, edit the 'DOCUMENTATION' metadata in the core and extras modules source repositories. {% endif %} diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html index d073c4c22f8996..cb532191e6e5aa 100644 --- a/docsite/_themes/srtd/layout.html +++ b/docsite/_themes/srtd/layout.html @@ -113,6 +113,9 @@ } + + @@ -122,18 +125,29 @@ + + +
+ Documentation +
+
+
{# SIDE NAV, TOGGLES ON MOBILE #}