Initial import and updated license file.
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..75a0526
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,112 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+.hypothesis/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# IPython Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# dotenv
+.env
+
+# virtualenv
+venv/
+ENV/
+
+# Spyder project settings
+.spyderproject
+
+# Rope project settings
+.ropeproject
+
+.vagrant
+*.iml
+archives
+.DS_Store
+ssh_keys
+ansible.log
+*.zip
+license.txt
+*.log
+*.tar.gz
+*.rpm
+*.tar
+ansible_fact_cache_dir
+*jar
+.ansible_cache*
+configs_and_logs
+redhat-rhui.repo
+rhui-load-balancers.conf
+out
+.idea
+*iml
+*retry
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..36bbf62
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,29 @@
+---
+language: python
+python: "2.7"
+
+# Use the new container infrastructure
+sudo: false
+
+# Install ansible
+addons:
+  apt:
+    packages:
+    - python-pip
+
+install:
+  # Install ansible
+  - pip install ansible
+
+  # Check ansible version
+  - ansible --version
+
+  # Create ansible.cfg with correct roles_path
+  - printf '[defaults]\nroles_path=../' >ansible.cfg
+
+script:
+  # Basic role syntax check
+  - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
+
+notifications:
+  webhooks: https://galaxy.ansible.com/api/v1/notifications/
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..cf044ce
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,224 @@
+ ====
+
+    Copyright 2016 Apigee Corporation
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to
+    deal in the Software without restriction, including without limitation the
+    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+    sell copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+    IN THE SOFTWARE.
+
+====
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 Apigee Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4a8a306
--- /dev/null
+++ b/README.md
@@ -0,0 +1,51 @@
+external-port-connectivity-validator-server
+===========================================
+
+This role performs port connectivity validations. The approach I take is deliberately quick and dirty: the role starts a small Python HTTP server (files/validation_server.py) on the indicated port as an asynchronous task, and the server keeps running until a client requests its /done URL.
+The Ansible wait_for module is then used to determine that the port is indeed reachable on the server, as sketched below. I think a future version should record the findings in the Ansible fact cache for later use.
+
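+A minimal sketch of the client-side check (validation_host is a stand-in for however the checking host addresses this server; it is not a variable defined by this role):
+
+    - name: Confirm the port is reachable
+      wait_for:
+        host: '{{ validation_host }}'
+        port: '{{ check_port }}'
+        timeout: 10
+        state: started
+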
+Requirements
+------------
+
+The target hosts need Python 2 available, since the bundled files/validation_server.py uses the Python 2 standard-library BaseHTTPServer module. No third-party packages are required.
+
+Role Variables
+--------------
+
+check_port: Port that should be checked. This variable is required. No defaults are provided.
+
+Dependencies
+------------
+
+This role has a dependency on the opdk-setup-default-settings role. 
+
+Example Playbook
+----------------
+
+This is an example of how this role can be called. This example assumes you are working with the management server.
+
+    - hosts: ms
+      roles:
+       - { role: external-port-connectivity-validator-server, check_port: '{{ cassandra_jmx_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ cassandra_thrift_client_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ cassandra_cql_native_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ cassandra_non_ssl_gossip_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ zk_data_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ zk_leader_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ zk_voter_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ ms_jmx_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ ms_ext_mgmt_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ ui_http_port }}' }
+       - { role: external-port-connectivity-validator-server, check_port: '{{ ldap_data_port }}' }
+
+
+License
+-------
+
+MIT
+
+Author Information
+------------------
+
+The author of this role is Carlos Frias <cfrias@apigee.com>.
+
diff --git a/defaults/main.yml b/defaults/main.yml
new file mode 100644
index 0000000..b41d498
--- /dev/null
+++ b/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for external-port-connectivity-validator-server
diff --git a/files/validation_server.py b/files/validation_server.py
new file mode 100644
index 0000000..e3fa51b
--- /dev/null
+++ b/files/validation_server.py
@@ -0,0 +1,52 @@
+#! /usr/bin/env python
+
+# Runs an HTTP server on the provided IP and port.
+# A GET on /check returns 200 so callers can confirm connectivity.
+# A GET on /done terminates the python process.
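+#
+# Standalone usage: validation_server.py <ip> <port>
+# tasks/main.yml runs it as: validation_server.py "" <check_port>
+# (an empty IP string binds the server on all interfaces).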
+
+import os
+import sys
+try:
+    from ansible.module_utils.basic import *
+except ImportError:
+    # Allow standalone execution outside of Ansible.
+    pass
+
+from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+
+class CheckRequestHandler(BaseHTTPRequestHandler):
+
+    def do_GET(self):
+        if self.path.endswith('/check'):
+            self.send_response(200)
+            self.end_headers()
+        elif self.path.endswith('/done'):
+            print("Test server has been shut down.")
+            os._exit(0)
+
+
+def run(ip, port):
+    server_address = (ip, port)
+    httpd = HTTPServer(server_address, CheckRequestHandler)
+    print("Test server ready; use /check to test connection; use /done to shutdown.")
+    httpd.serve_forever()
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec = dict(
+            port = dict(required = True, type='str'),
+            ip = dict(required = True, type='str')
+        )
+    )
+
+    run(module.params['ip'], int(module.params['port']))
+
+    # Not reached in practice: run() blocks until a /done request calls os._exit(0).
+    module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+    if len(sys.argv) > 2:
+        ip = sys.argv[1]
+        port = int(sys.argv[2])
+        run(ip, port)
+    else:
+        main()
diff --git a/handlers/main.yml b/handlers/main.yml
new file mode 100644
index 0000000..1f2c86b
--- /dev/null
+++ b/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for external-port-connectivity-validator-server
diff --git a/meta/main.yml b/meta/main.yml
new file mode 100644
index 0000000..180b8a2
--- /dev/null
+++ b/meta/main.yml
@@ -0,0 +1,192 @@
+galaxy_info:
+  author: Carlos Frias
+  description: Performs port connectivity validations by starting a short-lived HTTP server on the port under test
+  company: Apigee Corporation
+
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: http://example.com/issue/tracker
+
+  # Some suggested licenses:
+  # - BSD (default)
+  # - MIT
+  # - GPLv2
+  # - GPLv3
+  # - Apache
+  # - CC-BY
+  license: MIT
+
+  min_ansible_version: 1.2
+
+  # Optionally specify the branch Galaxy will use when accessing the GitHub
+  # repo for this role. During role install, if no tags are available,
+  # Galaxy will use this branch. During import Galaxy will access files on
+  # this branch. If travis integration is configured, only notifications for this
+  # branch will be accepted. Otherwise, in all cases, the repo's default branch
+  # (usually master) will be used.
+  #github_branch:
+
+  #
+  # Below are all platforms currently available. Just uncomment
+  # the ones that apply to your role. If you don't see your
+  # platform on this list, let us know and we'll get it added!
+  #
+  #platforms:
+  #- name: EL
+  #  versions:
+  #  - all
+  #  - 5
+  #  - 6
+  #  - 7
+  #- name: GenericUNIX
+  #  versions:
+  #  - all
+  #  - any
+  #- name: OpenBSD
+  #  versions:
+  #  - all
+  #  - 5.6
+  #  - 5.7
+  #  - 5.8
+  #  - 5.9
+  #  - 6.0
+  #- name: Fedora
+  #  versions:
+  #  - all
+  #  - 16
+  #  - 17
+  #  - 18
+  #  - 19
+  #  - 20
+  #  - 21
+  #  - 22
+  #  - 23
+  #- name: opensuse
+  #  versions:
+  #  - all
+  #  - 12.1
+  #  - 12.2
+  #  - 12.3
+  #  - 13.1
+  #  - 13.2
+  #- name: MacOSX
+  #  versions:
+  #  - all
+  #  - 10.10
+  #  - 10.11
+  #  - 10.12
+  #  - 10.7
+  #  - 10.8
+  #  - 10.9
+  #- name: IOS
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Solaris
+  #  versions:
+  #  - all
+  #  - 10
+  #  - 11.0
+  #  - 11.1
+  #  - 11.2
+  #  - 11.3
+  #- name: SmartOS
+  #  versions:
+  #  - all
+  #  - any
+  #- name: eos
+  #  versions:
+  #  - all
+  #  - Any
+  #- name: Windows
+  #  versions:
+  #  - all
+  #  - 2012R2
+  #- name: Amazon
+  #  versions:
+  #  - all
+  #  - 2013.03
+  #  - 2013.09
+  #- name: GenericBSD
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Junos
+  #  versions:
+  #  - all
+  #  - any
+  #- name: FreeBSD
+  #  versions:
+  #  - all
+  #  - 10.0
+  #  - 10.1
+  #  - 10.2
+  #  - 10.3
+  #  - 8.0
+  #  - 8.1
+  #  - 8.2
+  #  - 8.3
+  #  - 8.4
+  #  - 9.0
+  #  - 9.1
+  #  - 9.2
+  #  - 9.3
+  #- name: Ubuntu
+  #  versions:
+  #  - all
+  #  - lucid
+  #  - maverick
+  #  - natty
+  #  - oneiric
+  #  - precise
+  #  - quantal
+  #  - raring
+  #  - saucy
+  #  - trusty
+  #  - utopic
+  #  - vivid
+  #  - wily
+  #  - xenial
+  #- name: SLES
+  #  versions:
+  #  - all
+  #  - 10SP3
+  #  - 10SP4
+  #  - 11
+  #  - 11SP1
+  #  - 11SP2
+  #  - 11SP3
+  #  - 11SP4
+  #  - 12
+  #  - 12SP1
+  #- name: GenericLinux
+  #  versions:
+  #  - all
+  #  - any
+  #- name: NXOS
+  #  versions:
+  #  - all
+  #  - any
+  #- name: Debian
+  #  versions:
+  #  - all
+  #  - etch
+  #  - jessie
+  #  - lenny
+  #  - sid
+  #  - squeeze
+  #  - stretch
+  #  - wheezy
+
+  galaxy_tags: []
+    # List tags for your role here, one per line. A tag is
+    # a keyword that describes and categorizes the role.
+    # Users find roles by searching for tags. Be sure to
+    # remove the '[]' above if you add tags to this list.
+    #
+    # NOTE: A tag is limited to a single word comprised of
+    # alphanumeric characters. Maximum 20 tags per role.
+
+dependencies:
+- { role: opdk-setup-default-settings }
diff --git a/tasks/main.yml b/tasks/main.yml
new file mode 100644
index 0000000..da0c130
--- /dev/null
+++ b/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+# tasks file for external-port-connectivity-validator-server
+
+- name: Create script staging folder
+  file:
+    path: /tmp/simpleserver
+    state: directory
+
+- name: Copy validation server script to the target host
+  copy:
+    src: validation_server.py
+    dest: /tmp/simpleserver/validation_server.py
+    owner: '{{ opdk_user_name }}'
+    group: '{{ opdk_group_name }}'
+    mode: 0755
+
+- name: Run validation server at http://{{ local_address }}:{{ check_port }}
+  shell: /tmp/simpleserver/validation_server.py "" {{ check_port }}
+  async: 1000
+  poll: 0
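+
+# Sketch (an assumption, not a task in this role): once client-side checks
+# finish, the server can be stopped by requesting /done, e.g. with the uri
+# module. The connection drops as the process exits, so errors are ignored.
+#
+# - name: Shut down validation server
+#   uri:
+#     url: 'http://{{ local_address }}:{{ check_port }}/done'
+#   ignore_errors: yes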
diff --git a/tests/ansible.cfg b/tests/ansible.cfg
new file mode 100644
index 0000000..08b86a4
--- /dev/null
+++ b/tests/ansible.cfg
@@ -0,0 +1,25 @@
+[defaults]
+host_key_checking = false
+hostfile = inventory
+forks = 50
+# remote_user = mpurwar
+remote_user = ec2-user
+private_key_file = ~/.ssh/id_rsa
+roles_path = ../../
+log_path = ./ansible.log
+retry_files_enabled = False
+executable = /bin/bash
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = ~/.ansible/tmp/cache
+
+# library = /etc/ansible/library/
+
+# 2 hour timeout
+fact_caching_timeout = 7200
+timeout = 60
+
+[ssh_connection]
+ssh_args = -o ControlMaster=auto -o ControlPersist=7200s -o KbdInteractiveAuthentication=no -o PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no
+control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r
+
diff --git a/tests/facts.yml b/tests/facts.yml
new file mode 100644
index 0000000..d16adbb
--- /dev/null
+++ b/tests/facts.yml
@@ -0,0 +1,8 @@
+---
+- hosts: planet
+  strategy: free
+  tasks:
+  - setup:
+  - ec2_facts:
+  tags:
+  - facts
diff --git a/tests/inventory/ec2.py b/tests/inventory/ec2.py
new file mode 100755
index 0000000..f30958a
--- /dev/null
+++ b/tests/inventory/ec2.py
@@ -0,0 +1,1385 @@
+#!/usr/bin/env python
+
+'''
+EC2 external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API request to
+AWS EC2 using the Boto library.
+
+NOTE: This script assumes Ansible is being executed where the environment
+variables needed for Boto have already been set:
+    export AWS_ACCESS_KEY_ID='AK123'
+    export AWS_SECRET_ACCESS_KEY='abc123'
+
+This script also assumes there is an ec2.ini file alongside it.  To specify a
+different path to ec2.ini, define the EC2_INI_PATH environment variable:
+
+    export EC2_INI_PATH=/path/to/my_ec2.ini
+
+If you're using eucalyptus you need to set the above variables and
+you need to define:
+
+    export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
+
+If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
+using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
+the AWS_PROFILE variable:
+
+    AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
+
+For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
+
+When run against a specific host, this script returns the following variables:
+ - ec2_ami_launch_index
+ - ec2_architecture
+ - ec2_association
+ - ec2_attachTime
+ - ec2_attachment
+ - ec2_attachmentId
+ - ec2_client_token
+ - ec2_deleteOnTermination
+ - ec2_description
+ - ec2_deviceIndex
+ - ec2_dns_name
+ - ec2_eventsSet
+ - ec2_group_name
+ - ec2_hypervisor
+ - ec2_id
+ - ec2_image_id
+ - ec2_instanceState
+ - ec2_instance_type
+ - ec2_ipOwnerId
+ - ec2_ip_address
+ - ec2_item
+ - ec2_kernel
+ - ec2_key_name
+ - ec2_launch_time
+ - ec2_monitored
+ - ec2_monitoring
+ - ec2_networkInterfaceId
+ - ec2_ownerId
+ - ec2_persistent
+ - ec2_placement
+ - ec2_platform
+ - ec2_previous_state
+ - ec2_private_dns_name
+ - ec2_private_ip_address
+ - ec2_publicIp
+ - ec2_public_dns_name
+ - ec2_ramdisk
+ - ec2_reason
+ - ec2_region
+ - ec2_requester_id
+ - ec2_root_device_name
+ - ec2_root_device_type
+ - ec2_security_group_ids
+ - ec2_security_group_names
+ - ec2_shutdown_state
+ - ec2_sourceDestCheck
+ - ec2_spot_instance_request_id
+ - ec2_state
+ - ec2_state_code
+ - ec2_state_reason
+ - ec2_status
+ - ec2_subnet_id
+ - ec2_tenancy
+ - ec2_virtualization_type
+ - ec2_vpc_id
+
+These variables are pulled out of a boto.ec2.instance object. There is a lack of
+consistency with variable spellings (camelCase and underscores) since this
+just loops through all variables the object exposes. It is preferred to use the
+ones with underscores when multiple exist.
+
+In addition, if an instance has AWS Tags associated with it, each tag is a new
+variable named:
+ - ec2_tag_[Key] = [Value]
+
+Security groups are comma-separated in 'ec2_security_group_ids' and
+'ec2_security_group_names'.
+'''
+
+# (c) 2012, Peter Sankauskas
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+import sys
+import os
+import argparse
+import re
+from time import time
+import boto
+from boto import ec2
+from boto import rds
+from boto import elasticache
+from boto import route53
+import six
+
+from six.moves import configparser
+from collections import defaultdict
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+
+class Ec2Inventory(object):
+
+    def _empty_inventory(self):
+        return {"_meta" : {"hostvars" : {}}}
+
+    def __init__(self):
+        ''' Main execution path '''
+
+        # Inventory grouped by instance IDs, tags, security groups, regions,
+        # and availability zones
+        self.inventory = self._empty_inventory()
+
+        # Index of hostname (address) to instance ID
+        self.index = {}
+
+        # Boto profile to use (if any)
+        self.boto_profile = None
+
+        # Read settings and parse CLI arguments
+        self.parse_cli_args()
+        self.read_settings()
+
+        # Make sure that profile_name is not passed at all if not set
+        # as pre 2.24 boto will fall over otherwise
+        if self.boto_profile:
+            if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
+                self.fail_with_error("boto version must be >= 2.24 to use profile")
+
+        # Cache
+        if self.args.refresh_cache:
+            self.do_api_calls_update_cache()
+        elif not self.is_cache_valid():
+            self.do_api_calls_update_cache()
+
+        # Data to print
+        if self.args.host:
+            data_to_print = self.get_host_info()
+
+        elif self.args.list:
+            # Display list of instances for inventory
+            if self.inventory == self._empty_inventory():
+                data_to_print = self.get_inventory_from_cache()
+            else:
+                data_to_print = self.json_format_dict(self.inventory, True)
+
+        print(data_to_print)
+
+
+    def is_cache_valid(self):
+        ''' Determines whether the cache file has expired or is still valid '''
+
+        if os.path.isfile(self.cache_path_cache):
+            mod_time = os.path.getmtime(self.cache_path_cache)
+            current_time = time()
+            if (mod_time + self.cache_max_age) > current_time:
+                if os.path.isfile(self.cache_path_index):
+                    return True
+
+        return False
+
+
+    def read_settings(self):
+        ''' Reads the settings from the ec2.ini file '''
+        if six.PY3:
+            config = configparser.ConfigParser()
+        else:
+            config = configparser.SafeConfigParser()
+        ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
+        ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
+        config.read(ec2_ini_path)
+
+        # is eucalyptus?
+        self.eucalyptus_host = None
+        self.eucalyptus = False
+        if config.has_option('ec2', 'eucalyptus'):
+            self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
+        if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
+            self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
+
+        # Regions
+        self.regions = []
+        configRegions = config.get('ec2', 'regions')
+        configRegions_exclude = config.get('ec2', 'regions_exclude')
+        if (configRegions == 'all'):
+            if self.eucalyptus_host:
+                self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
+            else:
+                for regionInfo in ec2.regions():
+                    if regionInfo.name not in configRegions_exclude:
+                        self.regions.append(regionInfo.name)
+        else:
+            self.regions = configRegions.split(",")
+
+        # Destination addresses
+        self.destination_variable = config.get('ec2', 'destination_variable')
+        self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
+
+        if config.has_option('ec2', 'hostname_variable'):
+            self.hostname_variable = config.get('ec2', 'hostname_variable')
+        else:
+            self.hostname_variable = None
+
+        if config.has_option('ec2', 'destination_format') and \
+                config.has_option('ec2', 'destination_format_tags'):
+            self.destination_format = config.get('ec2', 'destination_format')
+            self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
+        else:
+            self.destination_format = None
+            self.destination_format_tags = None
+
+        # Route53
+        self.route53_enabled = config.getboolean('ec2', 'route53')
+        self.route53_excluded_zones = []
+        if config.has_option('ec2', 'route53_excluded_zones'):
+            self.route53_excluded_zones.extend(
+                    config.get('ec2', 'route53_excluded_zones', '').split(','))
+
+        # Include RDS instances?
+        self.rds_enabled = True
+        if config.has_option('ec2', 'rds'):
+            self.rds_enabled = config.getboolean('ec2', 'rds')
+
+        # Include ElastiCache instances?
+        self.elasticache_enabled = True
+        if config.has_option('ec2', 'elasticache'):
+            self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
+
+        # Return all EC2 instances?
+        if config.has_option('ec2', 'all_instances'):
+            self.all_instances = config.getboolean('ec2', 'all_instances')
+        else:
+            self.all_instances = False
+
+        # Instance states to be gathered in inventory. Default is 'running'.
+        # Setting 'all_instances' to 'yes' overrides this option.
+        ec2_valid_instance_states = [
+            'pending',
+            'running',
+            'shutting-down',
+            'terminated',
+            'stopping',
+            'stopped'
+        ]
+        self.ec2_instance_states = []
+        if self.all_instances:
+            self.ec2_instance_states = ec2_valid_instance_states
+        elif config.has_option('ec2', 'instance_states'):
+            for instance_state in config.get('ec2', 'instance_states').split(','):
+                instance_state = instance_state.strip()
+                if instance_state not in ec2_valid_instance_states:
+                    continue
+                self.ec2_instance_states.append(instance_state)
+        else:
+            self.ec2_instance_states = ['running']
+
+        # Return all RDS instances? (if RDS is enabled)
+        if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
+            self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
+        else:
+            self.all_rds_instances = False
+
+        # Return all ElastiCache replication groups? (if ElastiCache is enabled)
+        if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
+            self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
+        else:
+            self.all_elasticache_replication_groups = False
+
+        # Return all ElastiCache clusters? (if ElastiCache is enabled)
+        if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
+            self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
+        else:
+            self.all_elasticache_clusters = False
+
+        # Return all ElastiCache nodes? (if ElastiCache is enabled)
+        if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
+            self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
+        else:
+            self.all_elasticache_nodes = False
+
+        # boto configuration profile (prefer CLI argument)
+        self.boto_profile = self.args.boto_profile
+        if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
+            self.boto_profile = config.get('ec2', 'boto_profile')
+
+        # Cache related
+        cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
+        if self.boto_profile:
+            cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
+        if not os.path.exists(cache_dir):
+            os.makedirs(cache_dir)
+
+        cache_name = 'ansible-ec2'
+        aws_profile = lambda: (self.boto_profile or
+                               os.environ.get('AWS_PROFILE') or
+                               os.environ.get('AWS_ACCESS_KEY_ID'))
+        if aws_profile():
+            cache_name = '%s-%s' % (cache_name, aws_profile())
+        self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
+        self.cache_path_index = cache_dir + "/%s.index" % cache_name
+        self.cache_max_age = config.getint('ec2', 'cache_max_age')
+
+        if config.has_option('ec2', 'expand_csv_tags'):
+            self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
+        else:
+            self.expand_csv_tags = False
+
+        # Configure nested groups instead of flat namespace.
+        if config.has_option('ec2', 'nested_groups'):
+            self.nested_groups = config.getboolean('ec2', 'nested_groups')
+        else:
+            self.nested_groups = False
+
+        # Replace dash or not in group names
+        if config.has_option('ec2', 'replace_dash_in_groups'):
+            self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
+        else:
+            self.replace_dash_in_groups = True
+
+        # Configure which groups should be created.
+        group_by_options = [
+            'group_by_instance_id',
+            'group_by_region',
+            'group_by_availability_zone',
+            'group_by_ami_id',
+            'group_by_instance_type',
+            'group_by_key_pair',
+            'group_by_vpc_id',
+            'group_by_security_group',
+            'group_by_tag_keys',
+            'group_by_tag_none',
+            'group_by_route53_names',
+            'group_by_rds_engine',
+            'group_by_rds_parameter_group',
+            'group_by_elasticache_engine',
+            'group_by_elasticache_cluster',
+            'group_by_elasticache_parameter_group',
+            'group_by_elasticache_replication_group',
+        ]
+        for option in group_by_options:
+            if config.has_option('ec2', option):
+                setattr(self, option, config.getboolean('ec2', option))
+            else:
+                setattr(self, option, True)
+
+        # Do we need to just include hosts that match a pattern?
+        try:
+            pattern_include = config.get('ec2', 'pattern_include')
+            if pattern_include and len(pattern_include) > 0:
+                self.pattern_include = re.compile(pattern_include)
+            else:
+                self.pattern_include = None
+        except configparser.NoOptionError:
+            self.pattern_include = None
+
+        # Do we need to exclude hosts that match a pattern?
+        try:
+            pattern_exclude = config.get('ec2', 'pattern_exclude')
+            if pattern_exclude and len(pattern_exclude) > 0:
+                self.pattern_exclude = re.compile(pattern_exclude)
+            else:
+                self.pattern_exclude = None
+        except configparser.NoOptionError:
+            self.pattern_exclude = None
+
+        # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
+        self.ec2_instance_filters = defaultdict(list)
+        if config.has_option('ec2', 'instance_filters'):
+
+            filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
+
+            for instance_filter in filters:
+                instance_filter = instance_filter.strip()
+                if not instance_filter or '=' not in instance_filter:
+                    continue
+                filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
+                if not filter_key:
+                    continue
+                self.ec2_instance_filters[filter_key].append(filter_value)
+
+    def parse_cli_args(self):
+        ''' Command line argument processing '''
+
+        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
+        parser.add_argument('--list', action='store_true', default=True,
+                            help='List instances (default: True)')
+        parser.add_argument('--host', action='store',
+                            help='Get all the variables about a specific instance')
+        parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
+        parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
+                            help='Use boto profile for connections to EC2')
+        self.args = parser.parse_args()
+
+
+    def do_api_calls_update_cache(self):
+        ''' Do API calls to each region, and save data in cache files '''
+
+        if self.route53_enabled:
+            self.get_route53_records()
+
+        for region in self.regions:
+            self.get_instances_by_region(region)
+            if self.rds_enabled:
+                self.get_rds_instances_by_region(region)
+            if self.elasticache_enabled:
+                self.get_elasticache_clusters_by_region(region)
+                self.get_elasticache_replication_groups_by_region(region)
+
+        self.write_to_cache(self.inventory, self.cache_path_cache)
+        self.write_to_cache(self.index, self.cache_path_index)
+
+    def connect(self, region):
+        ''' create connection to api server'''
+        if self.eucalyptus:
+            conn = boto.connect_euca(host=self.eucalyptus_host)
+            conn.APIVersion = '2010-08-31'
+        else:
+            conn = self.connect_to_aws(ec2, region)
+        return conn
+
+    def boto_fix_security_token_in_profile(self, connect_args):
+        ''' monkey patch for boto issue boto/boto#2100 '''
+        profile = 'profile ' + self.boto_profile
+        if boto.config.has_option(profile, 'aws_security_token'):
+            connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
+        return connect_args
+
+    def connect_to_aws(self, module, region):
+        connect_args = {}
+
+        # only pass the profile name if it's set (as it is not supported by older boto versions)
+        if self.boto_profile:
+            connect_args['profile_name'] = self.boto_profile
+            self.boto_fix_security_token_in_profile(connect_args)
+
+        conn = module.connect_to_region(region, **connect_args)
+        # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
+        if conn is None:
+            self.fail_with_error("region name: %s likely not supported, or AWS is down.  connection to region failed." % region)
+        return conn
+
+    def get_instances_by_region(self, region):
+        ''' Makes an AWS EC2 API call to get the list of instances in a
+        particular region '''
+
+        try:
+            conn = self.connect(region)
+            reservations = []
+            if self.ec2_instance_filters:
+                for filter_key, filter_values in self.ec2_instance_filters.items():
+                    reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
+            else:
+                reservations = conn.get_all_instances()
+
+            for reservation in reservations:
+                for instance in reservation.instances:
+                    self.add_instance(instance, region)
+
+        except boto.exception.BotoServerError as e:
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            else:
+                backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
+                error = "Error connecting to %s backend.\n%s" % (backend, e.message)
+            self.fail_with_error(error, 'getting EC2 instances')
+
+    def get_rds_instances_by_region(self, region):
+        ''' Makes an AWS API call to get the list of RDS instances in a
+        particular region '''
+
+        try:
+            conn = self.connect_to_aws(rds, region)
+            if conn:
+                marker = None
+                while True:
+                    instances = conn.get_all_dbinstances(marker=marker)
+                    marker = instances.marker
+                    for instance in instances:
+                        self.add_rds_instance(instance, region)
+                    if not marker:
+                        break
+        except boto.exception.BotoServerError as e:
+            error = e.reason
+
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            if not e.reason == "Forbidden":
+                error = "Looks like AWS RDS is down:\n%s" % e.message
+            self.fail_with_error(error, 'getting RDS instances')
+
+    def get_elasticache_clusters_by_region(self, region):
+        ''' Makes an AWS API call to get the list of ElastiCache clusters
+        (with nodes' info) in a particular region.'''
+
+        # The ElastiCache boto module doesn't provide a get_all_instances
+        # method, so we need to call describe directly (it would be called by
+        # the shorthand method anyway...)
+        try:
+            conn = self.connect_to_aws(elasticache, region)
+            if conn:
+                # show_cache_node_info = True
+                # because we also want nodes' information
+                response = conn.describe_cache_clusters(None, None, None, True)
+
+        except boto.exception.BotoServerError as e:
+            error = e.reason
+
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            if not e.reason == "Forbidden":
+                error = "Looks like AWS ElastiCache is down:\n%s" % e.message
+            self.fail_with_error(error, 'getting ElastiCache clusters')
+
+        try:
+            # Boto also doesn't provide wrapper classes to CacheClusters or
+            # CacheNodes. Because of that we can't make use of the get_list
+            # method in the AWSQueryConnection. Let's do the work manually
+            clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
+
+        except KeyError as e:
+            error = "ElastiCache query to AWS failed (unexpected format)."
+            self.fail_with_error(error, 'getting ElastiCache clusters')
+
+        for cluster in clusters:
+            self.add_elasticache_cluster(cluster, region)
+
+    def get_elasticache_replication_groups_by_region(self, region):
+        ''' Makes an AWS API call to get the list of ElastiCache replication
+        groups in a particular region.'''
+
+        # The ElastiCache boto module doesn't provide a get_all_instances
+        # method, so we need to call describe directly (it would be called by
+        # the shorthand method anyway...)
+        try:
+            conn = self.connect_to_aws(elasticache, region)
+            if conn:
+                response = conn.describe_replication_groups()
+
+        except boto.exception.BotoServerError as e:
+            error = e.reason
+
+            if e.error_code == 'AuthFailure':
+                error = self.get_auth_error_message()
+            if not e.reason == "Forbidden":
+                error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
+            self.fail_with_error(error, 'getting ElastiCache replication groups')
+
+        try:
+            # Boto also doesn't provide wrapper classes to ReplicationGroups.
+            # Because of that we can't make use of the get_list method in the
+            # AWSQueryConnection. Let's do the work manually
+            replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
+
+        except KeyError as e:
+            error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
+            self.fail_with_error(error, 'getting ElastiCache replication groups')
+
+        for replication_group in replication_groups:
+            self.add_elasticache_replication_group(replication_group, region)
+
+    def get_auth_error_message(self):
+        ''' create an informative error message if there is an issue authenticating'''
+        errors = ["Authentication error retrieving ec2 inventory."]
+        if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
+            errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
+        else:
+            errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
+
+        boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
+        boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
+        if len(boto_config_found) > 0:
+            errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
+        else:
+            errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
+
+        return '\n'.join(errors)
+
+    def fail_with_error(self, err_msg, err_operation=None):
+        '''log an error to std err for ansible-playbook to consume and exit'''
+        if err_operation:
+            err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
+                    err_msg=err_msg, err_operation=err_operation)
+        sys.stderr.write(err_msg)
+        sys.exit(1)
+
+    def get_instance(self, region, instance_id):
+        conn = self.connect(region)
+
+        reservations = conn.get_all_instances([instance_id])
+        for reservation in reservations:
+            for instance in reservation.instances:
+                return instance
+
+    def add_instance(self, instance, region):
+        ''' Adds an instance to the inventory and index, as long as it is
+        addressable '''
+
+        # Only return instances with desired instance states
+        if instance.state not in self.ec2_instance_states:
+            return
+
+        # Select the best destination address
+        if self.destination_format and self.destination_format_tags:
+            dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
+        elif instance.subnet_id:
+            dest = getattr(instance, self.vpc_destination_variable, None)
+            if dest is None:
+                dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
+        else:
+            dest = getattr(instance, self.destination_variable, None)
+            if dest is None:
+                dest = getattr(instance, 'tags').get(self.destination_variable, None)
+
+        if not dest:
+            # Skip instances we cannot address (e.g. private VPC subnet)
+            return
+
+        # Set the inventory name
+        hostname = None
+        if self.hostname_variable:
+            if self.hostname_variable.startswith('tag_'):
+                hostname = instance.tags.get(self.hostname_variable[4:], None)
+            else:
+                hostname = getattr(instance, self.hostname_variable)
+
+        # If we can't get a nice hostname, use the destination address
+        if not hostname:
+            hostname = dest
+        else:
+            hostname = self.to_safe(hostname).lower()
+
+        # if we only want to include hosts that match a pattern, skip those that don't
+        if self.pattern_include and not self.pattern_include.match(hostname):
+            return
+
+        # if we need to exclude hosts that match a pattern, skip those
+        if self.pattern_exclude and self.pattern_exclude.match(hostname):
+            return
+
+        # Add to index
+        self.index[hostname] = [region, instance.id]
+
+        # Inventory: Group by instance ID (always a group of 1)
+        if self.group_by_instance_id:
+            self.inventory[instance.id] = [hostname]
+            if self.nested_groups:
+                self.push_group(self.inventory, 'instances', instance.id)
+
+        # Inventory: Group by region
+        if self.group_by_region:
+            self.push(self.inventory, region, hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'regions', region)
+
+        # Inventory: Group by availability zone
+        if self.group_by_availability_zone:
+            self.push(self.inventory, instance.placement, hostname)
+            if self.nested_groups:
+                if self.group_by_region:
+                    self.push_group(self.inventory, region, instance.placement)
+                self.push_group(self.inventory, 'zones', instance.placement)
+
+        # Inventory: Group by Amazon Machine Image (AMI) ID
+        if self.group_by_ami_id:
+            ami_id = self.to_safe(instance.image_id)
+            self.push(self.inventory, ami_id, hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'images', ami_id)
+
+        # Inventory: Group by instance type
+        if self.group_by_instance_type:
+            type_name = self.to_safe('type_' + instance.instance_type)
+            self.push(self.inventory, type_name, hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'types', type_name)
+
+        # Inventory: Group by key pair
+        if self.group_by_key_pair and instance.key_name:
+            key_name = self.to_safe('key_' + instance.key_name)
+            self.push(self.inventory, key_name, hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'keys', key_name)
+
+        # Inventory: Group by VPC
+        if self.group_by_vpc_id and instance.vpc_id:
+            vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
+            self.push(self.inventory, vpc_id_name, hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'vpcs', vpc_id_name)
+
+        # Inventory: Group by security group
+        if self.group_by_security_group:
+            try:
+                for group in instance.groups:
+                    key = self.to_safe("security_group_" + group.name)
+                    self.push(self.inventory, key, hostname)
+                    if self.nested_groups:
+                        self.push_group(self.inventory, 'security_groups', key)
+            except AttributeError:
+                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
+                                                'Please upgrade boto >= 2.3.0.']))
+
+        # Inventory: Group by tag keys
+        if self.group_by_tag_keys:
+            for k, v in instance.tags.items():
+                if self.expand_csv_tags and v and ',' in v:
+                    values = map(lambda x: x.strip(), v.split(','))
+                else:
+                    values = [v]
+
+                for v in values:
+                    if v:
+                        key = self.to_safe("tag_" + k + "=" + v)
+                    else:
+                        key = self.to_safe("tag_" + k)
+                    self.push(self.inventory, key, hostname)
+                    if self.nested_groups:
+                        self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+                        if v:
+                            self.push_group(self.inventory, self.to_safe("tag_" + k), key)
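+            # Example (hypothetical tags): {'Role': 'web,api'} with
+            # expand_csv_tags enabled produces the groups 'tag_Role_web' and
+            # 'tag_Role_api', each nested under 'tag_Role' when nested_groups
+            # is set.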
+
+        # Inventory: Group by Route53 domain names if enabled
+        if self.route53_enabled and self.group_by_route53_names:
+            route53_names = self.get_instance_route53_names(instance)
+            for name in route53_names:
+                self.push(self.inventory, name, hostname)
+                if self.nested_groups:
+                    self.push_group(self.inventory, 'route53', name)
+
+        # Global Tag: instances without tags
+        if self.group_by_tag_none and len(instance.tags) == 0:
+            self.push(self.inventory, 'tag_none', hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'tags', 'tag_none')
+
+        # Global Tag: tag all EC2 instances
+        self.push(self.inventory, 'ec2', hostname)
+
+        self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
+        self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
+
+
+    def add_rds_instance(self, instance, region):
+        ''' Adds an RDS instance to the inventory and index, as long as it is
+        addressable '''
+
+        # Only want available instances unless all_rds_instances is True
+        if not self.all_rds_instances and instance.status != 'available':
+            return
+
+        # Select the best destination address
+        dest = instance.endpoint[0]
+
+        if not dest:
+            # Skip instances we cannot address (e.g. private VPC subnet)
+            return
+
+        # Set the inventory name
+        hostname = None
+        if self.hostname_variable:
+            if self.hostname_variable.startswith('tag_'):
+                hostname = instance.tags.get(self.hostname_variable[4:], None)
+            else:
+                hostname = getattr(instance, self.hostname_variable)
+
+        # If we can't get a nice hostname, use the destination address
+        if not hostname:
+            hostname = dest
+
+        hostname = self.to_safe(hostname).lower()
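+        # Example (hypothetical): with hostname_variable = 'tag_Name' and a
+        # 'Name' tag of 'orders-db', the inventory hostname becomes
+        # 'orders_db' (or 'orders-db' when dashes are kept); without a usable
+        # value it falls back to the endpoint address.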
+
+        # Add to index
+        self.index[hostname] = [region, instance.id]
+
+        # Inventory: Group by instance ID (always a group of 1)
+        if self.group_by_instance_id:
+            self.inventory[instance.id] = [hostname]
+            if self.nested_groups:
+                self.push_group(self.inventory, 'instances', instance.id)
+
+        # Inventory: Group by region
+        if self.group_by_region:
+            self.push(self.inventory, region, hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'regions', region)
+
+        # Inventory: Group by availability zone
+        if self.group_by_availability_zone:
+            self.push(self.inventory, instance.availability_zone, hostname)
+            if self.nested_groups:
+                if self.group_by_region:
+                    self.push_group(self.inventory, region, instance.availability_zone)
+                self.push_group(self.inventory, 'zones', instance.availability_zone)
+
+        # Inventory: Group by instance type
+        if self.group_by_instance_type:
+            type_name = self.to_safe('type_' + instance.instance_class)
+            self.push(self.inventory, type_name, hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'types', type_name)
+
+        # Inventory: Group by VPC
+        if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
+            vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
+            self.push(self.inventory, vpc_id_name, hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'vpcs', vpc_id_name)
+
+        # Inventory: Group by security group
+        if self.group_by_security_group:
+            try:
+                if instance.security_group:
+                    key = self.to_safe("security_group_" + instance.security_group.name)
+                    self.push(self.inventory, key, hostname)
+                    if self.nested_groups:
+                        self.push_group(self.inventory, 'security_groups', key)
+
+            except AttributeError:
+                self.fail_with_error('\n'.join(['The boto package appears to be too old.',
+                                                'Please upgrade to boto >= 2.3.0.']))
+
+
+        # Inventory: Group by engine
+        if self.group_by_rds_engine:
+            self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
+
+        # Inventory: Group by parameter group
+        if self.group_by_rds_parameter_group:
+            self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
+
+        # Global Tag: all RDS instances
+        self.push(self.inventory, 'rds', hostname)
+
+        self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
+        self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
+
+    def add_elasticache_cluster(self, cluster, region):
+        ''' Adds an ElastiCache cluster to the inventory and index, as long as
+        its nodes are addressable '''
+
+        # Only want available clusters unless all_elasticache_clusters is True
+        if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
+            return
+
+        # Select the best destination address
+        if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
+            # Memcached cluster
+            dest = cluster['ConfigurationEndpoint']['Address']
+            is_redis = False
+        else:
+            # Redis single-node cluster
+            # Because all Redis clusters are single nodes, we'll merge the
+            # info from the cluster with info about the node
+            dest = cluster['CacheNodes'][0]['Endpoint']['Address']
+            is_redis = True
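+        # Example (hypothetical): a Memcached cluster is addressed through its
+        # configuration endpoint, e.g.
+        # 'mycluster.abc123.cfg.use1.cache.amazonaws.com', while a Redis
+        # cluster is addressed through its single node's endpoint.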
+
+        if not dest:
+            # Skip clusters we cannot address (e.g. private VPC subnet)
+            return
+
+        # Add to index
+        self.index[dest] = [region, cluster['CacheClusterId']]
+
+        # Inventory: Group by instance ID (always a group of 1)
+        if self.group_by_instance_id:
+            self.inventory[cluster['CacheClusterId']] = [dest]
+            if self.nested_groups:
+                self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
+
+        # Inventory: Group by region
+        if self.group_by_region and not is_redis:
+            self.push(self.inventory, region, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'regions', region)
+
+        # Inventory: Group by availability zone
+        if self.group_by_availability_zone and not is_redis:
+            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
+            if self.nested_groups:
+                if self.group_by_region:
+                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
+                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
+
+        # Inventory: Group by node type
+        if self.group_by_instance_type and not is_redis:
+            type_name = self.to_safe('type_' + cluster['CacheNodeType'])
+            self.push(self.inventory, type_name, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'types', type_name)
+
+        # Inventory: Group by VPC (information not available in the current
+        # AWS API version for ElastiCache)
+
+        # Inventory: Group by security group
+        if self.group_by_security_group and not is_redis:
+
+            # Check for the existence of the 'SecurityGroups' key and also if
+            # this key has some value. When the cluster is not placed in a SG
+            # the query can return None here and cause an error.
+            if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
+                for security_group in cluster['SecurityGroups']:
+                    key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
+                    self.push(self.inventory, key, dest)
+                    if self.nested_groups:
+                        self.push_group(self.inventory, 'security_groups', key)
+
+        # Inventory: Group by engine
+        if self.group_by_elasticache_engine and not is_redis:
+            self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
+
+        # Inventory: Group by parameter group
+        if self.group_by_elasticache_parameter_group:
+            self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']))
+
+        # Inventory: Group by replication group
+        if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
+            self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']))
+
+        # Global Tag: all ElastiCache clusters
+        self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
+
+        host_info = self.get_host_info_dict_from_describe_dict(cluster)
+
+        self.inventory["_meta"]["hostvars"][dest] = host_info
+
+        # Add the nodes
+        for node in cluster['CacheNodes']:
+            self.add_elasticache_node(node, cluster, region)
+
+    def add_elasticache_node(self, node, cluster, region):
+        ''' Adds an ElastiCache node to the inventory and index, as long as
+        it is addressable '''
+
+        # Only want available nodes unless all_elasticache_nodes is True
+        if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
+            return
+
+        # Select the best destination address
+        dest = node['Endpoint']['Address']
+
+        if not dest:
+            # Skip nodes we cannot address (e.g. private VPC subnet)
+            return
+
+        node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
+
+        # Add to index
+        self.index[dest] = [region, node_id]
+
+        # Inventory: Group by node ID (always a group of 1)
+        if self.group_by_instance_id:
+            self.inventory[node_id] = [dest]
+            if self.nested_groups:
+                self.push_group(self.inventory, 'instances', node_id)
+
+        # Inventory: Group by region
+        if self.group_by_region:
+            self.push(self.inventory, region, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'regions', region)
+
+        # Inventory: Group by availability zone
+        if self.group_by_availability_zone:
+            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
+            if self.nested_groups:
+                if self.group_by_region:
+                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
+                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
+
+        # Inventory: Group by node type
+        if self.group_by_instance_type:
+            type_name = self.to_safe('type_' + cluster['CacheNodeType'])
+            self.push(self.inventory, type_name, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'types', type_name)
+
+        # Inventory: Group by VPC (information not available in the current
+        # AWS API version for ElastiCache)
+
+        # Inventory: Group by security group
+        if self.group_by_security_group:
+
+            # Check for the existence of the 'SecurityGroups' key and also if
+            # this key has some value. When the cluster is not placed in a SG
+            # the query can return None here and cause an error.
+            if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
+                for security_group in cluster['SecurityGroups']:
+                    key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
+                    self.push(self.inventory, key, dest)
+                    if self.nested_groups:
+                        self.push_group(self.inventory, 'security_groups', key)
+
+        # Inventory: Group by engine
+        if self.group_by_elasticache_engine:
+            self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
+
+        # Inventory: Group by parameter group (done at cluster level)
+
+        # Inventory: Group by replication group (done at cluster level)
+
+        # Inventory: Group by ElastiCache Cluster
+        if self.group_by_elasticache_cluster:
+            self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
+
+        # Global Tag: all ElastiCache nodes
+        self.push(self.inventory, 'elasticache_nodes', dest)
+
+        host_info = self.get_host_info_dict_from_describe_dict(node)
+
+        if dest in self.inventory["_meta"]["hostvars"]:
+            self.inventory["_meta"]["hostvars"][dest].update(host_info)
+        else:
+            self.inventory["_meta"]["hostvars"][dest] = host_info
+
+    def add_elasticache_replication_group(self, replication_group, region):
+        ''' Adds an ElastiCache replication group to the inventory and index '''
+
+        # Only want available clusters unless all_elasticache_replication_groups is True
+        if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
+            return
+
+        # Select the best destination address (PrimaryEndpoint)
+        dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
+
+        if not dest:
+            # Skip clusters we cannot address (e.g. private VPC subnet)
+            return
+
+        # Add to index
+        self.index[dest] = [region, replication_group['ReplicationGroupId']]
+
+        # Inventory: Group by ID (always a group of 1)
+        if self.group_by_instance_id:
+            self.inventory[replication_group['ReplicationGroupId']] = [dest]
+            if self.nested_groups:
+                self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
+
+        # Inventory: Group by region
+        if self.group_by_region:
+            self.push(self.inventory, region, dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'regions', region)
+
+        # Inventory: Group by availability zone (doesn't apply to replication groups)
+
+        # Inventory: Group by node type (doesn't apply to replication groups)
+
+        # Inventory: Group by VPC (information not available in the current
+        # AWS API version for replication groups)
+
+        # Inventory: Group by security group (doesn't apply to replication groups)
+        # Check this value in cluster level
+
+        # Inventory: Group by engine (replication groups are always Redis)
+        if self.group_by_elasticache_engine:
+            self.push(self.inventory, 'elasticache_redis', dest)
+            if self.nested_groups:
+                self.push_group(self.inventory, 'elasticache_engines', 'elasticache_redis')
+
+        # Global Tag: all ElastiCache replication groups
+        self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
+
+        host_info = self.get_host_info_dict_from_describe_dict(replication_group)
+
+        self.inventory["_meta"]["hostvars"][dest] = host_info
+
+    def get_route53_records(self):
+        ''' Get and store the map of resource records to domain names that
+        point to them. '''
+
+        r53_conn = route53.Route53Connection()
+        all_zones = r53_conn.get_zones()
+
+        route53_zones = [zone for zone in all_zones
+                         if zone.name[:-1] not in self.route53_excluded_zones]
+
+        self.route53_records = {}
+
+        for zone in route53_zones:
+            rrsets = r53_conn.get_all_rrsets(zone.id)
+
+            for record_set in rrsets:
+                record_name = record_set.name
+
+                if record_name.endswith('.'):
+                    record_name = record_name[:-1]
+
+                for resource in record_set.resource_records:
+                    self.route53_records.setdefault(resource, set())
+                    self.route53_records[resource].add(record_name)
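+        # The resulting map is keyed by record value, e.g. (hypothetical):
+        # {'203.0.113.10': set(['web.example.com', 'www.example.com'])}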
+
+
+    def get_instance_route53_names(self, instance):
+        ''' Check if an instance is referenced in the records we have from
+        Route53. If it is, return the list of domain names pointing to said
+        instance. If nothing points to it, return an empty list. '''
+
+        instance_attributes = ['public_dns_name', 'private_dns_name',
+                               'ip_address', 'private_ip_address']
+
+        name_list = set()
+
+        for attrib in instance_attributes:
+            try:
+                value = getattr(instance, attrib)
+            except AttributeError:
+                continue
+
+            if value in self.route53_records:
+                name_list.update(self.route53_records[value])
+
+        return list(name_list)
+
+    def get_host_info_dict_from_instance(self, instance):
+        instance_vars = {}
+        for key in vars(instance):
+            value = getattr(instance, key)
+            key = self.to_safe('ec2_' + key)
+
+            # Handle complex types
+            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
+            if key == 'ec2__state':
+                instance_vars['ec2_state'] = instance.state or ''
+                instance_vars['ec2_state_code'] = instance.state_code
+            elif key == 'ec2__previous_state':
+                instance_vars['ec2_previous_state'] = instance.previous_state or ''
+                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
+            elif type(value) in [int, bool]:
+                instance_vars[key] = value
+            elif isinstance(value, six.string_types):
+                instance_vars[key] = value.strip()
+            elif value is None:
+                instance_vars[key] = ''
+            elif key == 'ec2_region':
+                instance_vars[key] = value.name
+            elif key == 'ec2__placement':
+                instance_vars['ec2_placement'] = value.zone
+            elif key == 'ec2_tags':
+                for k, v in value.items():
+                    if self.expand_csv_tags and v and ',' in v:
+                        v = [x.strip() for x in v.split(',')]
+                    key = self.to_safe('ec2_tag_' + k)
+                    instance_vars[key] = v
+            elif key == 'ec2_groups':
+                group_ids = []
+                group_names = []
+                for group in value:
+                    group_ids.append(group.id)
+                    group_names.append(group.name)
+                instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
+                instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
+            else:
+                pass
+                # TODO Product codes if someone finds them useful
+                #print key
+                #print type(value)
+                #print value
+
+        return instance_vars
+
+    def get_host_info_dict_from_describe_dict(self, describe_dict):
+        ''' Parses the dictionary returned by the API call into a flat
+            dictionary of parameters. This method should be used only when
+            'describe' is used directly because Boto doesn't provide specific
+            classes. '''
+
+        # I really don't agree with prefixing everything with 'ec2'
+        # because EC2, RDS and ElastiCache are different services.
+        # I'm just following the pattern used until now to not break any
+        # compatibility.
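+        # Example (hypothetical input): a describe key 'CacheNodeType' with
+        # value 'cache.t2.micro' is flattened to
+        # host_info['ec2_cache_node_type'] = 'cache.t2.micro'.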
+
+        host_info = {}
+        for key in describe_dict:
+            value = describe_dict[key]
+            key = self.to_safe('ec2_' + self.uncammelize(key))
+
+            # Handle complex types
+
+            # Target: Memcached Cache Clusters
+            if key == 'ec2_configuration_endpoint' and value:
+                host_info['ec2_configuration_endpoint_address'] = value['Address']
+                host_info['ec2_configuration_endpoint_port'] = value['Port']
+
+            # Target: Cache Nodes and Redis Cache Clusters (single node)
+            elif key == 'ec2_endpoint' and value:
+                host_info['ec2_endpoint_address'] = value['Address']
+                host_info['ec2_endpoint_port'] = value['Port']
+
+            # Target: Redis Replication Groups
+            elif key == 'ec2_node_groups' and value:
+                host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
+                host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
+                replica_count = 0
+                for node in value[0]['NodeGroupMembers']:
+                    if node['CurrentRole'] == 'primary':
+                        host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
+                        host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
+                        host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
+                    elif node['CurrentRole'] == 'replica':
+                        host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
+                        host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
+                        host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
+                        replica_count += 1
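+                # With two replicas (hypothetical), this yields keys such as
+                # 'ec2_replica_cluster_address_0' and
+                # 'ec2_replica_cluster_address_1'.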
+
+            # Target: Redis Replication Groups
+            elif key == 'ec2_member_clusters' and value:
+                host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
+
+            # Target: All Cache Clusters
+            elif key == 'ec2_cache_parameter_group':
+                host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
+                host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
+                host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
+
+            # Target: Almost everything
+            elif key == 'ec2_security_groups':
+
+                # Skip if SecurityGroups is None
+                # (it is possible to have the key defined but no value in it).
+                if value is not None:
+                    sg_ids = []
+                    for sg in value:
+                        sg_ids.append(sg['SecurityGroupId'])
+                    host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
+
+            # Target: Everything
+            # Preserve booleans and integers
+            elif type(value) in [int, bool]:
+                host_info[key] = value
+
+            # Target: Everything
+            # Sanitize string values
+            elif isinstance(value, six.string_types):
+                host_info[key] = value.strip()
+
+            # Target: Everything
+            # Replace None by an empty string
+            elif value is None:
+                host_info[key] = ''
+
+            else:
+                # Remove non-processed complex types
+                pass
+
+        return host_info
+
+    def get_host_info(self):
+        ''' Get variables about a specific host '''
+
+        if len(self.index) == 0:
+            # Need to load index from cache
+            self.load_index_from_cache()
+
+        if self.args.host not in self.index:
+            # try updating the cache
+            self.do_api_calls_update_cache()
+            if self.args.host not in self.index:
+                # host might not exist anymore
+                return self.json_format_dict({}, True)
+
+        (region, instance_id) = self.index[self.args.host]
+
+        instance = self.get_instance(region, instance_id)
+        return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
+
+    def push(self, my_dict, key, element):
+        ''' Push an element onto an array that may not have been defined in
+        the dict '''
+        group_info = my_dict.setdefault(key, [])
+        if isinstance(group_info, dict):
+            host_list = group_info.setdefault('hosts', [])
+            host_list.append(element)
+        else:
+            group_info.append(element)
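+        # e.g. push(inv, 'web', 'h2') turns {'web': ['h1']} into
+        # {'web': ['h1', 'h2']}; when the group is a dict, the host is
+        # appended to its 'hosts' list instead.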
+
+    def push_group(self, my_dict, key, element):
+        ''' Push a group as a child of another group. '''
+        parent_group = my_dict.setdefault(key, {})
+        if not isinstance(parent_group, dict):
+            parent_group = my_dict[key] = {'hosts': parent_group}
+        child_groups = parent_group.setdefault('children', [])
+        if element not in child_groups:
+            child_groups.append(element)
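+        # e.g. push_group(inv, 'regions', 'us-east-1') ensures inv['regions']
+        # is a dict whose 'children' list contains 'us-east-1'.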
+
+    def get_inventory_from_cache(self):
+        ''' Reads the inventory from the cache file and returns it as a JSON
+        object '''
+
+        with open(self.cache_path_cache, 'r') as cache:
+            return cache.read()
+
+
+    def load_index_from_cache(self):
+        ''' Reads the index from the cache file and sets self.index '''
+
+        with open(self.cache_path_index, 'r') as cache:
+            self.index = json.loads(cache.read())
+
+
+    def write_to_cache(self, data, filename):
+        ''' Writes data in JSON format to a file '''
+
+        json_data = self.json_format_dict(data, True)
+        with open(filename, 'w') as cache:
+            cache.write(json_data)
+
+    def uncammelize(self, key):
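+        # e.g. 'CacheClusterId' -> 'cache_cluster_id'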
+        temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
+        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
+
+    def to_safe(self, word):
+        ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
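+        # e.g. to_safe('sg/web 1') -> 'sg_web_1'; dashes are preserved unless
+        # replace_dash_in_groups is set.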
+        regex = "[^A-Za-z0-9\_"
+        if not self.replace_dash_in_groups:
+            regex += "\-"
+        return re.sub(regex + "]", "_", word)
+
+    def json_format_dict(self, data, pretty=False):
+        ''' Converts a dict to a JSON object and dumps it as a formatted
+        string '''
+
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+
+# Run the script
+Ec2Inventory()
diff --git a/tests/inventory/inventory b/tests/inventory/inventory
new file mode 100644
index 0000000..fbfd211
--- /dev/null
+++ b/tests/inventory/inventory
@@ -0,0 +1,76 @@
+[tag_Name_cfrias_1601_edge_rhel72_1_n1]
+[tag_Name_cfrias_1601_edge_rhel72_1_n2]
+[tag_Name_cfrias_1601_edge_rhel72_1_n3]
+[tag_Name_cfrias_1601_edge_rhel72_1_n4]
+[tag_Name_cfrias_1601_edge_rhel72_1_n5]
+[tag_Name_cfrias_1601_edge_rhel72_1_n6]
+[tag_Name_cfrias_1601_edge_rhel72_1_n7]
+[tag_Name_cfrias_1601_edge_rhel72_1_n8]
+[tag_Name_cfrias_1601_edge_rhel72_1_n9]
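+
+# The tag_Name_* groups above are populated at runtime by the EC2 dynamic
+# inventory (one group per Name tag); the groups below map the components
+# (ds, ms, ldap, rmp, qpid, pg) onto those hosts.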
+
+[planet:children]
+dc-1
+
+[dc-1:children]
+tag_Name_cfrias_1601_edge_rhel72_1_n1
+tag_Name_cfrias_1601_edge_rhel72_1_n2
+tag_Name_cfrias_1601_edge_rhel72_1_n3
+tag_Name_cfrias_1601_edge_rhel72_1_n4
+tag_Name_cfrias_1601_edge_rhel72_1_n5
+tag_Name_cfrias_1601_edge_rhel72_1_n6
+tag_Name_cfrias_1601_edge_rhel72_1_n7
+tag_Name_cfrias_1601_edge_rhel72_1_n8
+tag_Name_cfrias_1601_edge_rhel72_1_n9
+
+[ds:children]
+dc-1-ds
+
+[dc-1-ds:children]
+tag_Name_cfrias_1601_edge_rhel72_1_n1
+tag_Name_cfrias_1601_edge_rhel72_1_n2
+tag_Name_cfrias_1601_edge_rhel72_1_n3
+
+[ms:children]
+dc-1-ms
+
+[dc-1-ms:children]
+tag_Name_cfrias_1601_edge_rhel72_1_n1
+
+[ldap:children]
+dc-1-ldap
+
+[dc-1-ldap:children]
+tag_Name_cfrias_1601_edge_rhel72_1_n1
+
+[rmp:children]
+dc-1-rmp
+
+[dc-1-rmp:children]
+tag_Name_cfrias_1601_edge_rhel72_1_n4
+tag_Name_cfrias_1601_edge_rhel72_1_n5
+
+[qpid:children]
+dc-1-qpid
+
+[dc-1-qpid:children]
+tag_Name_cfrias_1601_edge_rhel72_1_n6
+tag_Name_cfrias_1601_edge_rhel72_1_n7
+
+[pg:children]
+dc-1-pg
+
+[dc-1-pg:children]
+dc-1-pgmaster
+dc-1-pgstandby
+
+[pgmaster:children]
+dc-1-pgmaster
+
+[dc-1-pgmaster:children]
+tag_Name_cfrias_1601_edge_rhel72_1_n8
+
+[pgstandby:children]
+dc-1-pgstandby
+
+[dc-1-pgstandby:children]
+tag_Name_cfrias_1601_edge_rhel72_1_n9
diff --git a/tests/local_clean.yml b/tests/local_clean.yml
new file mode 100644
index 0000000..b8ce025
--- /dev/null
+++ b/tests/local_clean.yml
@@ -0,0 +1,15 @@
+---
+- hosts: localhost
+  connection: local
+
+  tasks:
+  - name: Remove cached files and logs
+    file:
+      path: '{{ item }}'
+      state: absent
+      force: yes
+    with_items:
+    - '.ansible_cache'
+    - 'ansible.log'
+    - 'ssh_keys'
+    - 'configs_and_logs'
diff --git a/tests/test-with-library.yml b/tests/test-with-library.yml
new file mode 100644
index 0000000..1f03848
--- /dev/null
+++ b/tests/test-with-library.yml
@@ -0,0 +1,23 @@
+---
+
+- include: facts.yml
+
+- hosts: ms
+  tasks:
+
+#  - name: Startup a simple http server on port '{{ check_port }}'
+#    shell: python -m SimpleHTTPServer {{ check_port }}
+#    async: 1
+#    poll: 0
+
+  - name: Start the external validator on port '{{ check_port }}'
+    external_validator:
+      port: '{{ check_port }}'
+    async: 5
+    poll: 0
+
+  - name: Check that the server port is listening
+    wait_for:
+      port: '{{ check_port }}'
+      timeout: 1
+
diff --git a/tests/test.yml b/tests/test.yml
new file mode 100644
index 0000000..ff96b52
--- /dev/null
+++ b/tests/test.yml
@@ -0,0 +1,16 @@
+---
+- include: facts.yml
+
+- hosts: ms
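+  # One role invocation per port under test; the *_port variables are
+  # presumably supplied by facts.yml or group variables.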
+  roles:
+  - { role: external-port-connectivity-validator-server, check_port: '{{ cassandra_jmx_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ cassandra_thrift_client_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ cassandra_cql_native_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ cassandra_non_ssl_gossip_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ zk_data_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ zk_leader_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ zk_voter_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ ms_jmx_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ ms_ext_mgmt_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ ui_http_port }}' }
+  - { role: external-port-connectivity-validator-server, check_port: '{{ ldap_data_port }}' }
diff --git a/vars/main.yml b/vars/main.yml
new file mode 100644
index 0000000..212bd41
--- /dev/null
+++ b/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for external-port-connectivity-validator-server