-rw-r--r--  .coveragerc | 5
-rw-r--r--  .github/ISSUE_TEMPLATE.md | 63
-rw-r--r--  .gitignore | 2
-rw-r--r--  .pylintrc (renamed from git/.pylintrc) | 173
-rw-r--r--  .travis.yml | 4
-rw-r--r--  .yamllint (renamed from git/.yamllint) | 0
-rw-r--r--  CONTRIBUTING.md | 49
-rw-r--r--  README.md | 62
-rw-r--r--  filter_plugins/oo_filters.py | 1794
-rw-r--r--  filter_plugins/openshift_master.py | 19
-rwxr-xr-x  git/parent.py | 97
-rwxr-xr-x  git/pylint.sh | 51
-rwxr-xr-x  git/yaml_validation.py | 73
-rw-r--r--  inventory/README.md | 2
-rw-r--r--  inventory/aws/hosts/ec2.ini | 54
-rwxr-xr-x  inventory/aws/hosts/ec2.py | 285
-rwxr-xr-x  inventory/gce/hosts/gce.py | 252
-rwxr-xr-x  inventory/libvirt/hosts/libvirt_generic.py | 10
-rwxr-xr-x  library/modify_yaml.py | 27
-rw-r--r--  openshift-ansible.spec | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-master/config.yml | 5
-rw-r--r--  requirements.txt | 4
-rw-r--r--  roles/lib_utils/library/yedit.py | 766
-rw-r--r--  roles/lib_utils/src/ansible/yedit.py | 84
-rw-r--r--  roles/lib_utils/src/class/import.py | 11
-rw-r--r--  roles/lib_utils/src/class/yedit.py | 520
-rw-r--r--  roles/lib_utils/src/doc/license | 16
-rw-r--r--  roles/lib_utils/src/doc/yedit | 132
-rwxr-xr-x  roles/lib_utils/src/generate.py | 45
-rw-r--r--  roles/lib_utils/src/generate_sources.yml | 7
-rw-r--r--  roles/lib_utils/src/test/integration/files/kube-manager.yaml | 39
-rwxr-xr-x  roles/lib_utils/src/test/integration/yedit_test.yml | 221
-rwxr-xr-x  roles/lib_utils/src/test/unit/yedit_test.py | 277
-rw-r--r--  roles/openshift_certificate_expiry/library/openshift_cert_expiry.py | 25
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 48
-rw-r--r--  roles/openshift_hosted_metrics/README.md (renamed from roles/openshift_metrics/README.md) | 2
-rw-r--r--  roles/openshift_hosted_metrics/defaults/main.yml (renamed from roles/openshift_metrics/defaults/main.yml) | 0
-rw-r--r--  roles/openshift_hosted_metrics/handlers/main.yml (renamed from roles/openshift_metrics/handlers/main.yml) | 0
-rw-r--r--  roles/openshift_hosted_metrics/meta/main.yaml (renamed from roles/openshift_metrics/meta/main.yaml) | 0
-rw-r--r--  roles/openshift_hosted_metrics/tasks/install.yml (renamed from roles/openshift_metrics/tasks/install.yml) | 22
-rw-r--r--  roles/openshift_hosted_metrics/tasks/main.yaml (renamed from roles/openshift_metrics/tasks/main.yaml) | 8
-rw-r--r--  roles/openshift_hosted_metrics/vars/main.yaml (renamed from roles/openshift_metrics/vars/main.yaml) | 0
-rw-r--r--  roles/openshift_node/README.md | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/meta/main.yml | 3
-rw-r--r--  roles/openshift_storage_nfs_lvm/tasks/main.yml | 2
-rw-r--r--  roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 | 4
-rw-r--r--  setup.cfg | 27
-rw-r--r--  setup.py | 193
-rw-r--r--  test-requirements.txt | 11
-rw-r--r--  tox.ini | 18
l---------  utils/.pylintrc | 1
-rw-r--r--  utils/Makefile | 30
-rw-r--r--  utils/README.md | 41
-rw-r--r--  utils/setup.cfg | 4
-rw-r--r--  utils/test-requirements.txt | 2
-rw-r--r--  utils/test/openshift_ansible_tests.py | 71
-rw-r--r--  utils/test/test_utils.py | 1
-rw-r--r--  utils/tox.ini | 5
62 files changed, 4225 insertions(+), 1453 deletions(-)
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 000000000..e1d918755
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,5 @@
+[run]
+omit=
+ */lib/python*/site-packages/*
+ */lib/python*/*
+ /usr/*
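The new `.coveragerc` keeps virtualenv site-packages, the interpreter's own
modules, and system-wide installs out of coverage measurement. A minimal
sketch of the same omit patterns driven through coverage.py's Python API
(`my_module` is a hypothetical stand-in for code under test):

```
# Sketch only: mirrors the .coveragerc omit patterns via the coverage API.
import coverage

cov = coverage.Coverage(omit=[
    "*/lib/python*/site-packages/*",  # third-party packages in a virtualenv
    "*/lib/python*/*",                # the virtualenv's copy of the stdlib
    "/usr/*",                         # system-wide Python installs
])
cov.start()
import my_module                      # hypothetical module under test
my_module.do_work()                   # hypothetical entry point
cov.stop()
cov.report()
```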
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 627fa13eb..2a4f80a36 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,21 +1,64 @@
-[provide a description of the issue]
+#### Description
+
+Provide a brief description of your issue here. For example:
+
+> On a multi-master install, if the first master goes down we can no
+> longer scale up the cluster with new nodes or masters.
+
##### Version
-[if you're operating from a git clone provide the output of `git describe`]
-[if you're running from playbooks installed via RPM or atomic-openshift-utils `rpm -q atomic-openshift-utils openshift-ansible`]
-[Your version of ansible, `ansible --version`]
+Please put the following version information in the code block
+indicated below.
+
+* Your ansible version per `ansible --version`
+
+If you're operating from a **git clone**:
+
+* The output of `git describe`
+
+If you're running from playbooks installed via RPM or
+`atomic-openshift-utils`:
+
+* The output of `rpm -q atomic-openshift-utils openshift-ansible`
+
+Place the output in the code block below:
+
+```
+VERSION INFORMATION HERE PLEASE
+```
##### Steps To Reproduce
1. [step 1]
2. [step 2]
-##### Current Result
-##### Expected Result
+##### Expected Results
+Describe what you expected to happen.
+
+```
+Example command and output or error messages
+```
+
+##### Observed Results
+Describe what is actually happening.
+
+```
+Example command and output or error messages
+```
+
+For long output or logs, consider using a [gist](https://gist.github.com/).
+
##### Additional Information
-[The exact command you ran]
-[Your operating system and version, ie: RHEL 7.2, Fedora 23]
-[Your inventory file]
-[visit https://docs.openshift.org/latest/welcome/index.html]
+
+Provide any additional information which may help us diagnose the
+issue.
+
+* Your operating system and version, e.g.: RHEL 7.2, Fedora 23 (`$ cat /etc/redhat-release`)
+* Your inventory file (especially any non-standard configuration parameters)
+* Sample code, etc.
+
+```
+EXTRA INFORMATION GOES HERE
+```
diff --git a/.gitignore b/.gitignore
index ac249d5eb..9af271235 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,3 +25,5 @@ ansible.cfg
.tox
.coverage
*.egg-info
+.eggs
+cover
diff --git a/git/.pylintrc b/.pylintrc
index 411330fe7..a32bd3d68 100644
--- a/git/.pylintrc
+++ b/.pylintrc
@@ -1,5 +1,4 @@
[MASTER]
-
# Specify a configuration file.
#rcfile=
@@ -7,12 +6,9 @@
# pygtk.require().
#init-hook=
-# Profiled execution.
-#profile=no
-
# Add files or directories to the blacklist. They should be base names, not
# paths.
-ignore=CVS
+ignore=CVS,setup.py
# Pickle collected data for later comparisons.
persistent=no
@@ -21,14 +17,6 @@ persistent=no
# usually to register additional checkers.
load-plugins=
-# Deprecated. It was used to include message's id in output. Use --msg-template
-# instead.
-#include-ids=no
-
-# Deprecated. It was used to include symbolic ids of messages in output. Use
-# --msg-template instead.
-#symbols=no
-
# Use multiple processes to speed up Pylint.
jobs=1
@@ -58,7 +46,8 @@ confidence=
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
-# multiple time. See also the "--disable" option for examples.
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
@@ -70,8 +59,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-# w0511 - fixme - disabled because TODOs are acceptable
-disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511,R0801,locally-disabled,file-ignored
+disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating
[REPORTS]
@@ -96,20 +84,24 @@ reports=no
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-# Add a comment according to your evaluation note. This is used by the global
-# evaluation report (RP0004).
-#comment=no
-
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
-[LOGGING]
+[SIMILARITIES]
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=yes
[BASIC]
@@ -192,44 +184,23 @@ method-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match function or class names that do
# not require a docstring.
-no-docstring-rgx=__.*__
+no-docstring-rgx=^_
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
-[SIMILARITIES]
-
-# Minimum lines number of a similarity.
-min-similarity-lines=0
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=yes
-
-
-[VARIABLES]
+[ELIF]
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=_$|dummy
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
+[MISCELLANEOUS]
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,_cb
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
[TYPECHECK]
@@ -240,27 +211,30 @@ ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# List of classes names for which member attributes should not be checked
-# (useful for classes with attributes dynamically set).
-ignored-classes=SQLObject
-
-# When zope mode is activated, add a predefined set of Zope acquired attributes
-# to generated-members.
-#zope=no
+# (useful for classes with attributes dynamically set). This supports can work
+# with qualified names.
+ignored-classes=
# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E0201 when accessed. Python regular
+# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
-generated-members=REQUEST,acl_users,aq_parent
+generated-members=
[SPELLING]
-# Spelling dictionary name. Available dictionaries: none. To make it working
-# install python-enchant package.
+# Spelling dictionary name. Available dictionaries: en_ZW (myspell), en_NG
+# (myspell), en_NA (myspell), en_NZ (myspell), en_PH (myspell), en_AG
+# (myspell), en_BW (myspell), en_IE (myspell), en_ZM (myspell), en_DK
+# (myspell), en_CA (myspell), en_GH (myspell), en_IN (myspell), en_BZ
+# (myspell), en_MW (myspell), en_TT (myspell), en_JM (myspell), en_GB
+# (myspell), en_ZA (myspell), en_SG (myspell), en_AU (myspell), en_US
+# (myspell), en_BS (myspell), en_HK (myspell).
spelling-dict=
# List of comma separated words that should not be checked.
@@ -274,12 +248,6 @@ spelling-private-dict-file=
spelling-store-unknown-words=no
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,XXX,TODO
-
-
[FORMAT]
# Maximum number of characters on a single line.
@@ -292,23 +260,67 @@ ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# else.
single-line-if-stmt=no
-# List of optional constructs for which whitespace checking is disabled
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=1000
-# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
-# Number of spaces of indent required inside a hanging or continued line.
+# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
[DESIGN]
# Maximum number of arguments for function / method
@@ -342,21 +354,8 @@ min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
-
-[CLASSES]
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,_fields,_replace,_source,_make
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
[IMPORTS]
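The `evaluation` expression kept unchanged above is how pylint turns message
counts into its 10-point score. A quick sketch of that arithmetic in plain
Python, with made-up counts:

```
# pylint's score: 10.0 - ((5*error + warning + refactor + convention) / statement) * 10
error, warning, refactor, convention = 2, 5, 3, 10  # made-up message counts
statement = 400                                     # made-up number of statements analysed

score = 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
print(round(score, 2))  # 9.3 for these inputs
```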
diff --git a/.travis.yml b/.travis.yml
index 0e3a75df7..f0a228c23 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -11,8 +11,10 @@ python:
install:
- pip install -r requirements.txt
+ - pip install tox-travis
script:
# TODO(rhcarvalho): check syntax of other important entrypoint playbooks
- ansible-playbook --syntax-check playbooks/byo/config.yml
- - cd utils && make ci
+ - tox
+ - cd utils && tox
diff --git a/git/.yamllint b/.yamllint
index 573321a94..573321a94 100644
--- a/git/.yamllint
+++ b/.yamllint
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1145da495..83c844e28 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -66,30 +66,55 @@ These are plugins used in playbooks and roles:
└── test Contains tests.
```
-### Others
-
-```
-.
-└── git Contains some helper scripts for repository maintenance.
-```
-
## Building RPMs
See the [RPM build instructions](BUILD.md).
## Running tests
-We use [Nose](http://readthedocs.org/docs/nose/) as a test runner. Make sure it
-is installed along with other test dependencies:
+This section covers how to run tests for the root of this repo; running tests
+for the oo-install wrapper is described in [utils/README.md](utils/README.md).
+
+We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
+tests. Alternatively, tests can be run using
+[detox](https://pypi.python.org/pypi/detox/), which allows for running tests in
+parallel:
+
```
-pip install -r utils/test-requirements.txt
+pip install tox detox
```
-Run the tests with:
+List the test environments available:
+```
+tox -l
+```
+
+Run all of the tests with:
+```
+tox
+```
+
+Run all of the tests in parallel with detox:
+```
+detox
+```
+
+Running a particular test environment (python 2.7 flake8 tests in this case):
+```
+tox -e py27-ansible22-flake8
+```
+
+Running a particular test environment in a clean virtualenv (python 3.5 pylint
+tests in this case):
+```
+tox -r -e py35-ansible22-pylint
+```
+If you want to enter the virtualenv created by tox to do additional
+testing/debugging (the py27-ansible22-flake8 env in this case):
```
-nosetests
+source .tox/py27-ansible22-flake8/bin/activate
```
## Submitting contributions
diff --git a/README.md b/README.md
index 635981b45..def8dfdc5 100644
--- a/README.md
+++ b/README.md
@@ -3,30 +3,48 @@
# OpenShift Ansible
-This repository contains [Ansible](https://www.ansible.com/) code to install,
-upgrade and manage [OpenShift](https://www.openshift.com/) clusters.
-
-**Note**: the Ansible playbooks in this repository require an RPM package that
-provides `docker`. Currently, the RPMs from
-[dockerproject.org](https://dockerproject.org/) do not provide this requirement,
-though they may in the future. This limitation is being tracked by
+This repository contains [Ansible](https://www.ansible.com/) roles and
+playbooks to install, upgrade, and manage
+[OpenShift](https://www.openshift.com/) clusters.
+
+**Note**: the Ansible playbooks in this repository require an RPM
+package that provides `docker`. Currently, the RPMs from
+[dockerproject.org](https://dockerproject.org/) do not provide this
+requirement, though they may in the future. This limitation is being
+tracked by
[#2720](https://github.com/openshift/openshift-ansible/issues/2720).
-## Branches and tags
-
-The [master branch](https://github.com/openshift/openshift-ansible/tree/master)
-tracks our current work and should be compatible with both [Origin master
-branch](https://github.com/openshift/origin/tree/master) and the [most recent
-Origin stable release](https://github.com/openshift/origin/releases). Currently
-that's v1.4 and v1.3.x. In addition to the master branch, we maintain stable
-branches corresponding to upstream Origin releases, e.g.:
-[release-1.2](https://github.com/openshift/openshift-ansible/tree/release-1.2).
-The most recent branch will often receive minor feature backports and fixes.
-Older branches will receive only critical fixes.
-
-Releases are tagged periodically from active branches and are versioned 3.x
-corresponding to Origin releases 1.x. We unfortunately started with 3.0 and it's
-not practical to start over at 1.0.
+## Getting the correct version
+
+The
+[master branch](https://github.com/openshift/openshift-ansible/tree/master)
+tracks our current work **in development** and should be compatible
+with the
+[Origin master branch](https://github.com/openshift/origin/tree/master)
+(code in development).
+
+In addition to the master branch, we maintain stable branches
+corresponding to upstream Origin releases; e.g., we guarantee that an
+openshift-ansible 3.2 release will fully support an Origin
+[1.2 release](https://github.com/openshift/openshift-ansible/tree/release-1.2).
+The most recent branch will often receive minor feature backports and
+fixes. Older branches will receive only critical fixes.
+
+**Getting the right openshift-ansible release**
+
+Follow this release pattern and you can't go wrong:
+
+| Origin | OpenShift-Ansible |
+| ------------- | ----------------- |
+| 1.3 | 3.3 |
+| 1.4 | 3.4 |
+| 1.*X* | 3.*X* |
+
+If you're running from the openshift-ansible **master branch**, we can
+only guarantee compatibility with the newest Origin releases **in
+development**. Use a branch corresponding to your Origin version if
+you are not running a stable release.
+
## Setup
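The pairing in the README table above is mechanical: Origin 1.X goes with
openshift-ansible 3.X. A hypothetical helper that encodes that rule, purely
for illustration:

```
# Illustrative only: the Origin 1.X -> openshift-ansible 3.X pairing.
def openshift_ansible_version(origin_version):
    """Map an Origin release like '1.4' to its openshift-ansible release ('3.4')."""
    major, minor = origin_version.split('.')[:2]
    if major != '1':
        raise ValueError("the table above only covers Origin 1.X releases")
    return "3.%s" % minor

assert openshift_ansible_version("1.3") == "3.3"
assert openshift_ansible_version("1.4") == "3.4"
```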
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index bad1f6a3b..707662cbf 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -38,925 +38,927 @@ except ImportError:
from ansible.utils.unicode import to_unicode as to_text
-# Disabling too-many-public-methods, since filter methods are necessarily
-# public
-# pylint: disable=too-many-public-methods
-class FilterModule(object):
- """ Custom ansible filters """
-
- @staticmethod
- def oo_pdb(arg):
- """ This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- """
- pdb.set_trace()
- return arg
-
- @staticmethod
- def get_attr(data, attribute=None):
- """ This looks up dictionary attributes of the form a.b.c and returns
- the value.
-
- If the key isn't present, None is returned.
- Ex: data = {'a': {'b': {'c': 5}}}
- attribute = "a.b.c"
- returns 5
- """
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
-
- ptr = data
- for attr in attribute.split('.'):
- if attr in ptr:
- ptr = ptr[attr]
- else:
- ptr = None
- break
-
- return ptr
-
- @staticmethod
- def oo_flatten(data):
- """ This filter plugin will flatten a list of lists
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to flatten a List")
-
- return [item for sublist in data for item in sublist]
-
- @staticmethod
- def oo_merge_dicts(first_dict, second_dict):
- """ Merge two dictionaries where second_dict values take precedence.
- Ex: first_dict={'a': 1, 'b': 2}
- second_dict={'b': 3, 'c': 4}
- returns {'a': 1, 'b': 3, 'c': 4}
- """
- if not isinstance(first_dict, dict) or not isinstance(second_dict, dict):
- raise errors.AnsibleFilterError("|failed expects to merge two dicts")
- merged = first_dict.copy()
- merged.update(second_dict)
- return merged
-
- @staticmethod
- def oo_merge_hostvars(hostvars, variables, inventory_hostname):
- """ Merge host and play variables.
-
- When ansible version is greater than or equal to 2.0.0,
- merge hostvars[inventory_hostname] with variables (ansible vars)
- otherwise merge hostvars with hostvars['inventory_hostname'].
-
- Ex: hostvars={'master1.example.com': {'openshift_variable': '3'},
- 'openshift_other_variable': '7'}
- variables={'openshift_other_variable': '6'}
- inventory_hostname='master1.example.com'
- returns {'openshift_variable': '3', 'openshift_other_variable': '7'}
-
- hostvars=<ansible.vars.hostvars.HostVars object> (Mapping)
- variables={'openshift_other_variable': '6'}
- inventory_hostname='master1.example.com'
- returns {'openshift_variable': '3', 'openshift_other_variable': '6'}
- """
- if not isinstance(hostvars, Mapping):
- raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object")
- if not isinstance(variables, dict):
- raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
- if not isinstance(inventory_hostname, string_types):
- raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
- # pylint: disable=no-member
- ansible_version = pkg_resources.get_distribution("ansible").version
- merged_hostvars = {}
- if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
- merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname],
- variables)
+def oo_pdb(arg):
+ """ This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | oo_pdb }}"
+ """
+ pdb.set_trace()
+ return arg
+
+
+def get_attr(data, attribute=None):
+ """ This looks up dictionary attributes of the form a.b.c and returns
+ the value.
+
+ If the key isn't present, None is returned.
+ Ex: data = {'a': {'b': {'c': 5}}}
+ attribute = "a.b.c"
+ returns 5
+ """
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+ ptr = data
+ for attr in attribute.split('.'):
+ if attr in ptr:
+ ptr = ptr[attr]
else:
- merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname],
- hostvars)
- return merged_hostvars
-
- @staticmethod
- def oo_collect(data, attribute=None, filters=None):
- """ This takes a list of dict and collects all attributes specified into a
- list. If filter is specified then we will include all items that
- match _ALL_ of filters. If a dict entry is missing the key in a
- filter it will be excluded from the match.
- Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
- {'a':2, 'z': 'z'}, # True, return
- {'a':3, 'z': 'z'}, # True, return
- {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
- ]
- attribute = 'a'
- filters = {'z': 'z'}
- returns [1, 2, 3]
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a List")
-
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
-
- if filters is not None:
- if not isinstance(filters, dict):
- raise errors.AnsibleFilterError("|failed expects filter to be a"
- " dict")
- retval = [FilterModule.get_attr(d, attribute) for d in data if (
- all([d.get(key, None) == filters[key] for key in filters]))]
- else:
- retval = [FilterModule.get_attr(d, attribute) for d in data]
-
- retval = [val for val in retval if val is not None]
-
- return retval
-
- @staticmethod
- def oo_select_keys_from_list(data, keys):
- """ This returns a list, which contains the value portions for the keys
- Ex: data = { 'a':1, 'b':2, 'c':3 }
- keys = ['a', 'c']
- returns [1, 3]
- """
-
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a list")
-
- if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+ ptr = None
+ break
+
+ return ptr
+
+
+def oo_flatten(data):
+ """ This filter plugin will flatten a list of lists
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to flatten a List")
+
+ return [item for sublist in data for item in sublist]
+
+
+def oo_merge_dicts(first_dict, second_dict):
+ """ Merge two dictionaries where second_dict values take precedence.
+ Ex: first_dict={'a': 1, 'b': 2}
+ second_dict={'b': 3, 'c': 4}
+ returns {'a': 1, 'b': 3, 'c': 4}
+ """
+ if not isinstance(first_dict, dict) or not isinstance(second_dict, dict):
+ raise errors.AnsibleFilterError("|failed expects to merge two dicts")
+ merged = first_dict.copy()
+ merged.update(second_dict)
+ return merged
+
+
+def oo_merge_hostvars(hostvars, variables, inventory_hostname):
+ """ Merge host and play variables.
+
+ When ansible version is greater than or equal to 2.0.0,
+ merge hostvars[inventory_hostname] with variables (ansible vars)
+ otherwise merge hostvars with hostvars['inventory_hostname'].
+
+ Ex: hostvars={'master1.example.com': {'openshift_variable': '3'},
+ 'openshift_other_variable': '7'}
+ variables={'openshift_other_variable': '6'}
+ inventory_hostname='master1.example.com'
+ returns {'openshift_variable': '3', 'openshift_other_variable': '7'}
+
+ hostvars=<ansible.vars.hostvars.HostVars object> (Mapping)
+ variables={'openshift_other_variable': '6'}
+ inventory_hostname='master1.example.com'
+ returns {'openshift_variable': '3', 'openshift_other_variable': '6'}
+ """
+ if not isinstance(hostvars, Mapping):
+ raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object")
+ if not isinstance(variables, dict):
+ raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
+ if not isinstance(inventory_hostname, string_types):
+ raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
+ # pylint: disable=no-member
+ ansible_version = pkg_resources.get_distribution("ansible").version
+ merged_hostvars = {}
+ if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
+ merged_hostvars = oo_merge_dicts(
+ hostvars[inventory_hostname], variables)
+ else:
+ merged_hostvars = oo_merge_dicts(
+ hostvars[inventory_hostname], hostvars)
+ return merged_hostvars
+
+
+def oo_collect(data, attribute=None, filters=None):
+ """ This takes a list of dict and collects all attributes specified into a
+ list. If filter is specified then we will include all items that
+ match _ALL_ of filters. If a dict entry is missing the key in a
+ filter it will be excluded from the match.
+ Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'z': 'z'}, # True, return
+ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'}, # FAILED, obj['z'] != filters['z']
+ ]
+ attribute = 'a'
+ filters = {'z': 'z'}
+ returns [1, 2, 3]
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a List")
+
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+ if filters is not None:
+ if not isinstance(filters, dict):
+ raise errors.AnsibleFilterError("|failed expects filter to be a"
+ " dict")
+ retval = [get_attr(d, attribute) for d in data if (
+ all([d.get(key, None) == filters[key] for key in filters]))]
+ else:
+ retval = [get_attr(d, attribute) for d in data]
+
+ retval = [val for val in retval if val is not None]
+
+ return retval
+
+
+def oo_select_keys_from_list(data, keys):
+ """ This returns a list, which contains the value portions for the keys
+ Ex: data = { 'a':1, 'b':2, 'c':3 }
+ keys = ['a', 'c']
+ returns [1, 3]
+ """
+
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a list")
+
+ if not isinstance(keys, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [oo_select_keys(item, keys) for item in data]
+
+ return oo_flatten(retval)
+
+
+def oo_select_keys(data, keys):
+ """ This returns a list, which contains the value portions for the keys
+ Ex: data = { 'a':1, 'b':2, 'c':3 }
+ keys = ['a', 'c']
+ returns [1, 3]
+ """
+
+ if not isinstance(data, Mapping):
+ raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
+
+ if not isinstance(keys, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [data[key] for key in keys if key in data]
+
+ return retval
+
+
+def oo_prepend_strings_in_list(data, prepend):
+ """ This takes a list of strings and prepends a string to each item in the
+ list
+ Ex: data = ['cart', 'tree']
+ prepend = 'apple-'
+ returns ['apple-cart', 'apple-tree']
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+ if not all(isinstance(x, string_types) for x in data):
+ raise errors.AnsibleFilterError("|failed expects first param is a list"
+ " of strings")
+ retval = [prepend + s for s in data]
+ return retval
+
+
+def oo_combine_key_value(data, joiner='='):
+ """Take a list of dict in the form of { 'key': 'value'} and
+ arrange them as a list of strings ['key=value']
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ rval = []
+ for item in data:
+ rval.append("%s%s%s" % (item['key'], joiner, item['value']))
+
+ return rval
+
+
+def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
+ """Take a dict in the form of { 'key': 'value', 'key': 'value' } and
+ arrange them as a string 'key=value key=value'
+ """
+ if not isinstance(data, dict):
+ # pylint: disable=line-too-long
+ raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data))))
+
+ return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
- # Gather up the values for the list of keys passed in
- retval = [FilterModule.oo_select_keys(item, keys) for item in data]
- return FilterModule.oo_flatten(retval)
-
- @staticmethod
- def oo_select_keys(data, keys):
- """ This returns a list, which contains the value portions for the keys
- Ex: data = { 'a':1, 'b':2, 'c':3 }
- keys = ['a', 'c']
- returns [1, 3]
- """
-
- if not isinstance(data, Mapping):
- raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
-
- if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
-
- # Gather up the values for the list of keys passed in
- retval = [data[key] for key in keys if key in data]
-
- return retval
-
- @staticmethod
- def oo_prepend_strings_in_list(data, prepend):
- """ This takes a list of strings and prepends a string to each item in the
- list
- Ex: data = ['cart', 'tree']
- prepend = 'apple-'
- returns ['apple-cart', 'apple-tree']
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
- if not all(isinstance(x, string_types) for x in data):
- raise errors.AnsibleFilterError("|failed expects first param is a list"
- " of strings")
- retval = [prepend + s for s in data]
- return retval
-
- @staticmethod
- def oo_combine_key_value(data, joiner='='):
- """Take a list of dict in the form of { 'key': 'value'} and
- arrange them as a list of strings ['key=value']
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
-
- rval = []
- for item in data:
- rval.append("%s%s%s" % (item['key'], joiner, item['value']))
-
- return rval
-
- @staticmethod
- def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
- """Take a dict in the form of { 'key': 'value', 'key': 'value' } and
- arrange them as a string 'key=value key=value'
- """
- if not isinstance(data, dict):
- # pylint: disable=line-too-long
- raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data))))
-
- return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
-
- @staticmethod
- def oo_ami_selector(data, image_name):
- """ This takes a list of amis and an image name and attempts to return
- the latest ami.
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+def oo_ami_selector(data, image_name):
+ """ This takes a list of amis and an image name and attempts to return
+ the latest ami.
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
- if not data:
- return None
+ if not data:
+ return None
+ else:
+ if image_name is None or not image_name.endswith('_*'):
+ ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+ return ami['ami_id']
else:
- if image_name is None or not image_name.endswith('_*'):
- ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
- return ami['ami_id']
- else:
- ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
- ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
- return ami['ami_id']
-
- @staticmethod
- def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
- """ This takes a dictionary of volume definitions and returns a valid ec2
- volume definition based on the host_type and the values in the
- dictionary.
- The dictionary should look similar to this:
- { 'master':
- { 'root':
- { 'volume_size': 10, 'device_type': 'gp2',
- 'iops': 500
- },
- 'docker':
- { 'volume_size': 40, 'device_type': 'gp2',
- 'iops': 500, 'ephemeral': 'true'
- }
+ ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+ ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+ return ami['ami_id']
+
+
+def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
+ """ This takes a dictionary of volume definitions and returns a valid ec2
+ volume definition based on the host_type and the values in the
+ dictionary.
+ The dictionary should look similar to this:
+ { 'master':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'gp2',
+ 'iops': 500
},
- 'node':
- { 'root':
- { 'volume_size': 10, 'device_type': 'io1',
- 'iops': 1000
- },
- 'docker':
- { 'volume_size': 40, 'device_type': 'gp2',
- 'iops': 500, 'ephemeral': 'true'
- }
+ 'docker':
+ { 'volume_size': 40, 'device_type': 'gp2',
+ 'iops': 500, 'ephemeral': 'true'
+ }
+ },
+ 'node':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'io1',
+ 'iops': 1000
+ },
+ 'docker':
+ { 'volume_size': 40, 'device_type': 'gp2',
+ 'iops': 500, 'ephemeral': 'true'
}
}
- """
- if not isinstance(data, dict):
- # pylint: disable=line-too-long
- raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data))))
- if host_type not in ['master', 'node', 'etcd']:
- raise errors.AnsibleFilterError("|failed expects etcd, master or node"
- " as the host type")
-
- root_vol = data[host_type]['root']
- root_vol['device_name'] = '/dev/sda1'
- root_vol['delete_on_termination'] = True
- if root_vol['device_type'] != 'io1':
- root_vol.pop('iops', None)
- if host_type in ['master', 'node'] and 'docker' in data[host_type]:
- docker_vol = data[host_type]['docker']
- docker_vol['device_name'] = '/dev/xvdb'
- docker_vol['delete_on_termination'] = True
- if docker_vol['device_type'] != 'io1':
- docker_vol.pop('iops', None)
- if docker_ephemeral:
- docker_vol.pop('device_type', None)
- docker_vol.pop('delete_on_termination', None)
- docker_vol['ephemeral'] = 'ephemeral0'
- return [root_vol, docker_vol]
- elif host_type == 'etcd' and 'etcd' in data[host_type]:
- etcd_vol = data[host_type]['etcd']
- etcd_vol['device_name'] = '/dev/xvdb'
- etcd_vol['delete_on_termination'] = True
- if etcd_vol['device_type'] != 'io1':
- etcd_vol.pop('iops', None)
- return [root_vol, etcd_vol]
- return [root_vol]
-
- @staticmethod
- def oo_split(string, separator=','):
- """ This splits the input string into a list. If the input string is
- already a list we will return it as is.
- """
- if isinstance(string, list):
- return string
- return string.split(separator)
-
- @staticmethod
- def oo_haproxy_backend_masters(hosts, port):
- """ This takes an array of dicts and returns an array of dicts
- to be used as a backend for the haproxy role
- """
- servers = []
- for idx, host_info in enumerate(hosts):
- server = dict(name="master%s" % idx)
- server_ip = host_info['openshift']['common']['ip']
- server['address'] = "%s:%s" % (server_ip, port)
- server['opts'] = 'check'
- servers.append(server)
- return servers
-
- @staticmethod
- def oo_filter_list(data, filter_attr=None):
- """ This returns a list, which contains all items where filter_attr
- evaluates to true
- Ex: data = [ { a: 1, b: True },
- { a: 3, b: False },
- { a: 5, b: True } ]
- filter_attr = 'b'
- returns [ { a: 1, b: True },
- { a: 5, b: True } ]
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a list")
-
- if not isinstance(filter_attr, string_types):
- raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
-
- # Gather up the values for the list of keys passed in
- return [x for x in data if filter_attr in x and x[filter_attr]]
-
- @staticmethod
- def oo_nodes_with_label(nodes, label, value=None):
- """ Filters a list of nodes by label and value (if provided)
-
- It handles labels that are in the following variables by priority:
- openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels']
-
- Examples:
- data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
- 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
- 'c': {'openshift_node_labels': {'size': 'S'}}]
- label = 'color'
- returns = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
- 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
-
- data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
- 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
- 'c': {'openshift_node_labels': {'size': 'S'}}]
- label = 'color'
- value = 'green'
- returns = ['b': {'labels': {'color': 'green', 'size': 'L'}}]
-
- Args:
- nodes (list[dict]): list of node to node variables
- label (str): label to filter `nodes` by
- value (Optional[str]): value of `label` to filter by Defaults
- to None.
-
- Returns:
- list[dict]: nodes filtered by label and value (if provided)
- """
- if not isinstance(nodes, list):
- raise errors.AnsibleFilterError("failed expects to filter on a list")
- if not isinstance(label, string_types):
- raise errors.AnsibleFilterError("failed expects label to be a string")
- if value is not None and not isinstance(value, string_types):
- raise errors.AnsibleFilterError("failed expects value to be a string")
-
- def label_filter(node):
- """ filter function for testing if node should be returned """
- if not isinstance(node, dict):
- raise errors.AnsibleFilterError("failed expects to filter on a list of dicts")
- if 'openshift_node_labels' in node:
- labels = node['openshift_node_labels']
- elif 'cli_openshift_node_labels' in node:
- labels = node['cli_openshift_node_labels']
- elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']:
- labels = node['openshift']['node']['labels']
- else:
- return False
-
- if isinstance(labels, string_types):
- labels = yaml.safe_load(labels)
- if not isinstance(labels, dict):
- raise errors.AnsibleFilterError(
- "failed expected node labels to be a dict or serializable to a dict"
- )
- return label in labels and (value is None or labels[label] == value)
-
- return [n for n in nodes if label_filter(n)]
-
- @staticmethod
- def oo_parse_heat_stack_outputs(data):
- """ Formats the HEAT stack output into a usable form
-
- The goal is to transform something like this:
-
- +---------------+-------------------------------------------------+
- | Property | Value |
- +---------------+-------------------------------------------------+
- | capabilities | [] | |
- | creation_time | 2015-06-26T12:26:26Z | |
- | description | OpenShift cluster | |
- | … | … |
- | outputs | [ |
- | | { |
- | | "output_value": "value_A" |
- | | "description": "This is the value of Key_A" |
- | | "output_key": "Key_A" |
- | | }, |
- | | { |
- | | "output_value": [ |
- | | "value_B1", |
- | | "value_B2" |
- | | ], |
- | | "description": "This is the value of Key_B" |
- | | "output_key": "Key_B" |
- | | }, |
- | | ] |
- | parameters | { |
- | … | … |
- +---------------+-------------------------------------------------+
-
- into something like this:
-
- {
- "Key_A": "value_A",
- "Key_B": [
- "value_B1",
- "value_B2"
- ]
}
- """
+ """
+ if not isinstance(data, dict):
+ # pylint: disable=line-too-long
+ raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data))))
+ if host_type not in ['master', 'node', 'etcd']:
+ raise errors.AnsibleFilterError("|failed expects etcd, master or node"
+ " as the host type")
+
+ root_vol = data[host_type]['root']
+ root_vol['device_name'] = '/dev/sda1'
+ root_vol['delete_on_termination'] = True
+ if root_vol['device_type'] != 'io1':
+ root_vol.pop('iops', None)
+ if host_type in ['master', 'node'] and 'docker' in data[host_type]:
+ docker_vol = data[host_type]['docker']
+ docker_vol['device_name'] = '/dev/xvdb'
+ docker_vol['delete_on_termination'] = True
+ if docker_vol['device_type'] != 'io1':
+ docker_vol.pop('iops', None)
+ if docker_ephemeral:
+ docker_vol.pop('device_type', None)
+ docker_vol.pop('delete_on_termination', None)
+ docker_vol['ephemeral'] = 'ephemeral0'
+ return [root_vol, docker_vol]
+ elif host_type == 'etcd' and 'etcd' in data[host_type]:
+ etcd_vol = data[host_type]['etcd']
+ etcd_vol['device_name'] = '/dev/xvdb'
+ etcd_vol['delete_on_termination'] = True
+ if etcd_vol['device_type'] != 'io1':
+ etcd_vol.pop('iops', None)
+ return [root_vol, etcd_vol]
+ return [root_vol]
+
+
+def oo_split(string, separator=','):
+ """ This splits the input string into a list. If the input string is
+ already a list we will return it as is.
+ """
+ if isinstance(string, list):
+ return string
+ return string.split(separator)
+
+
+def oo_haproxy_backend_masters(hosts, port):
+ """ This takes an array of dicts and returns an array of dicts
+ to be used as a backend for the haproxy role
+ """
+ servers = []
+ for idx, host_info in enumerate(hosts):
+ server = dict(name="master%s" % idx)
+ server_ip = host_info['openshift']['common']['ip']
+ server['address'] = "%s:%s" % (server_ip, port)
+ server['opts'] = 'check'
+ servers.append(server)
+ return servers
+
+
+def oo_filter_list(data, filter_attr=None):
+ """ This returns a list, which contains all items where filter_attr
+ evaluates to true
+ Ex: data = [ { a: 1, b: True },
+ { a: 3, b: False },
+ { a: 5, b: True } ]
+ filter_attr = 'b'
+ returns [ { a: 1, b: True },
+ { a: 5, b: True } ]
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a list")
+
+ if not isinstance(filter_attr, string_types):
+ raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
+
+ # Gather up the values for the list of keys passed in
+ return [x for x in data if filter_attr in x and x[filter_attr]]
+
+
+def oo_nodes_with_label(nodes, label, value=None):
+ """ Filters a list of nodes by label and value (if provided)
+
+ It handles labels that are in the following variables by priority:
+ openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels']
+
+ Examples:
+ data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
+ 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
+ 'c': {'openshift_node_labels': {'size': 'S'}}]
+ label = 'color'
+ returns = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
+ 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
+
+ data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
+ 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
+ 'c': {'openshift_node_labels': {'size': 'S'}}]
+ label = 'color'
+ value = 'green'
+ returns = ['b': {'labels': {'color': 'green', 'size': 'L'}}]
+
+ Args:
+ nodes (list[dict]): list of node to node variables
+ label (str): label to filter `nodes` by
+ value (Optional[str]): value of `label` to filter by Defaults
+ to None.
+
+ Returns:
+ list[dict]: nodes filtered by label and value (if provided)
+ """
+ if not isinstance(nodes, list):
+ raise errors.AnsibleFilterError("failed expects to filter on a list")
+ if not isinstance(label, string_types):
+ raise errors.AnsibleFilterError("failed expects label to be a string")
+ if value is not None and not isinstance(value, string_types):
+ raise errors.AnsibleFilterError("failed expects value to be a string")
+
+ def label_filter(node):
+ """ filter function for testing if node should be returned """
+ if not isinstance(node, dict):
+ raise errors.AnsibleFilterError("failed expects to filter on a list of dicts")
+ if 'openshift_node_labels' in node:
+ labels = node['openshift_node_labels']
+ elif 'cli_openshift_node_labels' in node:
+ labels = node['cli_openshift_node_labels']
+ elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']:
+ labels = node['openshift']['node']['labels']
+ else:
+ return False
+
+ if isinstance(labels, string_types):
+ labels = yaml.safe_load(labels)
+ if not isinstance(labels, dict):
+ raise errors.AnsibleFilterError(
+ "failed expected node labels to be a dict or serializable to a dict"
+ )
+ return label in labels and (value is None or labels[label] == value)
+
+ return [n for n in nodes if label_filter(n)]
+
+
+def oo_parse_heat_stack_outputs(data):
+ """ Formats the HEAT stack output into a usable form
+
+ The goal is to transform something like this:
+
+ +---------------+-------------------------------------------------+
+ | Property | Value |
+ +---------------+-------------------------------------------------+
+ | capabilities | [] | |
+ | creation_time | 2015-06-26T12:26:26Z | |
+ | description | OpenShift cluster | |
+ | … | … |
+ | outputs | [ |
+ | | { |
+ | | "output_value": "value_A" |
+ | | "description": "This is the value of Key_A" |
+ | | "output_key": "Key_A" |
+ | | }, |
+ | | { |
+ | | "output_value": [ |
+ | | "value_B1", |
+ | | "value_B2" |
+ | | ], |
+ | | "description": "This is the value of Key_B" |
+ | | "output_key": "Key_B" |
+ | | }, |
+ | | ] |
+ | parameters | { |
+ | … | … |
+ +---------------+-------------------------------------------------+
+
+ into something like this:
+
+ {
+ "Key_A": "value_A",
+ "Key_B": [
+ "value_B1",
+ "value_B2"
+ ]
+ }
+ """
+
+ # Extract the “outputs” JSON snippet from the pretty-printed array
+ in_outputs = False
+ outputs = ''
+
+ line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
+ for line in data['stdout_lines']:
+ match = line_regex.match(line)
+ if match:
+ if match.group(1) == 'outputs':
+ in_outputs = True
+ elif match.group(1) != '':
+ in_outputs = False
+ if in_outputs:
+ outputs += match.group(2)
+
+ outputs = json.loads(outputs)
+
+ # Revamp the “outputs” to put it in the form of a “Key: value” map
+ revamped_outputs = {}
+ for output in outputs:
+ revamped_outputs[output['output_key']] = output['output_value']
+
+ return revamped_outputs
+
+
+# pylint: disable=too-many-branches
+def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
+ """ Parses names from list of certificate hashes.
+
+ Ex: certificates = [{ "certfile": "/root/custom1.crt",
+ "keyfile": "/root/custom1.key",
+ "cafile": "/root/custom-ca1.crt" },
+ { "certfile": "custom2.crt",
+ "keyfile": "custom2.key",
+ "cafile": "custom-ca2.crt" }]
+
+ returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom1.key",
+ "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
+ "names": [ "public-master-host.com",
+ "other-master-host.com" ] },
+ { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom2.key",
+ "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
+ "names": [ "some-hostname.com" ] }]
+ """
+ if not isinstance(named_certs_dir, string_types):
+ raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
+
+ if not isinstance(internal_hostnames, list):
+ raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+ if not HAS_OPENSSL:
+ raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
+
+ for certificate in certificates:
+ if 'names' in certificate.keys():
+ continue
+ else:
+ certificate['names'] = []
- # Extract the “outputs” JSON snippet from the pretty-printed array
- in_outputs = False
- outputs = ''
-
- line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
- for line in data['stdout_lines']:
- match = line_regex.match(line)
- if match:
- if match.group(1) == 'outputs':
- in_outputs = True
- elif match.group(1) != '':
- in_outputs = False
- if in_outputs:
- outputs += match.group(2)
-
- outputs = json.loads(outputs)
-
- # Revamp the “outputs” to put it in the form of a “Key: value” map
- revamped_outputs = {}
- for output in outputs:
- revamped_outputs[output['output_key']] = output['output_value']
-
- return revamped_outputs
-
- @staticmethod
- # pylint: disable=too-many-branches
- def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
- """ Parses names from list of certificate hashes.
-
- Ex: certificates = [{ "certfile": "/root/custom1.crt",
- "keyfile": "/root/custom1.key",
- "cafile": "/root/custom-ca1.crt" },
- { "certfile": "custom2.crt",
- "keyfile": "custom2.key",
- "cafile": "custom-ca2.crt" }]
-
- returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
- "keyfile": "/etc/origin/master/named_certificates/custom1.key",
- "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
- "names": [ "public-master-host.com",
- "other-master-host.com" ] },
- { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
- "keyfile": "/etc/origin/master/named_certificates/custom2.key",
- "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
- "names": [ "some-hostname.com" ] }]
- """
- if not isinstance(named_certs_dir, string_types):
- raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
-
- if not isinstance(internal_hostnames, list):
- raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
-
- if not HAS_OPENSSL:
- raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
-
- for certificate in certificates:
- if 'names' in certificate.keys():
- continue
- else:
- certificate['names'] = []
-
- if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
- raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
- (certificate['certfile'], certificate['keyfile']))
-
- try:
- st_cert = open(certificate['certfile'], 'rt').read()
- cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
- certificate['names'].append(str(cert.get_subject().commonName.decode()))
- for i in range(cert.get_extension_count()):
- if cert.get_extension(i).get_short_name() == 'subjectAltName':
- for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
- certificate['names'].append(name)
- except Exception:
- raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
- "please specify certificate names in host inventory"))
-
- certificate['names'] = list(set(certificate['names']))
- if 'cafile' not in certificate:
- certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
- if not certificate['names']:
- raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
- "detected a collision with internal hostname, please specify " +
- "certificate names in host inventory"))
-
- for certificate in certificates:
- # Update paths for configuration
- certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
- certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
- if 'cafile' in certificate:
- certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
- return certificates
-
- @staticmethod
- def oo_pretty_print_cluster(data, prefix='tag_'):
- """ Read a subset of hostvars and build a summary of the cluster
- in the following layout:
+ if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
+ raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+ (certificate['certfile'], certificate['keyfile']))
+
+ try:
+ st_cert = open(certificate['certfile'], 'rt').read()
+ cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+ certificate['names'].append(str(cert.get_subject().commonName.decode()))
+ for i in range(cert.get_extension_count()):
+ if cert.get_extension(i).get_short_name() == 'subjectAltName':
+ for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+ certificate['names'].append(name)
+ except Exception:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+ "please specify certificate names in host inventory"))
+
+ certificate['names'] = list(set(certificate['names']))
+ if 'cafile' not in certificate:
+ certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+ if not certificate['names']:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+ "detected a collision with internal hostname, please specify " +
+ "certificate names in host inventory"))
+
+ for certificate in certificates:
+ # Update paths for configuration
+ certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
+ certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
+ if 'cafile' in certificate:
+ certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
+ return certificates
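A portability note on the subjectAltName loop above: under Python 3, pyOpenSSL's `get_short_name()` returns bytes, so comparing it against the str 'subjectAltName' would never match. A py3-safe variant of that check (a sketch, not part of this patch):

    # compare against bytes so the check works on both Python 2 and 3
    if cert.get_extension(i).get_short_name() == b'subjectAltName':
        for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
            certificate['names'].append(name)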
+
+
+def oo_pretty_print_cluster(data, prefix='tag_'):
+ """ Read a subset of hostvars and build a summary of the cluster
+ in the following layout:
"c_id": {
- "master": {
- "default": [
- { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" }
- ]
- "node": {
- "infra": [
- { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" }
- ],
- "compute": [
- { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" },
- ...
- ]
- }
+"master": {
+"default": [
+ { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" }
+]
+"node": {
+"infra": [
+ { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" }
+],
+"compute": [
+ { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" },
+...
+]
+}
+ """
+
+ def _get_tag_value(tags, key):
+ """ Extract values of a map implemented as a set.
+ Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
+ key = 'bar'
+ returns 'value2'
"""
-
- def _get_tag_value(tags, key):
- """ Extract values of a map implemented as a set.
- Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
- key = 'bar'
- returns 'value2'
- """
- for tag in tags:
- if tag[:len(prefix) + len(key)] == prefix + key:
- return tag[len(prefix) + len(key) + 1:]
- raise KeyError(key)
-
- def _add_host(clusters,
- clusterid,
- host_type,
- sub_host_type,
- host):
- """ Add a new host in the clusters data structure """
- if clusterid not in clusters:
- clusters[clusterid] = {}
- if host_type not in clusters[clusterid]:
- clusters[clusterid][host_type] = {}
- if sub_host_type not in clusters[clusterid][host_type]:
- clusters[clusterid][host_type][sub_host_type] = []
- clusters[clusterid][host_type][sub_host_type].append(host)
-
- clusters = {}
- for host in data:
- try:
- _add_host(clusters=clusters,
- clusterid=_get_tag_value(host['group_names'], 'clusterid'),
- host_type=_get_tag_value(host['group_names'], 'host-type'),
- sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
- host={'name': host['inventory_hostname'],
- 'public IP': host['oo_public_ipv4'],
- 'private IP': host['oo_private_ipv4']})
- except KeyError:
- pass
- return clusters
-
- @staticmethod
- def oo_generate_secret(num_bytes):
- """ generate a session secret """
-
- if not isinstance(num_bytes, int):
- raise errors.AnsibleFilterError("|failed expects num_bytes is int")
-
- secret = os.urandom(num_bytes)
- return secret.encode('base-64').strip()
-
- @staticmethod
- def to_padded_yaml(data, level=0, indent=2, **kw):
- """ returns a yaml snippet padded to match the indent level you specify """
- if data in [None, ""]:
- return ""
-
+ for tag in tags:
+ if tag[:len(prefix) + len(key)] == prefix + key:
+ return tag[len(prefix) + len(key) + 1:]
+ raise KeyError(key)
+
+ def _add_host(clusters,
+ clusterid,
+ host_type,
+ sub_host_type,
+ host):
+ """ Add a new host in the clusters data structure """
+ if clusterid not in clusters:
+ clusters[clusterid] = {}
+ if host_type not in clusters[clusterid]:
+ clusters[clusterid][host_type] = {}
+ if sub_host_type not in clusters[clusterid][host_type]:
+ clusters[clusterid][host_type][sub_host_type] = []
+ clusters[clusterid][host_type][sub_host_type].append(host)
+
+ clusters = {}
+ for host in data:
try:
- transformed = yaml.dump(data, indent=indent, allow_unicode=True,
- default_flow_style=False,
- Dumper=AnsibleDumper, **kw)
- padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
- return to_text("\n{0}".format(padded))
- except Exception as my_e:
- raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
-
- @staticmethod
- def oo_openshift_env(hostvars):
- ''' Return facts which begin with "openshift_" and translate
- legacy facts to their openshift_env counterparts.
-
- Ex: hostvars = {'openshift_fact': 42,
- 'theyre_taking_the_hobbits_to': 'isengard'}
- returns = {'openshift_fact': 42}
- '''
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-
- facts = {}
- regex = re.compile('^openshift_.*')
- for key in hostvars:
- if regex.match(key):
- facts[key] = hostvars[key]
-
- migrations = {'openshift_router_selector': 'openshift_hosted_router_selector',
- 'openshift_registry_selector': 'openshift_hosted_registry_selector'}
- for old_fact, new_fact in migrations.items():
- if old_fact in facts and new_fact not in facts:
- facts[new_fact] = facts[old_fact]
- return facts
-
- @staticmethod
- # pylint: disable=too-many-branches, too-many-nested-blocks
- def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
- """ Generate list of persistent volumes based on oo_openshift_env
- storage options set in host variables.
- """
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
- if not issubclass(type(groups), dict):
- raise errors.AnsibleFilterError("|failed expects groups is a dict")
- if persistent_volumes is not None and not issubclass(type(persistent_volumes), list):
- raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
-
- if persistent_volumes is None:
- persistent_volumes = []
- if 'hosted' in hostvars['openshift']:
- for component in hostvars['openshift']['hosted']:
- if 'storage' in hostvars['openshift']['hosted'][component]:
- params = hostvars['openshift']['hosted'][component]['storage']
- kind = params['kind']
- create_pv = params['create_pv']
- if kind is not None and create_pv:
- if kind == 'nfs':
- host = params['host']
- if host is None:
- if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
- host = groups['oo_nfs_to_config'][0]
- else:
- raise errors.AnsibleFilterError("|failed no storage host detected")
- directory = params['nfs']['directory']
- volume = params['volume']['name']
- path = directory + '/' + volume
- size = params['volume']['size']
- access_modes = params['access']['modes']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- access_modes=access_modes,
- storage=dict(
- nfs=dict(
- server=host,
- path=path)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'openstack':
- volume = params['volume']['name']
- size = params['volume']['size']
- access_modes = params['access']['modes']
- filesystem = params['openstack']['filesystem']
- volume_id = params['openstack']['volumeID']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- access_modes=access_modes,
- storage=dict(
- cinder=dict(
- fsType=filesystem,
- volumeID=volume_id)))
- persistent_volumes.append(persistent_volume)
- elif not (kind == 'object' or kind == 'dynamic'):
- msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
- kind,
- component)
- raise errors.AnsibleFilterError(msg)
- return persistent_volumes
-
- @staticmethod
- def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
- """ Generate list of persistent volume claims based on oo_openshift_env
- storage options set in host variables.
- """
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
- if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list):
- raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
-
- if persistent_volume_claims is None:
- persistent_volume_claims = []
- if 'hosted' in hostvars['openshift']:
- for component in hostvars['openshift']['hosted']:
- if 'storage' in hostvars['openshift']['hosted'][component]:
- params = hostvars['openshift']['hosted'][component]['storage']
- kind = params['kind']
- create_pv = params['create_pv']
- create_pvc = params['create_pvc']
- if kind not in [None, 'object'] and create_pv and create_pvc:
+ _add_host(clusters=clusters,
+ clusterid=_get_tag_value(host['group_names'], 'clusterid'),
+ host_type=_get_tag_value(host['group_names'], 'host-type'),
+ sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
+ host={'name': host['inventory_hostname'],
+ 'public IP': host['oo_public_ipv4'],
+ 'private IP': host['oo_private_ipv4']})
+ except KeyError:
+ pass
+ return clusters
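The `_get_tag_value` helper above relies on group names encoding key/value pairs as `tag_<key>_<value>`; a quick illustration with hypothetical tags:

    tags = ['tag_clusterid_c1', 'tag_host-type_node', 'tag_sub-host-type_infra']
    # _get_tag_value(tags, 'host-type') matches the 'tag_host-type' prefix and
    # returns everything after the separating '_': 'node'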
+
+
+def oo_generate_secret(num_bytes):
+ """ generate a session secret """
+
+ if not isinstance(num_bytes, int):
+ raise errors.AnsibleFilterError("|failed expects num_bytes is int")
+
+ secret = os.urandom(num_bytes)
+ return secret.encode('base-64').strip()
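`str.encode('base-64')` only exists on Python 2; a Python 3 equivalent (a sketch, not what this patch ships) would go through the base64 module:

    import base64
    import os

    def generate_secret(num_bytes):
        # urandom gives raw bytes; base64-encode them and decode to text
        return base64.b64encode(os.urandom(num_bytes)).decode('ascii')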
+
+
+def to_padded_yaml(data, level=0, indent=2, **kw):
+ """ returns a yaml snippet padded to match the indent level you specify """
+ if data in [None, ""]:
+ return ""
+
+ try:
+ transformed = yaml.dump(data, indent=indent, allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper, **kw)
+ padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
+ return to_text("\n{0}".format(padded))
+ except Exception as my_e:
+ raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
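Used from a template, this filter splices a structure into an existing YAML document at a given depth; for example:

    to_padded_yaml({'logging': {'driver': 'json-file'}}, level=2)
    # -> '\n    logging:\n      driver: json-file'
    # each dumped line is prefixed with level * indent (here 2 * 2) spaces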
+
+
+def oo_openshift_env(hostvars):
+ ''' Return facts which begin with "openshift_" and translate
+ legacy facts to their openshift_env counterparts.
+
+ Ex: hostvars = {'openshift_fact': 42,
+ 'theyre_taking_the_hobbits_to': 'isengard'}
+ returns = {'openshift_fact': 42}
+ '''
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+
+ facts = {}
+ regex = re.compile('^openshift_.*')
+ for key in hostvars:
+ if regex.match(key):
+ facts[key] = hostvars[key]
+
+ migrations = {'openshift_router_selector': 'openshift_hosted_router_selector',
+ 'openshift_registry_selector': 'openshift_hosted_registry_selector'}
+ for old_fact, new_fact in migrations.items():
+ if old_fact in facts and new_fact not in facts:
+ facts[new_fact] = facts[old_fact]
+ return facts
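The migration mapping means a host that only sets a legacy selector still yields the new key (illustrative values):

    hostvars = {'openshift_router_selector': 'region=infra', 'ansible_fqdn': 'node1'}
    # oo_openshift_env(hostvars) drops non-openshift_* keys and adds the
    # migrated name when it is absent:
    # {'openshift_router_selector': 'region=infra',
    #  'openshift_hosted_router_selector': 'region=infra'}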
+
+
+# pylint: disable=too-many-branches, too-many-nested-blocks
+def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
+ """ Generate list of persistent volumes based on oo_openshift_env
+ storage options set in host variables.
+ """
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+ if not issubclass(type(groups), dict):
+ raise errors.AnsibleFilterError("|failed expects groups is a dict")
+ if persistent_volumes is not None and not issubclass(type(persistent_volumes), list):
+ raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
+
+ if persistent_volumes is None:
+ persistent_volumes = []
+ if 'hosted' in hostvars['openshift']:
+ for component in hostvars['openshift']['hosted']:
+ if 'storage' in hostvars['openshift']['hosted'][component]:
+ params = hostvars['openshift']['hosted'][component]['storage']
+ kind = params['kind']
+ create_pv = params['create_pv']
+ if kind is not None and create_pv:
+ if kind == 'nfs':
+ host = params['host']
+ if host is None:
+ if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+ host = groups['oo_nfs_to_config'][0]
+ else:
+ raise errors.AnsibleFilterError("|failed no storage host detected")
+ directory = params['nfs']['directory']
volume = params['volume']['name']
+ path = directory + '/' + volume
size = params['volume']['size']
access_modes = params['access']['modes']
- persistent_volume_claim = dict(
- name="{0}-claim".format(volume),
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
capacity=size,
- access_modes=access_modes)
- persistent_volume_claims.append(persistent_volume_claim)
- return persistent_volume_claims
-
- @staticmethod
- def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
- """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
- names with proper version (if provided)
-
- If 3.1 rpms are passed in they will only be augmented with the
- correct version. This is important for hosts that are running both
- Masters and Nodes.
- """
- if not isinstance(rpms, list):
- raise errors.AnsibleFilterError("failed expects to filter on a list")
- if openshift_version is not None and not isinstance(openshift_version, string_types):
- raise errors.AnsibleFilterError("failed expects openshift_version to be a string")
-
- rpms_31 = []
- for rpm in rpms:
- if 'atomic' not in rpm:
- rpm = rpm.replace("openshift", "atomic-openshift")
- if openshift_version:
- rpm = rpm + openshift_version
- rpms_31.append(rpm)
-
- return rpms_31
-
- @staticmethod
- def oo_pods_match_component(pods, deployment_type, component):
- """ Filters a list of Pods and returns the ones matching the deployment_type and component
- """
- if not isinstance(pods, list):
- raise errors.AnsibleFilterError("failed expects to filter on a list")
- if not isinstance(deployment_type, string_types):
- raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
- if not isinstance(component, string_types):
- raise errors.AnsibleFilterError("failed expects component to be a string")
-
- image_prefix = 'openshift/origin-'
- if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
- image_prefix = 'openshift3/ose-'
- elif deployment_type == 'atomic-enterprise':
- image_prefix = 'aep3_beta/aep-'
-
- matching_pods = []
- image_regex = image_prefix + component + r'.*'
- for pod in pods:
- for container in pod['spec']['containers']:
- if re.search(image_regex, container['image']):
- matching_pods.append(pod)
- break # stop here, don't add a pod more than once
-
- return matching_pods
-
- @staticmethod
- def oo_get_hosts_from_hostvars(hostvars, hosts):
- """ Return a list of hosts from hostvars """
- retval = []
- for host in hosts:
- try:
- retval.append(hostvars[host])
- except errors.AnsibleError:
- # host does not exist
- pass
-
- return retval
-
- @staticmethod
- def oo_image_tag_to_rpm_version(version, include_dash=False):
- """ Convert an image tag string to an RPM version if necessary
- Empty strings and strings that are already in rpm version format
- are ignored. Also remove non semantic version components.
-
- Ex. v3.2.0.10 -> -3.2.0.10
- v1.2.0-rc1 -> -1.2.0
- """
- if not isinstance(version, string_types):
- raise errors.AnsibleFilterError("|failed expects a string or unicode")
- if version.startswith("v"):
- version = version[1:]
- # Strip release from requested version, we no longer support this.
- version = version.split('-')[0]
-
- if include_dash and version and not version.startswith("-"):
- version = "-" + version
-
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+ persistent_volumes.append(persistent_volume)
+ elif kind == 'openstack':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ filesystem = params['openstack']['filesystem']
+ volume_id = params['openstack']['volumeID']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+ persistent_volumes.append(persistent_volume)
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ component)
+ raise errors.AnsibleFilterError(msg)
+ return persistent_volumes
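For an NFS-backed component the generated entry ends up shaped like this (illustrative values):

    {'name': 'registry-volume',
     'capacity': '10Gi',
     'access_modes': ['ReadWriteMany'],
     'storage': {'nfs': {'server': 'nfs.example.com',
                         'path': '/exports/registry'}}}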
+
+
+def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
+ """ Generate list of persistent volume claims based on oo_openshift_env
+ storage options set in host variables.
+ """
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+ if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list):
+ raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
+
+ if persistent_volume_claims is None:
+ persistent_volume_claims = []
+ if 'hosted' in hostvars['openshift']:
+ for component in hostvars['openshift']['hosted']:
+ if 'storage' in hostvars['openshift']['hosted'][component]:
+ params = hostvars['openshift']['hosted'][component]['storage']
+ kind = params['kind']
+ create_pv = params['create_pv']
+ create_pvc = params['create_pvc']
+ if kind not in [None, 'object'] and create_pv and create_pvc:
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ persistent_volume_claim = dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ persistent_volume_claims.append(persistent_volume_claim)
+ return persistent_volume_claims
+
+
+def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
+ """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
+    """ Filters a list of 3.0 rpms and returns the corresponding 3.1 rpm
+    names with the proper version (if provided).
+ If 3.1 rpms are passed in they will only be augmented with the
+ correct version. This is important for hosts that are running both
+ Masters and Nodes.
+ """
+ if not isinstance(rpms, list):
+ raise errors.AnsibleFilterError("failed expects to filter on a list")
+ if openshift_version is not None and not isinstance(openshift_version, string_types):
+ raise errors.AnsibleFilterError("failed expects openshift_version to be a string")
+
+ rpms_31 = []
+ for rpm in rpms:
+ if 'atomic' not in rpm:
+ rpm = rpm.replace("openshift", "atomic-openshift")
+ if openshift_version:
+ rpm = rpm + openshift_version
+ rpms_31.append(rpm)
+
+ return rpms_31
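Worked through with a hypothetical version string, the rename gives:

    oo_31_rpm_rename_conversion(['openshift', 'openshift-master'], '-3.1.1.6')
    # -> ['atomic-openshift-3.1.1.6', 'atomic-openshift-master-3.1.1.6']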
+
+
+def oo_pods_match_component(pods, deployment_type, component):
+ """ Filters a list of Pods and returns the ones matching the deployment_type and component
+ """
+ if not isinstance(pods, list):
+ raise errors.AnsibleFilterError("failed expects to filter on a list")
+ if not isinstance(deployment_type, string_types):
+ raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
+ if not isinstance(component, string_types):
+ raise errors.AnsibleFilterError("failed expects component to be a string")
+
+ image_prefix = 'openshift/origin-'
+ if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
+ image_prefix = 'openshift3/ose-'
+ elif deployment_type == 'atomic-enterprise':
+ image_prefix = 'aep3_beta/aep-'
+
+ matching_pods = []
+ image_regex = image_prefix + component + r'.*'
+ for pod in pods:
+ for container in pod['spec']['containers']:
+ if re.search(image_regex, container['image']):
+ matching_pods.append(pod)
+ break # stop here, don't add a pod more than once
+
+ return matching_pods
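The match is a `re.search` over the container image, so the prefix may appear anywhere in a fully qualified image name; e.g. for an enterprise deployment and the router component (hypothetical image):

    # deployment_type='openshift-enterprise', component='router'
    # image_regex == 'openshift3/ose-router.*'
    re.search('openshift3/ose-router.*',
              'registry.access.redhat.com/openshift3/ose-router:v3.4')  # matches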
+
+
+def oo_get_hosts_from_hostvars(hostvars, hosts):
+ """ Return a list of hosts from hostvars """
+ retval = []
+ for host in hosts:
+ try:
+ retval.append(hostvars[host])
+ except errors.AnsibleError:
+ # host does not exist
+ pass
+
+ return retval
+
+
+def oo_image_tag_to_rpm_version(version, include_dash=False):
+ """ Convert an image tag string to an RPM version if necessary
+ Empty strings and strings that are already in rpm version format
+    are ignored. Non-semantic version components are also removed.
+
+ Ex. v3.2.0.10 -> -3.2.0.10
+ v1.2.0-rc1 -> -1.2.0
+ """
+ if not isinstance(version, string_types):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ if version.startswith("v"):
+ version = version[1:]
+ # Strip release from requested version, we no longer support this.
+ version = version.split('-')[0]
+
+ if include_dash and version and not version.startswith("-"):
+ version = "-" + version
+
+ return version
+
+
+def oo_hostname_from_url(url):
+ """ Returns the hostname contained in a URL
+
+ Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
+ """
+ if not isinstance(url, string_types):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ parse_result = urlparse(url)
+ if parse_result.netloc != '':
+ return parse_result.netloc
+ else:
+ # netloc wasn't parsed, assume url was missing scheme and path
+ return parse_result.path
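The fallback to `path` covers scheme-less input, since urlparse only fills netloc when a scheme (or '//') is present:

    urlparse('https://ose3-master.example.com/v1/api').netloc  # 'ose3-master.example.com'
    urlparse('ose3-master.example.com').netloc                 # '' -- the host lands in .path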
+
+
+# pylint: disable=invalid-name, unused-argument
+def oo_openshift_loadbalancer_frontends(
+ api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+    """Return the haproxy frontend definitions for the OpenShift API load balancer."""
+ loadbalancer_frontends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(api_port)],
+ 'default_backend': 'atomic-openshift-api'}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ loadbalancer_frontends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(nuage_rest_port)],
+ 'default_backend': 'nuage-monitor'})
+ return loadbalancer_frontends
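`strtobool(str(use_nuage))` lets the flag arrive either as a real bool or as inventory strings like 'true'/'False'. The returned entries are plain dicts consumed by the haproxy template, e.g.:

    oo_openshift_loadbalancer_frontends(8443, {})
    # -> [{'name': 'atomic-openshift-api', 'mode': 'tcp',
    #      'options': ['tcplog'], 'binds': ['*:8443'],
    #      'default_backend': 'atomic-openshift-api'}]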
+
+
+# pylint: disable=invalid-name
+def oo_openshift_loadbalancer_backends(
+ api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+    """Return the haproxy backend definitions for the OpenShift API load balancer."""
+ loadbalancer_backends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': oo_haproxy_backend_masters(servers_hostvars, api_port)}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ # pylint: disable=line-too-long
+ loadbalancer_backends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
+ return loadbalancer_backends
+
+
+def oo_chomp_commit_offset(version):
+ """Chomp any "+git.foo" commit offset string from the given `version`
+ and return the modified version string.
+
+    Ex:
+    - chomp_commit_offset(None) => None
+    - chomp_commit_offset(1337) => "1337"
+    - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
+    - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
+    - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
+ """
+ if version is None:
return version
+ else:
+ # Stringify, just in case it's a Number type. Split by '+' and
+ # return the first split. No concerns about strings without a
+        # '+'; .split() returns a list containing the original string.
+ return str(version).split('+')[0]
- @staticmethod
- def oo_hostname_from_url(url):
- """ Returns the hostname contained in a URL
- Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
- """
- if not isinstance(url, string_types):
- raise errors.AnsibleFilterError("|failed expects a string or unicode")
- parse_result = urlparse(url)
- if parse_result.netloc != '':
- return parse_result.netloc
- else:
- # netloc wasn't parsed, assume url was missing scheme and path
- return parse_result.path
-
- # pylint: disable=invalid-name, missing-docstring, unused-argument
- @staticmethod
- def oo_openshift_loadbalancer_frontends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
- loadbalancer_frontends = [{'name': 'atomic-openshift-api',
- 'mode': 'tcp',
- 'options': ['tcplog'],
- 'binds': ["*:{0}".format(api_port)],
- 'default_backend': 'atomic-openshift-api'}]
- if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
- loadbalancer_frontends.append({'name': 'nuage-monitor',
- 'mode': 'tcp',
- 'options': ['tcplog'],
- 'binds': ["*:{0}".format(nuage_rest_port)],
- 'default_backend': 'nuage-monitor'})
- return loadbalancer_frontends
-
- # pylint: disable=invalid-name, missing-docstring
- @staticmethod
- def oo_openshift_loadbalancer_backends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
- loadbalancer_backends = [{'name': 'atomic-openshift-api',
- 'mode': 'tcp',
- 'option': 'tcplog',
- 'balance': 'source',
- 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, api_port)}]
- if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
- # pylint: disable=line-too-long
- loadbalancer_backends.append({'name': 'nuage-monitor',
- 'mode': 'tcp',
- 'option': 'tcplog',
- 'balance': 'source',
- 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
- return loadbalancer_backends
-
- @staticmethod
- def oo_chomp_commit_offset(version):
- """Chomp any "+git.foo" commit offset string from the given `version`
- and return the modified version string.
-
- Ex:
- - chomp_commit_offset(None) => None
- - chomp_commit_offset(1337) => "1337"
- - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
- - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
- - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
- """
- if version is None:
- return version
- else:
- # Stringify, just in case it's a Number type. Split by '+' and
- # return the first split. No concerns about strings without a
- # '+', .split() returns an array of the original string.
- return str(version).split('+')[0]
+class FilterModule(object):
+ """ Custom ansible filter mapping """
+ # pylint: disable=no-self-use, too-few-public-methods
def filters(self):
""" returns a mapping of filters to methods """
return {
- "oo_select_keys": self.oo_select_keys,
- "oo_select_keys_from_list": self.oo_select_keys_from_list,
- "oo_chomp_commit_offset": self.oo_chomp_commit_offset,
- "oo_collect": self.oo_collect,
- "oo_flatten": self.oo_flatten,
- "oo_pdb": self.oo_pdb,
- "oo_prepend_strings_in_list": self.oo_prepend_strings_in_list,
- "oo_ami_selector": self.oo_ami_selector,
- "oo_ec2_volume_definition": self.oo_ec2_volume_definition,
- "oo_combine_key_value": self.oo_combine_key_value,
- "oo_combine_dict": self.oo_combine_dict,
- "oo_split": self.oo_split,
- "oo_filter_list": self.oo_filter_list,
- "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
- "oo_parse_named_certificates": self.oo_parse_named_certificates,
- "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
- "oo_pretty_print_cluster": self.oo_pretty_print_cluster,
- "oo_generate_secret": self.oo_generate_secret,
- "to_padded_yaml": self.to_padded_yaml,
- "oo_nodes_with_label": self.oo_nodes_with_label,
- "oo_openshift_env": self.oo_openshift_env,
- "oo_persistent_volumes": self.oo_persistent_volumes,
- "oo_persistent_volume_claims": self.oo_persistent_volume_claims,
- "oo_31_rpm_rename_conversion": self.oo_31_rpm_rename_conversion,
- "oo_pods_match_component": self.oo_pods_match_component,
- "oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars,
- "oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version,
- "oo_merge_dicts": self.oo_merge_dicts,
- "oo_hostname_from_url": self.oo_hostname_from_url,
- "oo_merge_hostvars": self.oo_merge_hostvars,
- "oo_openshift_loadbalancer_frontends": self.oo_openshift_loadbalancer_frontends,
- "oo_openshift_loadbalancer_backends": self.oo_openshift_loadbalancer_backends
+ "oo_select_keys": oo_select_keys,
+ "oo_select_keys_from_list": oo_select_keys_from_list,
+ "oo_chomp_commit_offset": oo_chomp_commit_offset,
+ "oo_collect": oo_collect,
+ "oo_flatten": oo_flatten,
+ "oo_pdb": oo_pdb,
+ "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
+ "oo_ami_selector": oo_ami_selector,
+ "oo_ec2_volume_definition": oo_ec2_volume_definition,
+ "oo_combine_key_value": oo_combine_key_value,
+ "oo_combine_dict": oo_combine_dict,
+ "oo_split": oo_split,
+ "oo_filter_list": oo_filter_list,
+ "oo_parse_heat_stack_outputs": oo_parse_heat_stack_outputs,
+ "oo_parse_named_certificates": oo_parse_named_certificates,
+ "oo_haproxy_backend_masters": oo_haproxy_backend_masters,
+ "oo_pretty_print_cluster": oo_pretty_print_cluster,
+ "oo_generate_secret": oo_generate_secret,
+ "oo_nodes_with_label": oo_nodes_with_label,
+ "oo_openshift_env": oo_openshift_env,
+ "oo_persistent_volumes": oo_persistent_volumes,
+ "oo_persistent_volume_claims": oo_persistent_volume_claims,
+ "oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion,
+ "oo_pods_match_component": oo_pods_match_component,
+ "oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars,
+ "oo_image_tag_to_rpm_version": oo_image_tag_to_rpm_version,
+ "oo_merge_dicts": oo_merge_dicts,
+ "oo_hostname_from_url": oo_hostname_from_url,
+ "oo_merge_hostvars": oo_merge_hostvars,
+ "oo_openshift_loadbalancer_frontends": oo_openshift_loadbalancer_frontends,
+ "oo_openshift_loadbalancer_backends": oo_openshift_loadbalancer_backends,
+ "to_padded_yaml": to_padded_yaml,
}
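The net effect of this refactor: every filter is now a plain module-level function, and FilterModule shrinks to the registration shim Ansible requires. In trimmed form the module now looks like:

    def oo_hostname_from_url(url):
        ...

    class FilterModule(object):
        def filters(self):
            # map filter names to the module-level callables above
            return {'oo_hostname_from_url': oo_hostname_from_url}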
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index ec09b09f6..437f4c400 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -161,7 +161,7 @@ class LDAPPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['attributes'], ['url'], ['insecure']]
self._optional += [['ca'],
@@ -176,7 +176,6 @@ class LDAPPasswordIdentityProvider(IdentityProviderBase):
def validate(self):
''' validate this idp instance '''
- IdentityProviderBase.validate(self)
if not isinstance(self.provider['attributes'], dict):
raise errors.AnsibleFilterError("|failed attributes for provider "
"{0} must be a dictionary".format(self.__class__.__name__))
@@ -206,7 +205,7 @@ class KeystonePasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['url'], ['domainName', 'domain_name']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
@@ -225,7 +224,7 @@ class RequestHeaderIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['headers']]
self._optional += [['challengeURL', 'challenge_url'],
@@ -238,7 +237,6 @@ class RequestHeaderIdentityProvider(IdentityProviderBase):
def validate(self):
''' validate this idp instance '''
- IdentityProviderBase.validate(self)
if not isinstance(self.provider['headers'], list):
raise errors.AnsibleFilterError("|failed headers for provider {0} "
"must be a list".format(self.__class__.__name__))
@@ -257,7 +255,7 @@ class AllowAllPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
@@ -274,7 +272,7 @@ class DenyAllPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
@@ -291,7 +289,7 @@ class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['file', 'filename', 'fileName', 'file_name']]
@@ -316,7 +314,7 @@ class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['url']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
@@ -335,13 +333,12 @@ class IdentityProviderOauthBase(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
def validate(self):
''' validate this idp instance '''
- IdentityProviderBase.validate(self)
if self.challenge:
raise errors.AnsibleFilterError("|failed provider {0} does not "
"allow challenge authentication".format(self.__class__.__name__))
diff --git a/git/parent.py b/git/parent.py
deleted file mode 100755
index 92f57df3e..000000000
--- a/git/parent.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-# flake8: noqa
-# pylint: skip-file
-'''
- Script to determine if this commit has also
- been merged through the stage branch
-'''
-#
-# Usage:
-# parent_check.py <branch> <commit_id>
-#
-#
-import sys
-import subprocess
-
-def run_cli_cmd(cmd, in_stdout=None, in_stderr=None):
- '''Run a command and return its output'''
- if not in_stderr:
- proc = subprocess.Popen(cmd, bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)
- else:
- proc = subprocess.check_output(cmd, bufsize=-1, stdout=in_stdout, stderr=in_stderr, shell=False)
- stdout, stderr = proc.communicate()
- if proc.returncode != 0:
- return {"rc": proc.returncode, "error": stderr}
- else:
- return {"rc": proc.returncode, "result": stdout}
-
-def main():
- '''Check to ensure that the commit that is currently
- being submitted is also in the stage branch.
-
- if it is, succeed
- else, fail
- '''
- branch = 'prod'
-
- if sys.argv[1] != branch:
- sys.exit(0)
-
- # git co stg
- results = run_cli_cmd(['/usr/bin/git', 'checkout', 'stg'])
-
- # git pull latest
- results = run_cli_cmd(['/usr/bin/git', 'pull'])
-
- # setup on the <prod> branch in git
- results = run_cli_cmd(['/usr/bin/git', 'checkout', 'prod'])
-
- results = run_cli_cmd(['/usr/bin/git', 'pull'])
- # merge the passed in commit into my current <branch>
-
- commit_id = sys.argv[2]
- results = run_cli_cmd(['/usr/bin/git', 'merge', commit_id])
-
- # get the differences from stg and <branch>
- results = run_cli_cmd(['/usr/bin/git', 'rev-list', '--left-right', 'stg...prod'])
-
- # exit here with error code if the result coming back is an error
- if results['rc'] != 0:
- print results['error']
- sys.exit(results['rc'])
-
- count = 0
- # Each 'result' is a commit
- # Walk through each commit and see if it is in stg
- for commit in results['result'].split('\n'):
-
- # continue if it is already in stg
- if not commit or commit.startswith('<'):
- continue
-
- # remove the first char '>'
- commit = commit[1:]
-
- # check if any remote branches contain $commit
- results = run_cli_cmd(['/usr/bin/git', 'branch', '-q', '-r', '--contains', commit], in_stderr=None)
-
- # if this comes back empty, nothing contains it, we can skip it as
- # we have probably created the merge commit here locally
- if results['rc'] == 0 and len(results['result']) == 0:
- continue
-
- # The results generally contain origin/pr/246/merge and origin/pr/246/head
- # this is the pull request which would contain the commit in question.
- #
- # If the results do not contain origin/stg then stage does not contain
- # the commit in question. Therefore we need to alert!
- if 'origin/stg' not in results['result']:
- print "\nFAILED: (These commits are not in stage.)\n"
- print "\t%s" % commit
- count += 1
-
- # Exit with count of commits in #{branch} but not stg
- sys.exit(count)
-
-if __name__ == '__main__':
- main()
diff --git a/git/pylint.sh b/git/pylint.sh
deleted file mode 100755
index 3acf9cc8c..000000000
--- a/git/pylint.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env bash
-set -eu
-
-ANSIBLE_UPSTREAM_FILES=(
- 'inventory/aws/hosts/ec2.py'
- 'inventory/gce/hosts/gce.py'
- 'inventory/libvirt/hosts/libvirt_generic.py'
- 'inventory/openstack/hosts/nova.py'
- 'lookup_plugins/sequence.py'
- 'playbooks/gce/openshift-cluster/library/gce.py'
- )
-
-OLDREV=$1
-NEWREV=$2
-#TRG_BRANCH=$3
-
-PYTHON=$(which python)
-
-set +e
-PY_DIFF=$(/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | grep ".py$")
-set -e
-
-FILES_TO_TEST=""
-
-for PY_FILE in $PY_DIFF; do
- IGNORE_FILE=false
- for UPSTREAM_FILE in "${ANSIBLE_UPSTREAM_FILES[@]}"; do
- if [ "${PY_FILE}" == "${UPSTREAM_FILE}" ]; then
- IGNORE_FILE=true
- break
- fi
- done
-
- if [ "${IGNORE_FILE}" == true ]; then
- echo "Skipping file ${PY_FILE} as an upstream Ansible file..."
- continue
- fi
-
- if [ -e "${PY_FILE}" ]; then
- FILES_TO_TEST="${FILES_TO_TEST} ${PY_FILE}"
- fi
-done
-
-export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/
-
-if [ "${FILES_TO_TEST}" != "" ]; then
- echo "Testing files: ${FILES_TO_TEST}"
- exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
-else
- exit 0
-fi
diff --git a/git/yaml_validation.py b/git/yaml_validation.py
deleted file mode 100755
index 6672876bb..000000000
--- a/git/yaml_validation.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-# flake8: noqa
-#
-# python yaml validator for a git commit
-#
-'''
-python yaml validator for a git commit
-'''
-import shutil
-import sys
-import os
-import tempfile
-import subprocess
-import yaml
-
-def get_changes(oldrev, newrev, tempdir):
- '''Get a list of git changes from oldrev to newrev'''
- proc = subprocess.Popen(['/usr/bin/git', 'diff', '--name-only', oldrev,
- newrev, '--diff-filter=ACM'], stdout=subprocess.PIPE)
- stdout, _ = proc.communicate()
- files = stdout.split('\n')
-
- # No file changes
- if not files:
- return []
-
- cmd = '/usr/bin/git archive %s %s | /bin/tar x -C %s' % (newrev, " ".join(files), tempdir)
- proc = subprocess.Popen(cmd, shell=True)
- _, _ = proc.communicate()
-
- rfiles = []
- for dirpath, _, fnames in os.walk(tempdir):
- for fname in fnames:
- rfiles.append(os.path.join(dirpath, fname))
-
- return rfiles
-
-def main():
- '''
- Perform yaml validation
- '''
- results = []
- try:
- tmpdir = tempfile.mkdtemp(prefix='jenkins-git-')
- old, new, _ = sys.argv[1:]
-
- for file_mod in get_changes(old, new, tmpdir):
-
- print "+++++++ Received: %s" % file_mod
-
- # if the file extensions is not yml or yaml, move along.
- if not file_mod.endswith('.yml') and not file_mod.endswith('.yaml'):
- continue
-
- # We use symlinks in our repositories, ignore them.
- if os.path.islink(file_mod):
- continue
-
- try:
- yaml.load(open(file_mod))
- results.append(True)
-
- except yaml.scanner.ScannerError as yerr:
- print yerr
- results.append(False)
- finally:
- shutil.rmtree(tmpdir)
-
- if not all(results):
- sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/inventory/README.md b/inventory/README.md
index b8edfcbb0..b61bfff18 100644
--- a/inventory/README.md
+++ b/inventory/README.md
@@ -5,5 +5,5 @@ You can install OpenShift on:
* [Amazon Web Services](aws/hosts/)
* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your bare metal servers
* [GCE](gce/) (Google Compute Engine)
-* [libvirt](libviert/hosts/)
+* [libvirt](libvirt/hosts/)
* [OpenStack](openstack/hosts/)
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini
index 5ee51c84f..64c097d47 100644
--- a/inventory/aws/hosts/ec2.ini
+++ b/inventory/aws/hosts/ec2.ini
@@ -29,17 +29,32 @@ regions_exclude = us-gov-west-1,cn-north-1
# in the event of a collision.
destination_variable = public_dns_name
+# This allows you to override the inventory_name with an ec2 variable, instead
+# of using the destination_variable above. Addressing (aka ansible_ssh_host)
+# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
+hostname_variable = tag_Name
+
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
-# WARNING: - instances that are in the private vpc, _without_ public ip address
+# WARNING: - instances that are in the private vpc, _without_ public ip address
# will not be listed in the inventory until You set:
-# vpc_destination_variable = 'private_ip_address'
+# vpc_destination_variable = private_ip_address
vpc_destination_variable = ip_address
+# The following two settings allow flexible ansible host naming based on a
+# python format string and a comma-separated list of ec2 tags. Note that:
+#
+# 1) If the tags referenced are not present for some instances, empty strings
+# will be substituted in the format string.
+# 2) This overrides both destination_variable and vpc_destination_variable.
+#
+#destination_format = {0}.{1}.example.com
+#destination_format_tags = Name,environment
+
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
@@ -67,6 +82,9 @@ all_instances = False
# 'all_rds_instances' to True return all RDS instances regardless of state.
all_rds_instances = False
+# Include RDS cluster information (Aurora etc.)
+include_rds_clusters = False
+
# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes'
# to True return all ElastiCache clusters and nodes, regardless of state.
@@ -91,19 +109,16 @@ cache_path = ~/.ansible/tmp
# To disable the cache, set this value to 0
cache_max_age = 300
-# These two settings allow flexible ansible host naming based on a format
-# string and a comma-separated list of ec2 tags. The tags used must be
-# present for all instances, or the code will fail. This overrides both
-# destination_variable and vpc_destination_variable.
-# destination_format = {0}.{1}.rhcloud.com
-# destination_format_tags = Name,environment
-
# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False
# Replace - tags when creating groups to avoid issues with ansible
replace_dash_in_groups = False
+# If set to true, any tag of the form "a,b,c" is expanded into a list
+# and the results are used to create additional tag_* inventory groups.
+expand_csv_tags = False
+
# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = True
@@ -147,9 +162,28 @@ group_by_elasticache_replication_group = True
# You can use wildcards in filter values also. Below will list instances which
# tag Name value matches webservers1*
-# (ex. webservers15, webservers1a, webservers123 etc)
+# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*
# A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name
+
+
+[credentials]
+
+# The AWS credentials can optionally be specified here. Credentials specified
+# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
+# AWS_PROFILE is set, or if the boto_profile property above is set.
+#
+# Supplying AWS credentials here is not recommended, as it introduces
+# non-trivial security concerns. When going down this route, please make sure
+# to set access permissions for this file correctly, e.g. handle it the same
+# way as you would a private SSH key.
+#
+# Unlike the boto and AWS configure files, this section does not support
+# profiles.
+#
+# aws_access_key_id = AXXXXXXXXXXXXXX
+# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
+# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py
index 7dfcd7889..b71458a29 100755
--- a/inventory/aws/hosts/ec2.py
+++ b/inventory/aws/hosts/ec2.py
@@ -38,6 +38,7 @@ When run against a specific host, this script returns the following variables:
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
+ - ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
@@ -132,6 +133,15 @@ from boto import elasticache
from boto import route53
import six
+from ansible.module_utils import ec2 as ec2_utils
+
+HAS_BOTO3 = False
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ pass
+
from six.moves import configparser
from collections import defaultdict
@@ -142,6 +152,7 @@ except ImportError:
class Ec2Inventory(object):
+
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
@@ -158,6 +169,9 @@ class Ec2Inventory(object):
# Boto profile to use (if any)
self.boto_profile = None
+ # AWS credentials.
+ self.credentials = {}
+
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
@@ -225,7 +239,7 @@ class Ec2Inventory(object):
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
- self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
+ self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
@@ -237,6 +251,11 @@ class Ec2Inventory(object):
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
+ if config.has_option('ec2', 'hostname_variable'):
+ self.hostname_variable = config.get('ec2', 'hostname_variable')
+ else:
+ self.hostname_variable = None
+
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
@@ -257,6 +276,12 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
+ # Include RDS cluster instances?
+ if config.has_option('ec2', 'include_rds_clusters'):
+ self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
+ else:
+ self.include_rds_clusters = False
+
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
@@ -319,6 +344,29 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
+ # AWS credentials (prefer environment variables)
+ if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
+ os.environ.get('AWS_PROFILE')):
+ if config.has_option('credentials', 'aws_access_key_id'):
+ aws_access_key_id = config.get('credentials', 'aws_access_key_id')
+ else:
+ aws_access_key_id = None
+ if config.has_option('credentials', 'aws_secret_access_key'):
+ aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
+ else:
+ aws_secret_access_key = None
+ if config.has_option('credentials', 'aws_security_token'):
+ aws_security_token = config.get('credentials', 'aws_security_token')
+ else:
+ aws_security_token = None
+ if aws_access_key_id:
+ self.credentials = {
+ 'aws_access_key_id': aws_access_key_id,
+ 'aws_secret_access_key': aws_secret_access_key
+ }
+ if aws_security_token:
+ self.credentials['security_token'] = aws_security_token
+
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
@@ -326,10 +374,22 @@ class Ec2Inventory(object):
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
- self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
- self.cache_path_index = cache_dir + "/ansible-ec2.index"
+ cache_name = 'ansible-ec2'
+ aws_profile = lambda: (self.boto_profile or
+ os.environ.get('AWS_PROFILE') or
+ os.environ.get('AWS_ACCESS_KEY_ID') or
+ self.credentials.get('aws_access_key_id', None))
+ if aws_profile():
+ cache_name = '%s-%s' % (cache_name, aws_profile())
+ self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
+ self.cache_path_index = cache_dir + "/%s.index" % cache_name
self.cache_max_age = config.getint('ec2', 'cache_max_age')
+ if config.has_option('ec2', 'expand_csv_tags'):
+ self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
+ else:
+ self.expand_csv_tags = False
+
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
@@ -391,7 +451,10 @@ class Ec2Inventory(object):
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
- for instance_filter in config.get('ec2', 'instance_filters', '').split(','):
+
+ filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
+
+ for instance_filter in filters:
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
@@ -410,7 +473,7 @@ class Ec2Inventory(object):
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
- parser.add_argument('--boto-profile', action='store',
+ parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
@@ -428,6 +491,8 @@ class Ec2Inventory(object):
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
+ if self.include_rds_clusters:
+ self.include_rds_clusters_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
@@ -435,7 +500,7 @@ class Ec2Inventory(object):
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
- conn = boto.connect_euca(host=self.eucalyptus_host)
+ conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
@@ -449,7 +514,7 @@ class Ec2Inventory(object):
return connect_args
def connect_to_aws(self, module, region):
- connect_args = {}
+ connect_args = self.credentials
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
@@ -475,8 +540,25 @@ class Ec2Inventory(object):
else:
reservations = conn.get_all_instances()
+ # Pull the tags back in a second step
+ # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
+ # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
+ instance_ids = []
+ for reservation in reservations:
+ instance_ids.extend([instance.id for instance in reservation.instances])
+
+ max_filter_value = 199
+ tags = []
+ for i in range(0, len(instance_ids), max_filter_value):
+ tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
+
+ tags_by_instance_id = defaultdict(dict)
+ for tag in tags:
+ tags_by_instance_id[tag.res_id][tag.name] = tag.value
+
for reservation in reservations:
for instance in reservation.instances:
+ instance.tags = tags_by_instance_id[instance.id]
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
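The 199-ID chunking keeps each `get_all_tags` call under the EC2 API's cap on filter values (around 200 per filter); the idiom in isolation:

    max_filter_value = 199
    for i in range(0, len(instance_ids), max_filter_value):
        chunk = instance_ids[i:i + max_filter_value]
        # one get_all_tags call per chunk of at most 199 instance IDs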
@@ -494,9 +576,14 @@ class Ec2Inventory(object):
try:
conn = self.connect_to_aws(rds, region)
if conn:
- instances = conn.get_all_dbinstances()
- for instance in instances:
- self.add_rds_instance(instance, region)
+ marker = None
+ while True:
+ instances = conn.get_all_dbinstances(marker=marker)
+ marker = instances.marker
+ for instance in instances:
+ self.add_rds_instance(instance, region)
+ if not marker:
+ break
except boto.exception.BotoServerError as e:
error = e.reason
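The new loop pages through `get_all_dbinstances` with boto's marker protocol, so accounts whose RDS instances span multiple API pages are fully enumerated; the generic shape:

    marker = None
    while True:
        page = conn.get_all_dbinstances(marker=marker)
        marker = page.marker        # falsy once the last page is reached
        for instance in page:
            add(instance)           # stand-in for add_rds_instance
        if not marker:
            break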
@@ -506,6 +593,65 @@ class Ec2Inventory(object):
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
+ def include_rds_clusters_by_region(self, region):
+ if not HAS_BOTO3:
+ self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
+ "getting RDS clusters")
+
+ client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
+
+ marker, clusters = '', []
+ while marker is not None:
+ resp = client.describe_db_clusters(Marker=marker)
+ clusters.extend(resp["DBClusters"])
+ marker = resp.get('Marker', None)
+
+ account_id = boto.connect_iam().get_user().arn.split(':')[4]
+ c_dict = {}
+ for c in clusters:
+ # remove these datetime objects as there is no serialisation to json
+ # currently in place and we don't need the data yet
+ if 'EarliestRestorableTime' in c:
+ del c['EarliestRestorableTime']
+ if 'LatestRestorableTime' in c:
+ del c['LatestRestorableTime']
+
+ if self.ec2_instance_filters == {}:
+ matches_filter = True
+ else:
+ matches_filter = False
+
+ try:
+ # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
+ tags = client.list_tags_for_resource(
+ ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
+ c['Tags'] = tags['TagList']
+
+ if self.ec2_instance_filters:
+ for filter_key, filter_values in self.ec2_instance_filters.items():
+ # get AWS tag key e.g. tag:env will be 'env'
+ tag_name = filter_key.split(":", 1)[1]
+                    # filter_values is a list (if multiple values were given for the same tag name)
+ matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
+
+ if matches_filter:
+ # it matches a filter, so stop looking for further matches
+ break
+
+ except Exception as e:
+ if e.message.find('DBInstanceNotFound') >= 0:
+                # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
+ # Ignore errors when trying to find tags for these
+ pass
+
+ # ignore empty clusters caused by AWS bug
+ if len(c['DBClusterMembers']) == 0:
+ continue
+ elif matches_filter:
+ c_dict[c['DBClusterIdentifier']] = c
+
+ self.inventory['db_clusters'] = c_dict
+
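Two limitations worth keeping in mind here: only `tag:`-style entries in `instance_filters` are applied to clusters (the key is split on ':' and treated as a tag name), and `e.message` is a Python 2 idiom; a py3-safe version of that check would look like this self-contained sketch:

    try:
        raise RuntimeError('DBInstanceNotFound: cluster has no members')
    except Exception as e:
        # e.message does not exist on Python 3; inspect the string form instead
        if 'DBInstanceNotFound' in str(e):
            pass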
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
@@ -514,7 +660,7 @@ class Ec2Inventory(object):
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
- conn = elasticache.connect_to_region(region)
+ conn = self.connect_to_aws(elasticache, region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
@@ -531,7 +677,7 @@ class Ec2Inventory(object):
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
- # CacheNodes. Because of that wo can't make use of the get_list
+ # CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
@@ -550,7 +696,7 @@ class Ec2Inventory(object):
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
- conn = elasticache.connect_to_region(region)
+ conn = self.connect_to_aws(elasticache, region)
if conn:
response = conn.describe_replication_groups()
@@ -565,7 +711,7 @@ class Ec2Inventory(object):
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
- # Because of that wo can't make use of the get_list method in the
+ # Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
@@ -619,7 +765,7 @@ class Ec2Inventory(object):
# Select the best destination address
if self.destination_format and self.destination_format_tags:
- dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ])
+ dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
@@ -633,32 +779,46 @@ class Ec2Inventory(object):
# Skip instances we cannot address (e.g. private VPC subnet)
return
+ # Set the inventory name
+ hostname = None
+ if self.hostname_variable:
+ if self.hostname_variable.startswith('tag_'):
+ hostname = instance.tags.get(self.hostname_variable[4:], None)
+ else:
+ hostname = getattr(instance, self.hostname_variable)
+
+ # If we can't get a nice hostname, use the destination address
+ if not hostname:
+ hostname = dest
+ else:
+ hostname = self.to_safe(hostname).lower()
+
# if we only want to include hosts that match a pattern, skip those that don't
- if self.pattern_include and not self.pattern_include.match(dest):
+ if self.pattern_include and not self.pattern_include.match(hostname):
return
# if we need to exclude hosts that match a pattern, skip those
- if self.pattern_exclude and self.pattern_exclude.match(dest):
+ if self.pattern_exclude and self.pattern_exclude.match(hostname):
return
# Add to index
- self.index[dest] = [region, instance.id]
+ self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
- self.inventory[instance.id] = [dest]
+ self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
- self.push(self.inventory, region, dest)
+ self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
- self.push(self.inventory, instance.placement, dest)
+ self.push(self.inventory, instance.placement, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
@@ -667,28 +827,28 @@ class Ec2Inventory(object):
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
- self.push(self.inventory, ami_id, dest)
+ self.push(self.inventory, ami_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
- self.push(self.inventory, type_name, dest)
+ self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
- self.push(self.inventory, key_name, dest)
+ self.push(self.inventory, key_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
- self.push(self.inventory, vpc_id_name, dest)
+ self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
@@ -697,7 +857,7 @@ class Ec2Inventory(object):
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
- self.push(self.inventory, key, dest)
+ self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
@@ -707,34 +867,41 @@ class Ec2Inventory(object):
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
- if v:
- key = self.to_safe("tag_" + k + "=" + v)
+ if self.expand_csv_tags and v and ',' in v:
+ values = map(lambda x: x.strip(), v.split(','))
else:
- key = self.to_safe("tag_" + k)
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+ values = [v]
+
+ for v in values:
if v:
- self.push_group(self.inventory, self.to_safe("tag_" + k), key)
+ key = self.to_safe("tag_" + k + "=" + v)
+ else:
+ key = self.to_safe("tag_" + k)
+ self.push(self.inventory, key, hostname)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+ if v:
+ self.push_group(self.inventory, self.to_safe("tag_" + k), key)
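
A one-line sketch of the expand_csv_tags splitting above, with a hypothetical comma-separated tag value:

    v = 'web, db ,cache'                                    # hypothetical tag value
    values = map(lambda x: x.strip(), v.split(',')) if ',' in v else [v]
    print(list(values))                                     # ['web', 'db', 'cache']
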
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
- self.push(self.inventory, name, dest)
+ self.push(self.inventory, name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
- self.push(self.inventory, 'tag_none', dest)
+ self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
- self.push(self.inventory, 'ec2', dest)
+ self.push(self.inventory, 'ec2', hostname)
- self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_rds_instance(self, instance, region):
@@ -752,24 +919,38 @@ class Ec2Inventory(object):
# Skip instances we cannot address (e.g. private VPC subnet)
return
+ # Set the inventory name
+ hostname = None
+ if self.hostname_variable:
+ if self.hostname_variable.startswith('tag_'):
+ hostname = instance.tags.get(self.hostname_variable[4:], None)
+ else:
+ hostname = getattr(instance, self.hostname_variable)
+
+ # If we can't get a nice hostname, use the destination address
+ if not hostname:
+ hostname = dest
+
+ hostname = self.to_safe(hostname).lower()
+
# Add to index
- self.index[dest] = [region, instance.id]
+ self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
- self.inventory[instance.id] = [dest]
+ self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
- self.push(self.inventory, region, dest)
+ self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
- self.push(self.inventory, instance.availability_zone, dest)
+ self.push(self.inventory, instance.availability_zone, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
@@ -778,14 +959,14 @@ class Ec2Inventory(object):
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
- self.push(self.inventory, type_name, dest)
+ self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
- self.push(self.inventory, vpc_id_name, dest)
+ self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
@@ -794,7 +975,7 @@ class Ec2Inventory(object):
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
- self.push(self.inventory, key, dest)
+ self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
@@ -805,20 +986,21 @@ class Ec2Inventory(object):
# Inventory: Group by engine
if self.group_by_rds_engine:
- self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
+ self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
- self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
+ self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
- self.push(self.inventory, 'rds', dest)
+ self.push(self.inventory, 'rds', hostname)
- self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
@@ -1131,6 +1313,8 @@ class Ec2Inventory(object):
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
+ if self.expand_csv_tags and ',' in v:
+ v = list(map(lambda x: x.strip(), v.split(',')))
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
@@ -1141,6 +1325,10 @@ class Ec2Inventory(object):
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
+ elif key == 'ec2_block_device_mapping':
+ instance_vars["ec2_block_devices"] = {}
+ for k, v in value.items():
+ instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
@@ -1321,4 +1509,3 @@ class Ec2Inventory(object):
# Run the script
Ec2Inventory()
-
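
To make the hostname selection added above concrete, a minimal sketch with a hypothetical instance object and a simplified stand-in for to_safe():

    import re

    class FakeInstance(object):            # hypothetical boto-like instance
        tags = {'Name': 'Web-01'}
        private_ip_address = '10.0.0.5'

    def to_safe(word):                     # simplified stand-in for Ec2Inventory.to_safe
        return re.sub(r'[^A-Za-z0-9_]', '_', word)

    instance = FakeInstance()
    dest = instance.private_ip_address
    hostname_variable = 'tag_Name'         # e.g. set via hostname_variable in ec2.ini

    if hostname_variable.startswith('tag_'):
        hostname = instance.tags.get(hostname_variable[4:], None)
    else:
        hostname = getattr(instance, hostname_variable)

    hostname = to_safe(hostname).lower() if hostname else dest
    print(hostname)                        # web_01
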
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index cce3c5f35..2be46a58c 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -70,7 +70,8 @@ Examples:
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
-Version: 0.0.1
+Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
+Version: 0.0.3
'''
__requires__ = ['pycrypto>=2.6']
@@ -84,13 +85,19 @@ except ImportError:
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
-USER_AGENT_VERSION="v1"
+USER_AGENT_VERSION="v2"
import sys
import os
import argparse
+
+from time import time
+
import ConfigParser
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
try:
import json
except ImportError:
@@ -101,31 +108,103 @@ try:
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
- print("GCE inventory script requires libcloud >= 0.13")
- sys.exit(1)
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class CloudInventoryCache(object):
+ def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
+ cache_max_age=300):
+ cache_dir = os.path.expanduser(cache_path)
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+ self.cache_path_cache = os.path.join(cache_dir, cache_name)
+
+ self.cache_max_age = cache_max_age
+
+ def is_valid(self, max_age=None):
+ ''' Determines whether the cache file has expired or is still valid '''
+
+ if max_age is None:
+ max_age = self.cache_max_age
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + max_age) > current_time:
+ return True
+
+ return False
+
+ def get_all_data_from_cache(self, filename=''):
+ ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
+
+ data = ''
+ if not filename:
+ filename = self.cache_path_cache
+ with open(filename, 'r') as cache:
+ data = cache.read()
+ return json.loads(data)
+
+ def write_to_cache(self, data, filename=''):
+ ''' Writes data to file as JSON. Returns True. '''
+ if not filename:
+ filename = self.cache_path_cache
+ json_data = json.dumps(data)
+ with open(filename, 'w') as cache:
+ cache.write(json_data)
+ return True
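
A short usage sketch for CloudInventoryCache as defined above; the cache name and sample payload are illustrative only:

    cache = CloudInventoryCache(cache_name='demo.cache', cache_path='/tmp',
                                cache_max_age=300)
    if not cache.is_valid():                      # no file yet, or older than 300s
        cache.write_to_cache({'_meta': {'hostvars': {}}})
    inventory = cache.get_all_data_from_cache()   # -> {'_meta': {'hostvars': {}}}
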
class GceInventory(object):
def __init__(self):
+ # Cache object
+ self.cache = None
+ # dictionary containing inventory read from disk
+ self.inventory = {}
+
# Read settings and parse CLI arguments
self.parse_cli_args()
+ self.config = self.get_config()
self.driver = self.get_gce_driver()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Cache management
+ start_inventory_time = time()
+ cache_used = False
+ if self.args.refresh_cache or not self.cache.is_valid():
+ self.do_api_calls_update_cache()
+ else:
+ self.load_inventory_from_cache()
+ cache_used = True
+ self.inventory['_meta']['stats'] = {'use_cache': True}
+ self.inventory['_meta']['stats'] = {
+ 'inventory_load_time': time() - start_inventory_time,
+ 'cache_used': cache_used
+ }
# Just display data for specific host
if self.args.host:
- print(self.json_format_dict(self.node_to_dict(
- self.get_instance(self.args.host)),
- pretty=self.args.pretty))
- sys.exit(0)
-
- # Otherwise, assume user wants all instances grouped
- print(self.json_format_dict(self.group_instances(),
- pretty=self.args.pretty))
+ print(self.json_format_dict(
+ self.inventory['_meta']['hostvars'][self.args.host],
+ pretty=self.args.pretty))
+ else:
+ # Otherwise, assume user wants all instances grouped
+ zones = self.parse_env_zones()
+ print(self.json_format_dict(self.inventory,
+ pretty=self.args.pretty))
sys.exit(0)
- def get_gce_driver(self):
- """Determine the GCE authorization settings and return a
- libcloud driver.
+ def get_config(self):
+ """
+ Reads the settings from the gce.ini file.
+
+ Populates a SafeConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
@@ -140,14 +219,57 @@ class GceInventory(object):
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
+ 'inventory_ip_type': '',
+ 'cache_path': '~/.ansible/tmp',
+ 'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+ if 'cache' not in config.sections():
+ config.add_section('cache')
+
config.read(gce_ini_path)
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ # Caching
+ cache_path = config.get('cache', 'cache_path')
+ cache_max_age = config.getint('cache', 'cache_max_age')
+ # TODO(supertom): support project-specific caches
+ cache_name = 'ansible-gce.cache'
+ self.cache = CloudInventoryCache(cache_path=cache_path,
+ cache_max_age=cache_max_age,
+ cache_name=cache_name)
+ return config
+
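
A sketch of exercising the new [gce]/[inventory]/[cache] options in isolation; the ini text below is illustrative, not a recommended configuration:

    import io
    import textwrap
    import ConfigParser                  # Python 2, matching this script

    ini_text = textwrap.dedent(u"""\
        [gce]
        instance_states = RUNNING,TERMINATED
        [inventory]
        inventory_ip_type = internal
        [cache]
        cache_path = ~/.ansible/tmp
        cache_max_age = 300
        """)
    config = ConfigParser.SafeConfigParser()
    config.readfp(io.StringIO(ini_text))
    print(config.get('gce', 'instance_states').split(','))  # ['RUNNING', 'TERMINATED']
    print(config.get('inventory', 'inventory_ip_type'))     # internal
    print(config.getint('cache', 'cache_max_age'))          # 300
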
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_driver(self):
+ """Determine the GCE authorization settings and return a
+ libcloud driver.
+ """
# Attempt to get GCE params from a configuration file, if one
# exists.
- secrets_path = config.get('gce', 'libcloud_secrets')
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
@@ -161,8 +283,7 @@ class GceInventory(object):
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
- print(err)
- sys.exit(1)
+ sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
@@ -173,10 +294,10 @@ class GceInventory(object):
pass
if not secrets_found:
args = [
- config.get('gce','gce_service_account_email_address'),
- config.get('gce','gce_service_account_pem_file_path')
+ self.config.get('gce','gce_service_account_email_address'),
+ self.config.get('gce','gce_service_account_pem_file_path')
]
- kwargs = {'project': config.get('gce', 'gce_project_id')}
+ kwargs = {'project': self.config.get('gce', 'gce_project_id')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
@@ -191,6 +312,14 @@ class GceInventory(object):
)
return gce
+ def parse_env_zones(self):
+ '''Returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
+ If provided, this is used to filter the results of the group_instances() call.'''
+ import csv
+ reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
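
A sketch of parse_env_zones() above with a hypothetical GCE_ZONE value:

    import csv
    import os

    os.environ['GCE_ZONE'] = 'us-central1-a, us-east1-b'    # hypothetical value
    reader = csv.reader([os.environ.get('GCE_ZONE', '')], skipinitialspace=True)
    zones = [r for r in reader]
    print([z for z in zones[0]])         # ['us-central1-a', 'us-east1-b']
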
def parse_cli_args(self):
''' Command line argument processing '''
@@ -202,6 +331,9 @@ class GceInventory(object):
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
+ parser.add_argument(
+ '--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
@@ -211,11 +343,17 @@ class GceInventory(object):
if inst is None:
return {}
- if inst.extra['metadata'].has_key('items'):
+ if 'items' in inst.extra['metadata']:
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+ # default to external IP unless the user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
@@ -231,29 +369,67 @@ class GceInventory(object):
'gce_metadata': md,
'gce_network': net,
# Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+ 'ansible_ssh_host': ssh_host
}
- def get_instance(self, instance_name):
- '''Gets details about a specific instance '''
+ def load_inventory_from_cache(self):
+ ''' Loads inventory from JSON on disk. '''
+
try:
- return self.driver.ex_get_node(instance_name)
+ self.inventory = self.cache.get_all_data_from_cache()
+ hosts = self.inventory['_meta']['hostvars']
except Exception as e:
- return None
-
- def group_instances(self):
+ print(
+ "Invalid inventory file %s. Please rebuild with -refresh-cache option."
+ % (self.cache.cache_path_cache))
+ raise
+
+ def do_api_calls_update_cache(self):
+ ''' Do API calls and save data in cache. '''
+ zones = self.parse_env_zones()
+ data = self.group_instances(zones)
+ self.cache.write_to_cache(data)
+ self.inventory = data
+
+ def list_nodes(self):
+ all_nodes = []
+ params, more_results = {'maxResults': 500}, True
+ while more_results:
+ self.driver.connection.gce_params=params
+ all_nodes.extend(self.driver.list_nodes())
+ more_results = 'pageToken' in params
+ return all_nodes
+
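
The paging in list_nodes() above relies on libcloud writing a pageToken back into the shared gce_params dict; a generic sketch of the same pattern, with a hypothetical fetch_page() standing in for driver.list_nodes():

    def fetch_page(params):
        token = params.pop('pageToken', None)
        if token is None:
            params['pageToken'] = 'page-2'   # pretend one more page exists
            return ['node-a', 'node-b']
        return ['node-c']                    # last page: no token written back

    all_nodes, params, more_results = [], {'maxResults': 500}, True
    while more_results:
        all_nodes.extend(fetch_page(params))
        more_results = 'pageToken' in params
    print(all_nodes)                         # ['node-a', 'node-b', 'node-c']
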
+ def group_instances(self, zones=None):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
- for node in self.driver.list_nodes():
+ for node in self.list_nodes():
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+ if self.instance_states and not node.extra['status'] in self.instance_states:
+ continue
+
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
- if groups.has_key(zone): groups[zone].append(name)
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if zone in groups: groups[zone].append(name)
else: groups[zone] = [name]
tags = node.extra['tags']
@@ -262,25 +438,25 @@ class GceInventory(object):
tag = t[6:]
else:
tag = 'tag_%s' % t
- if groups.has_key(tag): groups[tag].append(name)
+ if tag in groups: groups[tag].append(name)
else: groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
- if groups.has_key(net): groups[net].append(name)
+ if net in groups: groups[net].append(name)
else: groups[net] = [name]
machine_type = node.size
- if groups.has_key(machine_type): groups[machine_type].append(name)
+ if machine_type in groups: groups[machine_type].append(name)
else: groups[machine_type] = [name]
image = node.image and node.image or 'persistent_disk'
- if groups.has_key(image): groups[image].append(name)
+ if image in groups: groups[image].append(name)
else: groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
- if groups.has_key(stat): groups[stat].append(name)
+ if stat in groups: groups[stat].append(name)
else: groups[stat] = [name]
groups["_meta"] = meta
@@ -296,6 +472,6 @@ class GceInventory(object):
else:
return json.dumps(data)
-
# Run the script
-GceInventory()
+if __name__ == '__main__':
+ GceInventory()
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
index ac2f0430a..d63e07b64 100755
--- a/inventory/libvirt/hosts/libvirt_generic.py
+++ b/inventory/libvirt/hosts/libvirt_generic.py
@@ -61,11 +61,11 @@ class LibvirtInventory(object):
self.parse_cli_args()
if self.args.host:
- print _json_format_dict(self.get_host_info(), self.args.pretty)
+ print(_json_format_dict(self.get_host_info(), self.args.pretty))
elif self.args.list:
- print _json_format_dict(self.get_inventory(), self.args.pretty)
+ print(_json_format_dict(self.get_inventory(), self.args.pretty))
else: # default action with no options
- print _json_format_dict(self.get_inventory(), self.args.pretty)
+ print(_json_format_dict(self.get_inventory(), self.args.pretty))
def read_settings(self):
''' Reads the settings from the libvirt.ini file '''
@@ -115,12 +115,12 @@ class LibvirtInventory(object):
conn = libvirt.openReadOnly(self.libvirt_uri)
if conn is None:
- print "Failed to open connection to %s" % self.libvirt_uri
+ print("Failed to open connection to %s" % self.libvirt_uri)
sys.exit(1)
domains = conn.listAllDomains()
if domains is None:
- print "Failed to list domains for connection %s" % self.libvirt_uri
+ print("Failed to list domains for connection %s" % self.libvirt_uri)
sys.exit(1)
for domain in domains:
diff --git a/library/modify_yaml.py b/library/modify_yaml.py
index d8d22d5ea..8706e80c2 100755
--- a/library/modify_yaml.py
+++ b/library/modify_yaml.py
@@ -6,6 +6,11 @@
import yaml
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from ansible.module_utils.basic import * # noqa: F402,F403
+
+
DOCUMENTATION = '''
---
module: modify_yaml
@@ -21,8 +26,18 @@ EXAMPLES = '''
'''
-# pylint: disable=missing-docstring
def set_key(yaml_data, yaml_key, yaml_value):
+ ''' Updates a parsed yaml structure setting a key to a value.
+
+ :param yaml_data: yaml structure to modify.
+ :type yaml_data: dict
+ :param yaml_key: Key to modify.
+ :type yaml_key: mixed
+ :param yaml_value: Value to use for yaml_key.
+ :type yaml_value: mixed
+ :returns: Changes to the yaml_data structure
+ :rtype: dict(tuple())
+ '''
changes = []
ptr = yaml_data
final_key = yaml_key.split('.')[-1]
@@ -75,6 +90,7 @@ def main():
# pylint: disable=missing-docstring, unused-argument
def none_representer(dumper, data):
return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'')
+
yaml.add_representer(type(None), none_representer)
try:
@@ -95,14 +111,9 @@ def main():
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
- except Exception as e:
- return module.fail_json(msg=str(e))
-
+ except Exception as error:
+ return module.fail_json(msg=str(error))
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
-# import module snippets
-from ansible.module_utils.basic import * # noqa: F402,F403
if __name__ == '__main__':
main()
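
A hedged usage sketch of set_key() above, assuming only the behavior its docstring documents (dotted keys address nested dicts); the data is hypothetical:

    data = {'kubernetesMasterConfig': {'apiLevels': ['v1']}}
    changes = set_key(data, 'kubernetesMasterConfig.apiLevels', ['v1', 'v1beta3'])
    # data['kubernetesMasterConfig']['apiLevels'] is now ['v1', 'v1beta3'], and
    # 'changes' records what was modified, per the docstring.
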
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 665ede1cb..955772486 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -15,6 +15,7 @@ BuildArch: noarch
Requires: ansible >= 2.2.0.0-1
Requires: python2
+Requires: python-six
Requires: openshift-ansible-docs = %{version}-%{release}
%description
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 1e0a6d4e7..dc0bf73a2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -31,7 +31,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --drain --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 7839b85e8..ec5b18389 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -27,7 +27,7 @@
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
- role: openshift_hosted
- - role: openshift_metrics
+ - role: openshift_hosted_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
- role: openshift_hosted_logging
when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5fc81bf3a..6e3e04a6b 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -234,7 +234,7 @@
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
- --drain --force
+ {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 68b111df4..86b344d7a 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -41,7 +41,7 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --drain --force
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 21f3c80a1..39d64a126 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -74,11 +74,6 @@
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- - openshift_facts:
- role: hosted
- openshift_env:
- openshift_hosted_registry_storage_kind: 'nfs'
- when: openshift_hosted_registry_storage_kind is not defined and groups.oo_nfs_to_config is defined and groups.oo_nfs_to_config | length > 0
- name: Create temp directory for syncing certs
hosts: localhost
diff --git a/requirements.txt b/requirements.txt
index e55ef5f0b..8f47033f8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,4 @@
-ansible>=2.1
+ansible>=2.2
+six
pyOpenSSL
+PyYAML
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
new file mode 100644
index 000000000..fb545c7c8
--- /dev/null
+++ b/roles/lib_utils/library/yedit.py
@@ -0,0 +1,766 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# pylint: disable=wrong-import-order
+import json
+import os
+import re
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+import shutil
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: yedit
+short_description: Create, modify, and idempotently manage yaml files.
+description:
+ - Modify yaml files programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list yaml
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ debug:
+ description:
+ - Turn on debug information.
+ required: false
+ default: false
+ aliases: []
+ src:
+ description:
+ - The file that is the target of the modifications.
+ required: false
+ default: None
+ aliases: []
+ content:
+ description:
+ - Content represents the yaml content you desire to work with. This
+ - could be the file contents to write or the in-memory data to modify.
+ required: false
+ default: None
+ aliases: []
+ content_type:
+ description:
+ - The python type of the content parameter.
+ required: false
+ default: 'dict'
+ aliases: []
+ key:
+ description:
+ - The path to the value you wish to modify. Empty string means the top of
+ - the document.
+ required: false
+ default: ''
+ aliases: []
+ value:
+ description:
+ - The incoming value of parameter 'key'.
+ required: false
+ default:
+ aliases: []
+ value_type:
+ description:
+ - The python type of the incoming value.
+ required: false
+ default: ''
+ aliases: []
+ update:
+ description:
+ - Whether the update should be performed on a dict/hash or list/array
+ - object.
+ required: false
+ default: false
+ aliases: []
+ append:
+ description:
+ - Whether to append to an array/list. When the key does not exist or is
+ - null, a new array is created. When the key is of a non-list type,
+ - nothing is done.
+ required: false
+ default: false
+ aliases: []
+ index:
+ description:
+ - Used in conjunction with the update parameter. This will update a
+ - specific index in an array/list.
+ required: false
+ default: false
+ aliases: []
+ curr_value:
+ description:
+ - Used in conjunction with the update parameter. This is the current
+ - value of 'key' in the yaml file.
+ required: false
+ default: false
+ aliases: []
+ curr_value_format:
+ description:
+ - Format of the incoming current value.
+ choices: ["yaml", "json", "str"]
+ required: false
+ default: false
+ aliases: []
+ backup:
+ description:
+ - Whether to make a backup copy of the current file when performing an
+ - edit.
+ required: false
+ default: true
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Simple insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: test
+ value: somevalue
+ state: present
+# Results:
+# test: somevalue
+
+# Multilevel insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: a#b#c
+ value: d
+ state: present
+# Results:
+# a:
+# b:
+# c: d
+'''
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, value):
+ ''' setter method for separator '''
+ self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' update the entry at path with value, optionally at a given index or matching curr_value '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+ 'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = get_curr_value(parse_value(module.params['curr_value']), module.params['curr_value_format']) # noqa: #501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+
+def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+
+def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
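
A short sketch of parse_value() per the branches above (outputs assume the YAML 1.1 boolean spellings that ruamel's default loader accepts):

    print(parse_value('True'))           # True   (yaml-loaded into a bool)
    print(parse_value('True', 'str'))    # 'True' (vtype 'str' skips the yaml load)
    print(parse_value('yes', 'bool'))    # passes the bool check, then loads as True
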
+
+# pylint: disable=too-many-branches
+def main():
+ ''' ansible yedit module for modifying yaml files '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ src=dict(default=None, type='str'),
+ content=dict(default=None),
+ content_type=dict(default='dict', choices=['dict']),
+ key=dict(default='', type='str'),
+ value=dict(),
+ value_type=dict(default='', type='str'),
+ update=dict(default=False, type='bool'),
+ append=dict(default=False, type='bool'),
+ index=dict(default=None, type='int'),
+ curr_value=dict(default=None, type='str'),
+ curr_value_format=dict(default='yaml',
+ choices=['yaml', 'json', 'str'],
+ type='str'),
+ backup=dict(default=True, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
+ required_one_of=[["content", "src"]],
+ )
+
+ rval = Yedit.run_ansible(module)
+ if 'failed' in rval and rval['failed']:
+ module.fail_json(msg=rval['msg'])
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
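
A hedged sketch of driving the Yedit class above directly from Python, outside Ansible; 'demo.yml' is a hypothetical file name:

    yed = Yedit(filename='demo.yml', backup=False)  # missing file -> empty dict
    yed.put('a.b.c', 'd')                           # creates nested keys a -> b -> c
    changed, data = yed.write()                     # writes demo.yml, returns (True, data)
    print(yed.get('a.b'))                           # ruamel mapping {'c': 'd'}
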
diff --git a/roles/lib_utils/src/ansible/yedit.py b/roles/lib_utils/src/ansible/yedit.py
new file mode 100644
index 000000000..a80cd520c
--- /dev/null
+++ b/roles/lib_utils/src/ansible/yedit.py
@@ -0,0 +1,84 @@
+# flake8: noqa
+# pylint: skip-file
+
+
+def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+
+def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+
+# pylint: disable=too-many-branches
+def main():
+ ''' ansible yedit module for modifying yaml files '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ src=dict(default=None, type='str'),
+ content=dict(default=None),
+ content_type=dict(default='dict', choices=['dict']),
+ key=dict(default='', type='str'),
+ value=dict(),
+ value_type=dict(default='', type='str'),
+ update=dict(default=False, type='bool'),
+ append=dict(default=False, type='bool'),
+ index=dict(default=None, type='int'),
+ curr_value=dict(default=None, type='str'),
+ curr_value_format=dict(default='yaml',
+ choices=['yaml', 'json', 'str'],
+ type='str'),
+ backup=dict(default=True, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
+ required_one_of=[["content", "src"]],
+ )
+
+ rval = Yedit.run_ansible(module)
+ if 'failed' in rval and rval['failed']:
+ module.fail_json(msg=rval['msg'])
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/src/class/import.py b/roles/lib_utils/src/class/import.py
new file mode 100644
index 000000000..249e07228
--- /dev/null
+++ b/roles/lib_utils/src/class/import.py
@@ -0,0 +1,11 @@
+# flake8: noqa
+# pylint: skip-file
+
+# pylint: disable=wrong-import-order
+import json
+import os
+import re
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+import shutil
+from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
new file mode 100644
index 000000000..e110bc11e
--- /dev/null
+++ b/roles/lib_utils/src/class/yedit.py
@@ -0,0 +1,520 @@
+# flake8: noqa
+# pylint: skip-file
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, value):
+ ''' setter method for separator '''
+ self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+        ''' remove a key, value pair from a dict or an item from a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+        ''' update the entry at path: merge into a dict, or modify/append a list '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+        # deepcopy does not preserve ruamel.yaml formatting metadata, so
+        # round-trip through dump/load to get an editable copy
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+            # deepcopy does not preserve ruamel.yaml formatting metadata, so
+            # round-trip through dump/load to get an editable copy
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
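+        # state=list reads a key, state=absent deletes a key or pops an
+        # item, state=present writes via put/update/append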
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+                    'msg': ('Error opening file [%s]. Verify that the '
+                            'file exists, that it has correct '
+                            'permissions, and is valid yaml.'
+                            % module.params['src'])}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+                    curr_value = get_curr_value(parse_value(module.params['curr_value']), module.params['curr_value_format'])  # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+    return {'failed': True, 'msg': 'Unknown state passed'}
diff --git a/roles/lib_utils/src/doc/license b/roles/lib_utils/src/doc/license
new file mode 100644
index 000000000..717bb7f17
--- /dev/null
+++ b/roles/lib_utils/src/doc/license
@@ -0,0 +1,16 @@
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/roles/lib_utils/src/doc/yedit b/roles/lib_utils/src/doc/yedit
new file mode 100644
index 000000000..e367a389e
--- /dev/null
+++ b/roles/lib_utils/src/doc/yedit
@@ -0,0 +1,132 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: yedit
+short_description: Create, modify, and idempotently manage yaml files.
+description:
+ - Modify yaml files programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list yaml
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ debug:
+ description:
+ - Turn on debug information.
+ required: false
+ default: false
+ aliases: []
+ src:
+ description:
+ - The file that is the target of the modifications.
+ required: false
+ default: None
+ aliases: []
+ content:
+ description:
+ - Content represents the yaml content you desire to work with. This
+      - could be the file contents to write or the in-memory data to modify.
+ required: false
+ default: None
+ aliases: []
+ content_type:
+ description:
+ - The python type of the content parameter.
+ required: false
+ default: 'dict'
+ aliases: []
+ key:
+ description:
+      - The path to the value you wish to modify. Empty string means the top of
+ - the document.
+ required: false
+ default: ''
+ aliases: []
+ value:
+ description:
+ - The incoming value of parameter 'key'.
+ required: false
+ default:
+ aliases: []
+ value_type:
+ description:
+ - The python type of the incoming value.
+ required: false
+ default: ''
+ aliases: []
+ update:
+ description:
+ - Whether the update should be performed on a dict/hash or list/array
+ - object.
+ required: false
+ default: false
+ aliases: []
+ append:
+ description:
+ - Whether to append to an array/list. When the key does not exist or is
+ - null, a new array is created. When the key is of a non-list type,
+ - nothing is done.
+ required: false
+ default: false
+ aliases: []
+ index:
+ description:
+ - Used in conjunction with the update parameter. This will update a
+ - specific index in an array/list.
+ required: false
+ default: false
+ aliases: []
+ curr_value:
+ description:
+ - Used in conjunction with the update parameter. This is the current
+ - value of 'key' in the yaml file.
+ required: false
+ default: false
+ aliases: []
+ curr_value_format:
+ description:
+ - Format of the incoming current value.
+ choices: ["yaml", "json", "str"]
+ required: false
+ default: false
+ aliases: []
+ backup:
+ description:
+ - Whether to make a backup copy of the current file when performing an
+ - edit.
+ required: false
+ default: true
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Simple insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: test
+ value: somevalue
+ state: present
+# Results:
+# test: somevalue
+
+# Multilevel insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: a#b#c
+ value: d
+ state: present
+# Results:
+# a:
+# b:
+# c: d
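+
+# Update an existing list entry in place (illustrative example mirroring the
+# integration tests; the file name and values are hypothetical)
+- name: replace a command argument
+  yedit:
+    src: kube-manager.yaml
+    key: spec.containers[0].command
+    value: --root-ca-file=/etc/k8s/ssl/my.pem
+    curr_value: --root-ca-file=/etc/kubernetes/ssl/ca.pem
+    curr_value_format: str
+    update: true
+    state: present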
+'''
diff --git a/roles/lib_utils/src/generate.py b/roles/lib_utils/src/generate.py
new file mode 100755
index 000000000..f4b46aa91
--- /dev/null
+++ b/roles/lib_utils/src/generate.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+'''
+ Generate the openshift-ansible/roles/lib_openshift_cli/library/ modules.
+'''
+
+import os
+import yaml
+
+# pylint: disable=anomalous-backslash-in-string
+GEN_STR = "#!/usr/bin/env python\n" + \
+ "# pylint: disable=missing-docstring\n" + \
+ "# ___ ___ _ _ ___ ___ _ _____ ___ ___\n" + \
+ "# / __| __| \| | __| _ \ /_\_ _| __| \\\n" + \
+ "# | (_ | _|| .` | _|| / / _ \| | | _|| |) |\n" + \
+ "# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____\n" + \
+ "# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|\n" + \
+ "# | |) | (_) | | .` | (_) || | | _|| |) | | | |\n" + \
+ "# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|\n"
+
+OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
+OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'generate_sources.yml') # noqa: E501
+
+
+def main():
+ ''' combine the necessary files to create the ansible module '''
+
+ library = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+ sources = yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
+ for fname, parts in sources.items():
+ with open(os.path.join(library, fname), 'w') as afd:
+ afd.seek(0)
+ afd.write(GEN_STR)
+ for fpart in parts:
+ with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
+ # first line is pylint disable so skip it
+ for idx, line in enumerate(pfd):
+                    if idx in [0, 1] and ('flake8: noqa' in line or
+                                          'pylint: skip-file' in line):
+ continue
+
+ afd.write(line)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/src/generate_sources.yml b/roles/lib_utils/src/generate_sources.yml
new file mode 100644
index 000000000..83b21de1b
--- /dev/null
+++ b/roles/lib_utils/src/generate_sources.yml
@@ -0,0 +1,7 @@
+---
+yedit.py:
+- doc/license
+- class/import.py
+- doc/yedit
+- class/yedit.py
+- ansible/yedit.py
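+# generate.py concatenates these fragments, in order, into library/yedit.py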
diff --git a/roles/lib_utils/src/test/integration/files/kube-manager.yaml b/roles/lib_utils/src/test/integration/files/kube-manager.yaml
new file mode 100644
index 000000000..6f4b9e6dc
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/files/kube-manager.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kube-controller-manager
+ namespace: kube-system
+spec:
+ hostNetwork: true
+ containers:
+ - name: kube-controller-manager
+ image: openshift/kube:v1.0.0
+ command:
+ - /hyperkube
+ - controller-manager
+ - --master=http://127.0.0.1:8080
+ - --leader-elect=true
+ - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
+ - --root-ca-file=/etc/kubernetes/ssl/ca.pem
+ livenessProbe:
+ httpGet:
+ host: 127.0.0.1
+ path: /healthz
+ port: 10252
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ volumeMounts:
+ - mountPath: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ readOnly: true
+ - mountPath: /etc/ssl/certs
+ name: ssl-certs-host
+ readOnly: true
+ volumes:
+ - hostPath:
+ path: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ - hostPath:
+ path: /usr/share/ca-certificates
+ name: ssl-certs-host
diff --git a/roles/lib_utils/src/test/integration/yedit_test.yml b/roles/lib_utils/src/test/integration/yedit_test.yml
new file mode 100755
index 000000000..1760a7466
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/yedit_test.yml
@@ -0,0 +1,221 @@
+#!/usr/bin/ansible-playbook
+# Yedit test so that we can quickly determine if features are working
+# Ensure that the kube-manager.yaml file exists
+#
+# ./yedit_test.yml -M ../../library
+#
+---
+- hosts: localhost
+ gather_facts: no
+ vars:
+ test_file: kube-manager-test.yaml
+ test: test
+ strategy: debug
+
+ post_tasks:
+ - name: copy the kube-manager.yaml file so that we have a pristine copy each time
+ copy:
+ src: kube-manager.yaml
+ dest: "./{{ test_file }}"
+ changed_when: False
+
+ ####### add key to top level #####
+ - name: add a key at the top level
+ yedit:
+ src: "{{ test_file }}"
+ key: yedittest
+ value: yedittest
+
+ - name: retrieve the inserted key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: yedittest
+ register: results
+
+ - name: Assert that key is at top level
+ assert:
+ that: results.result == 'yedittest'
+ msg: 'Test: add a key to top level failed. yedittest != [{{ results.result }}]'
+ ###### end add key to top level #####
+
+ ###### modify multilevel key, value #####
+ - name: modify multilevel key, value
+ yedit:
+ src: "{{ test_file }}"
+ key: metadata-namespace
+ value: openshift-is-awesome
+ separator: '-'
+
+ - name: retrieve the inserted key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: metadata-namespace
+ separator: '-'
+ register: results
+
+ - name: Assert that key is as expected
+ assert:
+ that: results.result == 'openshift-is-awesome'
+ msg: 'Test: multilevel key, value modification: openshift-is-awesome != [{{ results.result }}]'
+ ###### end modify multilevel key, value #####
+
+ ###### test a string boolean #####
+ - name: test a string boolean
+ yedit:
+ src: "{{ test_file }}"
+ key: spec.containers[0].volumeMounts[1].readOnly
+ value: 'true'
+ value_type: str
+
+ - name: retrieve the inserted key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: spec.containers[0].volumeMounts[1].readOnly
+ register: results
+
+ - name: Assert that key is a string
+ assert:
+ that: results.result == "true"
+ msg: "Test: boolean str: 'true' != [{{ results.result }}]"
+
+ - name: Assert that key is not bool
+ assert:
+ that: results.result != true
+ msg: "Test: boolean str: true != [{{ results.result }}]"
+ ###### end test boolean string #####
+
+ ###### test array append #####
+ - name: test array append
+ yedit:
+ src: "{{ test_file }}"
+ key: spec.containers[0].command
+ value: --my-new-parameter=openshift
+ append: True
+
+ - name: retrieve the array
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: spec.containers[0].command
+ register: results
+
+ - name: Assert that the last element in array is our value
+ assert:
+ that: results.result[-1] == "--my-new-parameter=openshift"
+ msg: "Test: '--my-new-parameter=openshift' != [{{ results.result[-1] }}]"
+ ###### end test array append #####
+
+ ###### test non-existing array append #####
+ - name: test array append to non-existing key
+ yedit:
+ src: "{{ test_file }}"
+ key: nonexistingkey
+ value: --my-new-parameter=openshift
+ append: True
+
+ - name: retrieve the array
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: nonexistingkey
+ register: results
+
+ - name: Assert that the last element in array is our value
+ assert:
+ that: results.result[-1] == "--my-new-parameter=openshift"
+ msg: "Test: '--my-new-parameter=openshift' != [{{ results.result[-1] }}]"
+ ###### end test non-existing array append #####
+
+ ###### test array update modify #####
+ - name: test array update modify
+ yedit:
+ src: "{{ test_file }}"
+ key: spec.containers[0].command
+ value: --root-ca-file=/etc/k8s/ssl/my.pem
+ curr_value: --root-ca-file=/etc/kubernetes/ssl/ca.pem
+ curr_value_format: str
+ update: True
+
+ - name: retrieve the array
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: spec.containers[0].command
+ register: results
+
+ - name: Assert that the element in array is our value
+ assert:
+ that: results.result[5] == "--root-ca-file=/etc/k8s/ssl/my.pem"
+ msg: "Test: '--root-ca-file=/etc/k8s/ssl/my.pem' != [{{ results.result[5] }}]"
+ ###### end test array update modify#####
+
+ ###### test dict create #####
+ - name: test dict create
+ yedit:
+ src: "{{ test_file }}"
+ key: a.b.c
+ value: d
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: a.b.c
+ register: results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == "d"
+ msg: "Test: 'd' != [{{ results.result }}]"
+ ###### end test dict create #####
+
+ ###### test create dict value #####
+ - name: test create dict value
+ yedit:
+ src: "{{ test_file }}"
+ key: e.f.g
+ value:
+ h:
+ i:
+ j: k
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: e.f.g.h.i.j
+ register: results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == "k"
+ msg: "Test: 'k' != [{{ results.result }}]"
+ ###### end test dict create #####
+
+ ###### test create list value #####
+ - name: test create list value
+ yedit:
+ src: "{{ test_file }}"
+ key: z.x.y
+ value:
+ - 1
+ - 2
+ - 3
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: z#x#y
+ separator: '#'
+ register: results
+ - debug: var=results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == [1, 2, 3]
+ msg: "Test: '[1, 2, 3]' != [{{ results.result }}]"
+###### end test create list value #####
diff --git a/roles/lib_utils/src/test/unit/yedit_test.py b/roles/lib_utils/src/test/unit/yedit_test.py
new file mode 100755
index 000000000..2793c5c1a
--- /dev/null
+++ b/roles/lib_utils/src/test/unit/yedit_test.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for yedit
+'''
+# To run
+# python -m unittest yedit_test
+#
+# .............................
+# ----------------------------------------------------------------------
+# Ran 29 tests in 0.133s
+# OK
+
+import os
+import sys
+import unittest
+
+# Disable invalid-name so that test variable names can stay brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place yedit in our path
+yedit_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, yedit_path)
+
+from yedit import Yedit # noqa: E402
+
+# pylint: disable=too-many-public-methods
+# Silly pylint, moar tests!
+
+
+class YeditTest(unittest.TestCase):
+ '''
+ Test class for yedit
+ '''
+ data = {'a': 'a',
+ 'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
+ } # noqa: E124
+
+ filename = 'yedit_test.yml'
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ yed = Yedit(YeditTest.filename)
+ yed.yaml_dict = YeditTest.data
+ yed.write()
+
+ def test_load(self):
+ ''' Testing a get '''
+ yed = Yedit('yedit_test.yml')
+ self.assertEqual(yed.yaml_dict, self.data)
+
+ def test_write(self):
+ ''' Testing a simple write '''
+ yed = Yedit('yedit_test.yml')
+ yed.put('key1', 1)
+ yed.write()
+ self.assertTrue('key1' in yed.yaml_dict)
+ self.assertEqual(yed.yaml_dict['key1'], 1)
+
+ def test_write_x_y_z(self):
+ '''Testing a write of multilayer key'''
+ yed = Yedit('yedit_test.yml')
+ yed.put('x.y.z', 'modified')
+ yed.write()
+ yed.load()
+ self.assertEqual(yed.get('x.y.z'), 'modified')
+
+ def test_delete_a(self):
+ '''Testing a simple delete '''
+ yed = Yedit('yedit_test.yml')
+ yed.delete('a')
+ yed.write()
+ yed.load()
+ self.assertTrue('a' not in yed.yaml_dict)
+
+ def test_delete_b_c(self):
+ '''Testing delete of layered key '''
+ yed = Yedit('yedit_test.yml', separator=':')
+ yed.delete('b:c')
+ yed.write()
+ yed.load()
+ self.assertTrue('b' in yed.yaml_dict)
+ self.assertFalse('c' in yed.yaml_dict['b'])
+
+ def test_create(self):
+ '''Testing a create '''
+ os.unlink(YeditTest.filename)
+ yed = Yedit('yedit_test.yml')
+ yed.create('foo', 'bar')
+ yed.write()
+ yed.load()
+ self.assertTrue('foo' in yed.yaml_dict)
+ self.assertTrue(yed.yaml_dict['foo'] == 'bar')
+
+ def test_create_content(self):
+ '''Testing a create with content '''
+ content = {"foo": "bar"}
+ yed = Yedit("yedit_test.yml", content)
+ yed.write()
+ yed.load()
+ self.assertTrue('foo' in yed.yaml_dict)
+        self.assertEqual(yed.yaml_dict['foo'], 'bar')
+
+ def test_array_insert(self):
+        '''Testing an array insert'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', 'inject')
+ self.assertTrue(yed.get('b:c:d[0]') == 'inject')
+
+ def test_array_insert_first_index(self):
+        '''Testing the element following an array insert'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', 'inject')
+ self.assertTrue(yed.get('b:c:d[1]') == 'f')
+
+ def test_array_insert_second_index(self):
+        '''Testing the second element following an array insert'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', 'inject')
+ self.assertTrue(yed.get('b:c:d[2]') == 'g')
+
+ def test_dict_array_dict_access(self):
+        '''Testing access of a dict nested in a list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ self.assertTrue(yed.get('b:c:d[0]:[0]:x:y') == 'inject')
+
+ def test_dict_array_dict_replace(self):
+        '''Testing a multilevel replace'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ yed.put('b:c:d[0]:[0]:x:y', 'testing')
+ self.assertTrue('b' in yed.yaml_dict)
+ self.assertTrue('c' in yed.yaml_dict['b'])
+ self.assertTrue('d' in yed.yaml_dict['b']['c'])
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
+ self.assertTrue('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
+ self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'] == 'testing') # noqa: E501
+
+ def test_dict_array_dict_remove(self):
+ '''Testing multilevel delete'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ yed.delete('b:c:d[0]:[0]:x:y')
+ self.assertTrue('b' in yed.yaml_dict)
+ self.assertTrue('c' in yed.yaml_dict['b'])
+ self.assertTrue('d' in yed.yaml_dict['b']['c'])
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
+ self.assertFalse('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
+
+ def test_key_exists_in_dict(self):
+ '''Testing exist in dict'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ self.assertTrue(yed.exists('b:c', 'd'))
+
+ def test_key_exists_in_list(self):
+ '''Testing exist in list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ self.assertTrue(yed.exists('b:c:d', [{'x': {'y': 'inject'}}]))
+ self.assertFalse(yed.exists('b:c:d', [{'x': {'y': 'test'}}]))
+
+ def test_update_to_list_with_index(self):
+ '''Testing update to list with index'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.update('x:y:z', [5, 6], index=2)
+ self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
+ self.assertTrue(yed.exists('x:y:z', [5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_update_to_list_with_curr_value(self):
+        '''Testing update to list with curr_value'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.update('x:y:z', [5, 6], curr_value=3)
+ self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
+ self.assertTrue(yed.exists('x:y:z', [5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_update_to_list(self):
+ '''Testing update to list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.update('x:y:z', [5, 6])
+ self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6]])
+ self.assertTrue(yed.exists('x:y:z', [5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_append_twice_to_list(self):
+ '''Testing append to list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.append('x:y:z', [5, 6])
+ yed.append('x:y:z', [5, 6])
+ self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]])
+ self.assertTrue(2 == yed.get('x:y:z').count([5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_add_item_to_dict(self):
+ '''Testing update to dict'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', {'a': 1, 'b': 2})
+ yed.update('x:y:z', {'c': 3, 'd': 4})
+ self.assertTrue(yed.get('x:y:z') == {'a': 1, 'b': 2, 'c': 3, 'd': 4})
+ self.assertTrue(yed.exists('x:y:z', {'c': 3}))
+
+ def test_first_level_dict_with_none_value(self):
+ '''test dict value with none value'''
+ yed = Yedit(content={'a': None}, separator=":")
+ yed.put('a:b:c', 'test')
+ self.assertTrue(yed.get('a:b:c') == 'test')
+        self.assertEqual(yed.get('a:b'), {'c': 'test'})
+
+ def test_adding_yaml_variable(self):
+        '''test putting a yaml variable string'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('z:y', '{{test}}')
+ self.assertTrue(yed.get('z:y') == '{{test}}')
+
+ def test_keys_with_underscore(self):
+        '''test keys containing underscores'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('z_:y_y', {'test': '{{test}}'})
+ self.assertTrue(yed.get('z_:y_y') == {'test': '{{test}}'})
+
+ def test_first_level_array_update(self):
+ '''test update on top level array'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
+ yed.update('', {'c': 4})
+ self.assertTrue({'c': 4} in yed.get(''))
+
+ def test_first_level_array_delete(self):
+        '''test delete on a top-level array'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
+ yed.delete('')
+ self.assertTrue({'b': 3} not in yed.get(''))
+
+ def test_first_level_array_get(self):
+        '''test get on a top-level array'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
+ yed.get('')
+ self.assertTrue([{'a': 1}, {'b': 2}, {'b': 3}] == yed.yaml_dict)
+
+ def test_pop_list_item(self):
+        '''test popping an item from a top-level list'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
+ yed.pop('', {'b': 2})
+ self.assertTrue([{'a': 1}, {'b': 3}] == yed.yaml_dict)
+
+ def test_pop_list_item_2(self):
+        '''test popping an item from a list by value'''
+ z = range(10)
+ yed = Yedit(content=z, separator=':')
+ yed.pop('', 5)
+ z.pop(5)
+ self.assertTrue(z == yed.yaml_dict)
+
+ def test_pop_dict_key(self):
+        '''test popping a key from a nested dict'''
+ yed = Yedit(content={'a': {'b': {'c': 1, 'd': 2}}}, separator='#')
+ yed.pop('a#b', 'c')
+ self.assertTrue({'a': {'b': {'d': 2}}} == yed.yaml_dict)
+
+ def tearDown(self):
+ '''TearDown method'''
+ os.unlink(YeditTest.filename)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index 7161b5277..a474b36b0 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -4,17 +4,13 @@
"""For details on this module see DOCUMENTATION (below)"""
-# router/registry cert grabbing
-import subprocess
-# etcd config file
-import ConfigParser
-# Expiration parsing
import datetime
-# File path stuff
import os
-# Config file parsing
+import subprocess
+
+from six.moves import configparser
+
import yaml
-# Certificate loading
import OpenSSL.crypto
DOCUMENTATION = '''
@@ -260,7 +256,10 @@ Return:
# This is our module MAIN function after all, so there's bound to be a
# lot of code bundled up into one block
#
-# pylint: disable=too-many-locals,too-many-locals,too-many-statements,too-many-branches
+# Reason: These checks are disabled because the issue was introduced
+# during a period where the pylint checks weren't enabled for this file
+# Status: temporarily disabled pending future refactoring
+# pylint: disable=too-many-locals,too-many-statements,too-many-branches
def main():
"""This module examines certificates (in various forms) which compose
an OpenShift Container Platform cluster
@@ -479,13 +478,17 @@ an OpenShift Container Platform cluster
etcd_cert_params.append('dne')
try:
with open('/etc/etcd/etcd.conf', 'r') as fp:
- etcd_config = ConfigParser.ConfigParser()
+ etcd_config = configparser.ConfigParser()
+ # Reason: This check is disabled because the issue was introduced
+ # during a period where the pylint checks weren't enabled for this file
+ # Status: temporarily disabled pending future refactoring
+ # pylint: disable=deprecated-method
etcd_config.readfp(FakeSecHead(fp))
for param in etcd_cert_params:
try:
etcd_certs_to_check.add(etcd_config.get('ETCD', param))
- except ConfigParser.NoOptionError:
+ except configparser.NoOptionError:
# That parameter does not exist, oh well...
pass
except IOError:
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index d7e3596fd..10e30f1c4 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -7,13 +7,6 @@
"""Ansible module for retrieving and setting openshift related facts"""
-try:
- # python2
- import ConfigParser
-except ImportError:
- # python3
- import configparser as ConfigParser
-
# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
@@ -26,8 +19,8 @@ import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
-from six import string_types
-from six import text_type
+from six import string_types, text_type
+from six.moves import configparser
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
@@ -776,7 +769,7 @@ def set_etcd_facts_if_unset(facts):
# Add a fake section for parsing:
ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
@@ -874,6 +867,20 @@ def set_deployment_facts_if_unset(facts):
return facts
+def set_evacuate_or_drain_option(facts):
+ """OCP before 1.5/3.5 used '--evacuate'. As of 1.5/3.5 OCP uses
+    '--drain'. Let's make that a fact for easy reference later.
+ """
+ if facts['common']['version_gte_3_5_or_1_5']:
+ # New-style
+ facts['common']['evacuate_or_drain'] = '--drain'
+ else:
+ # Old-style
+ facts['common']['evacuate_or_drain'] = '--evacuate'
+
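+    # e.g. playbooks can then reference
+    # "{{ openshift.common.evacuate_or_drain }}" rather than hard-coding the flag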
+ return facts
+
+
def set_version_facts_if_unset(facts):
""" Set version facts. This currently includes common.version and
common.version_gte_3_1_or_1_1.
@@ -1256,6 +1263,13 @@ def is_service_running(service):
return service_running
+def rpm_rebuilddb():
+ """
+ Runs rpm --rebuilddb to ensure the db is in good shape.
+ """
+ module.run_command(['/usr/bin/rpm', '--rebuilddb']) # noqa: F405
+
+
def get_version_output(binary, version_cmd):
""" runs and returns the version output for a command """
cmd = []
@@ -1292,7 +1306,7 @@ def get_hosted_registry_insecure():
try:
ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
options = config.get('root', 'OPTIONS')
if 'insecure-registry' in options:
@@ -1561,15 +1575,15 @@ def get_local_facts_from_file(filename):
local_facts = dict()
try:
# Handle conversion of INI style facts file to json style
- ini_facts = ConfigParser.SafeConfigParser()
+ ini_facts = configparser.SafeConfigParser()
ini_facts.read(filename)
for section in ini_facts.sections():
local_facts[section] = dict()
for key, value in ini_facts.items(section):
local_facts[section][key] = value
- except (ConfigParser.MissingSectionHeaderError,
- ConfigParser.ParsingError):
+ except (configparser.MissingSectionHeaderError,
+ configparser.ParsingError):
try:
with open(filename, 'r') as facts_file:
local_facts = json.load(facts_file)
@@ -1898,6 +1912,7 @@ class OpenShiftFacts(object):
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
+ facts = set_evacuate_or_drain_option(facts)
facts = set_dnsmasq_facts_if_unset(facts)
facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
@@ -1966,6 +1981,11 @@ class OpenShiftFacts(object):
if 'docker' in roles:
docker = dict(disable_push_dockerhub=False,
options='--log-driver=json-file --log-opt max-size=50m')
+            # NOTE: This is a workaround for a dnf output race condition that can occur in
+ # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
+ if self.system_facts['ansible_pkg_mgr'] == 'dnf':
+ rpm_rebuilddb()
+
version_info = get_docker_version_info()
if version_info is not None:
docker['api_version'] = version_info['api_version']
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_hosted_metrics/README.md
index 015a673a8..c2af3c494 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_hosted_metrics/README.md
@@ -40,7 +40,7 @@ Example Playbook
- name: Configure openshift-metrics
hosts: oo_first_master
roles:
- - role: openshift_metrics
+ - role: openshift_hosted_metrics
```
License
diff --git a/roles/openshift_metrics/defaults/main.yml b/roles/openshift_hosted_metrics/defaults/main.yml
index a01f24df8..a01f24df8 100644
--- a/roles/openshift_metrics/defaults/main.yml
+++ b/roles/openshift_hosted_metrics/defaults/main.yml
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
index 69c5a1663..69c5a1663 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_hosted_metrics/handlers/main.yml
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_hosted_metrics/meta/main.yaml
index debca3ca6..debca3ca6 100644
--- a/roles/openshift_metrics/meta/main.yaml
+++ b/roles/openshift_hosted_metrics/meta/main.yaml
diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
index 0f520e685..2c839996e 100644
--- a/roles/openshift_metrics/tasks/install.yml
+++ b/roles/openshift_hosted_metrics/tasks/install.yml
@@ -3,7 +3,7 @@
- name: Test if metrics-deployer service account exists
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace=openshift-infra
get serviceaccount metrics-deployer -o json
register: serviceaccount
@@ -14,7 +14,7 @@
shell: >
echo {{ metrics_deployer_sa | to_json | quote }} |
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
create -f -
when: serviceaccount.rc == 1
@@ -22,7 +22,7 @@
- name: Test edit permissions
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
register: edit_rolebindings
@@ -31,7 +31,7 @@
- name: Add edit permission to the openshift-infra project to metrics-deployer SA
command: >
{{ openshift.common.client_binary }} adm
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
policy add-role-to-user edit
system:serviceaccount:openshift-infra:metrics-deployer
@@ -40,7 +40,7 @@
- name: Test hawkular view permissions
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
register: view_rolebindings
@@ -49,7 +49,7 @@
- name: Add view permissions to hawkular SA
command: >
{{ openshift.common.client_binary }} adm
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
policy add-role-to-user view
system:serviceaccount:openshift-infra:hawkular
@@ -58,7 +58,7 @@
- name: Test cluster-reader permissions
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
register: cluster_reader_clusterrolebindings
@@ -67,7 +67,7 @@
- name: Add cluster-reader permission to the openshift-infra project to heapster SA
command: >
{{ openshift.common.client_binary }} adm
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
policy add-cluster-role-to-user cluster-reader
system:serviceaccount:openshift-infra:heapster
@@ -76,7 +76,7 @@
- name: Create metrics-deployer secret
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
secrets new metrics-deployer nothing=/dev/null
register: metrics_deployer_secret
@@ -98,7 +98,7 @@
{{ image_version }} \
-v MODE={{ deployment_mode }} \
| {{ openshift.common.client_binary }} --namespace openshift-infra \
- --config={{ openshift_metrics_kubeconfig }} \
+ --config={{ openshift_hosted_metrics_kubeconfig }} \
create -o name -f -"
- name: Deploy Metrics
@@ -116,7 +116,7 @@
shell: >
{{ openshift.common.client_binary }}
--namespace openshift-infra
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
get {{ deploy_metrics.stdout }}
register: deploy_result
until: "{{ 'Completed' in deploy_result.stdout }}"
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_hosted_metrics/tasks/main.yaml
index 68e4a48b9..5ce8aa92b 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_hosted_metrics/tasks/main.yaml
@@ -6,11 +6,11 @@
- name: Record kubeconfig tmp dir
set_fact:
- openshift_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- name: Copy the admin client config(s)
command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_metrics_kubeconfig }}
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }}
changed_when: False
- name: Set hosted metrics facts
@@ -33,7 +33,7 @@
- name: Check for existing metrics pods
shell: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get pods -l {{ item }} | grep -q Running
register: metrics_pods_status
@@ -47,7 +47,7 @@
- name: Check for previous deployer
shell: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
register: metrics_deployer_status
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_hosted_metrics/vars/main.yaml
index 6c207d6ac..6c207d6ac 100644
--- a/roles/openshift_metrics/vars/main.yaml
+++ b/roles/openshift_hosted_metrics/vars/main.yaml
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index 616f44c1d..b69b60c1d 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -47,6 +47,8 @@ oadm manage-node --drain ${NODE}
oadm manage-node --schedulable=true ${NODE}
````
+> If you are using a version older than 1.5/3.5, you must replace `--drain` with `--evacuate`.
+
TODO
diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml
index ea7c9bb45..50d94f6a3 100644
--- a/roles/openshift_storage_nfs_lvm/meta/main.yml
+++ b/roles/openshift_storage_nfs_lvm/meta/main.yml
@@ -14,4 +14,5 @@ galaxy_info:
- all
categories:
- openshift
-dependencies: []
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index ea0cc2a94..49dd657b5 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -2,7 +2,7 @@
# TODO -- this may actually work on atomic hosts
- fail:
msg: "openshift_storage_nfs_lvm is not compatible with atomic host"
- when: openshift.common.is_atomic | true
+ when: openshift.common.is_atomic | bool
- name: Create lvm volumes
lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G
diff --git a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
index 19e150f7d..c273aca9f 100644
--- a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
+++ b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
@@ -14,8 +14,8 @@
"accessModes": [ "ReadWriteOnce", "ReadWriteMany" ],
"persistentVolumeReclaimPolicy": "{{ osnl_volume_reclaim_policy }}",
"nfs": {
- "Server": "{{ inventory_hostname }}",
- "Path": "{{ osnl_mount_dir }}/{{ item }}"
+ "server": "{{ inventory_hostname }}",
+ "path": "{{ osnl_mount_dir }}/{{ item }}"
}
}
}
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 000000000..06346852c
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,27 @@
+[bdist_wheel]
+# This flag says that the code is written to work on both Python 2 and Python
+# 3. If at all possible, it is good practice to do this. If you cannot, you
+# will need to generate wheels for each Python version that you support.
+universal=1
+
+[nosetests]
+tests=roles/openshift_master_facts/test/, test/
+verbosity=2
+with-coverage=1
+cover-html=1
+cover-inclusive=1
+cover-min-percentage=70
+cover-erase=1
+detailed-errors=1
+cover-branches=1
+
+[yamllint]
+excludes=.tox,utils,files
+
+[lint]
+lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
+
+[flake8]
+exclude=.tox/*,utils/*,inventory/*
+max_line_length = 120
+ignore = E501,T003
diff --git a/setup.py b/setup.py
new file mode 100644
index 000000000..c826c167f
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,193 @@
+"""A setuptools based setup module.
+
+"""
+from __future__ import print_function
+
+import os
+import fnmatch
+import re
+
+import yaml
+
+# Always prefer setuptools over distutils
+from setuptools import setup, Command
+from setuptools_lint.setuptools_command import PylintCommand
+from six import string_types
+from yamllint.config import YamlLintConfig
+from yamllint.cli import Format
+from yamllint import linter
+
+
+def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
+ ''' find files matching file_regex '''
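+    # exclude_dirs and include_dirs are lists of fnmatch patterns, e.g.
+    # ['.tox', 'utils'], translated to regexes that filter dirs during os.walk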
+ found = []
+ exclude_regex = ''
+ include_regex = ''
+
+ if exclude_dirs is not None:
+ exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.'
+
+ if include_dirs is not None:
+ include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.'
+
+ for root, dirs, files in os.walk(base_dir):
+ if exclude_dirs is not None:
+ # filter out excludes for dirs
+ dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)]
+
+ if include_dirs is not None:
+ # filter for includes for dirs
+ dirs[:] = [d for d in dirs if re.match(include_regex, d)]
+
+ matches = [os.path.join(root, f) for f in files if re.search(file_regex, f) is not None]
+ found.extend(matches)
+
+ return found
+
+
+class OpenShiftAnsibleYamlLint(Command):
+ ''' Command to run yamllint '''
+ description = "Run yamllint tests"
+ user_options = [
+ ('excludes=', 'e', 'directories to exclude'),
+ ('config-file=', 'c', 'config file to use'),
+ ('format=', 'f', 'format to use (standard, parsable)'),
+ ]
+
+ def initialize_options(self):
+ ''' initialize_options '''
+ # Reason: Defining these attributes as a part of initialize_options is
+ # consistent with upstream usage
+ # Status: permanently disabled
+ # pylint: disable=attribute-defined-outside-init
+ self.excludes = None
+ self.config_file = None
+ self.format = None
+
+ def finalize_options(self):
+ ''' finalize_options '''
+ # Reason: These attributes are defined in initialize_options and this
+        # usage is consistent with upstream usage
+ # Status: permanently disabled
+ # pylint: disable=attribute-defined-outside-init
+ if isinstance(self.excludes, string_types):
+ self.excludes = self.excludes.split(',')
+ if self.format is None:
+ self.format = 'standard'
+ assert (self.format in ['standard', 'parsable']), (
+ 'unknown format {0}.'.format(self.format))
+ if self.config_file is None:
+ self.config_file = '.yamllint'
+ assert os.path.isfile(self.config_file), (
+ 'yamllint config file {0} does not exist.'.format(self.config_file))
+
+ def run(self):
+ ''' run command '''
+ if self.excludes is not None:
+ print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False)))
+
+ config = YamlLintConfig(file=self.config_file)
+
+ has_errors = False
+ has_warnings = False
+
+ if self.format == 'parsable':
+ format_method = Format.parsable
+ else:
+ format_method = Format.standard_color
+
+ for yaml_file in find_files(os.getcwd(), self.excludes, None, r'\.ya?ml$'):
+ first = True
+ with open(yaml_file, 'r') as contents:
+ for problem in linter.run(contents, config):
+ if first and self.format != 'parsable':
+ print('\n{0}:'.format(os.path.relpath(yaml_file)))
+ first = False
+
+ print(format_method(problem, yaml_file))
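+                    # yamllint's PROBLEM_LEVELS maps 2 -> 'error' and
+                    # 1 -> 'warning'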
+ if problem.level == linter.PROBLEM_LEVELS[2]:
+ has_errors = True
+ elif problem.level == linter.PROBLEM_LEVELS[1]:
+ has_warnings = True
+
+ if has_errors or has_warnings:
+            print('yamllint issues found')
+ raise SystemExit(1)
+
+
+class OpenShiftAnsiblePylint(PylintCommand):
+ ''' Class to override the default behavior of PylintCommand '''
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def find_all_modules(self):
+ ''' find all python files to test '''
+ exclude_dirs = ['.tox', 'utils', 'test', 'tests', 'git']
+ modules = []
+ for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'):
+ package = os.path.basename(match).replace('.py', '')
+ modules.append(('openshift_ansible', package, match))
+ return modules
+
+ def get_finalized_command(self, cmd):
+ ''' override get_finalized_command to ensure we use our
+ find_all_modules method '''
+ if cmd == 'build_py':
+ return self
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def with_project_on_sys_path(self, func, func_args, func_kwargs):
+ ''' override behavior, since we don't need to build '''
+ return func(*func_args, **func_kwargs)
+
+
+class UnsupportedCommand(Command):
+ ''' Basic Command to override unsupported commands '''
+ user_options = []
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def initialize_options(self):
+ ''' initialize_options '''
+ pass
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def finalize_options(self):
+ ''' initialize_options '''
+ pass
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def run(self):
+ ''' run command '''
+ print("Unsupported command for openshift-ansible")
+
+
+setup(
+ name='openshift-ansible',
+ license="Apache 2.0",
+ cmdclass={
+ 'install': UnsupportedCommand,
+ 'develop': UnsupportedCommand,
+ 'build': UnsupportedCommand,
+ 'build_py': UnsupportedCommand,
+ 'build_ext': UnsupportedCommand,
+ 'egg_info': UnsupportedCommand,
+ 'sdist': UnsupportedCommand,
+ 'lint': OpenShiftAnsiblePylint,
+ 'yamllint': OpenShiftAnsibleYamlLint,
+ },
+ packages=[],
+)
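+
+# typical invocations (wired into tox.ini):
+#   python setup.py lint
+#   python setup.py yamllint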
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 000000000..2ee1e657d
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,11 @@
+six
+pyOpenSSL
+flake8
+flake8-mutable
+flake8-print
+pylint
+setuptools-lint
+PyYAML
+yamllint
+nose
+coverage
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 000000000..158974fbe
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,18 @@
+[tox]
+minversion=2.3.1
+envlist =
+ py{27,35}-ansible22-{pylint,unit,flake8,yamllint}
+skipsdist=True
+skip_missing_interpreters=True
+
+[testenv]
+deps =
+ -rtest-requirements.txt
+ py35-flake8: flake8-bugbear
+ ansible22: ansible~=2.2
+
+commands =
+ flake8: flake8
+ pylint: python setup.py lint
+ yamllint: python setup.py yamllint
+ unit: nosetests
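+
+# run a single environment with, e.g.: tox -e py27-ansible22-flake8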
diff --git a/utils/.pylintrc b/utils/.pylintrc
new file mode 120000
index 000000000..30b33b524
--- /dev/null
+++ b/utils/.pylintrc
@@ -0,0 +1 @@
+../.pylintrc \ No newline at end of file
diff --git a/utils/Makefile b/utils/Makefile
index 2a37b922c..038c31fcf 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -46,7 +46,7 @@ clean:
@find . -type f \( -name "*~" -or -name "#*" \) -delete
@rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
@rm -fR $(VENV)
-
+ @rm -fR .tox
# To force a rebuild of the docs run 'touch' on any *.in file under
# docs/man/man1/
@@ -84,41 +84,27 @@ ci-unittests: $(VENV)
@echo "#############################################"
@echo "# Running Unit Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && tox -e py27-unit
- . $(VENV)/bin/activate && tox -e py35-unit
+ . $(VENV)/bin/activate && detox -e py27-unit,py35-unit
@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
ci-pylint: $(VENV)
@echo "#############################################"
@echo "# Running PyLint Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && python -m pylint --rcfile ../git/.pylintrc $(PYFILES)
-
-ci-yamllint: $(VENV)
- @echo "#############################################"
- @echo "# Running yamllint Tests in virtualenv"
- @echo "#############################################"
- @. $(VENV)/bin/activate && yamllint -c ../git/.yamllint $(YAMLFILES)
-
-ci-list-deps: $(VENV)
- @echo "#############################################"
- @echo "# Listing all pip deps"
- @echo "#############################################"
- . $(VENV)/bin/activate && pip freeze
+ . $(VENV)/bin/activate && detox -e py27-pylint,py35-pylint
ci-flake8: $(VENV)
@echo "#############################################"
@echo "# Running Flake8 Compliance Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && tox -e py27-flake8
- . $(VENV)/bin/activate && tox -e py35-flake8
+ . $(VENV)/bin/activate && detox -e py27-flake8,py35-flake8
-ci-tox:
- . $(VENV)/bin/activate && tox
+ci-tox: $(VENV)
+ . $(VENV)/bin/activate && detox
-ci: ci-list-deps ci-tox ci-pylint ci-yamllint
+ci: ci-tox
@echo
@echo "##################################################################################"
@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
@echo "To clean your test environment run 'make clean'"
- @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8', 'ci-yamllint'"
+ @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8'"
diff --git a/utils/README.md b/utils/README.md
index 2abf2705e..c37ab41e6 100644
--- a/utils/README.md
+++ b/utils/README.md
@@ -6,6 +6,47 @@ Run the command:
to run an array of unittests locally.
+Underneath the covers, we use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
+tests. Alternatively, tests can be run using [detox](https://pypi.python.org/pypi/detox/), which allows
+for running tests in parallel.
+
+
+```
+pip install tox detox
+```
+
+List the test environments available:
+```
+tox -l
+```
+
+Run all of the tests with:
+```
+tox
+```
+
+Run all of the tests in parallel with detox:
+```
+detox
+```
+
+Running a particular test environment (python 2.7 flake8 tests in this case):
+```
+tox -e py27-ansible22-flake8
+```
+
+Running a particular test environment in a clean virtualenv (Python 3.5 pylint
+tests in this case):
+```
+tox -r -e py35-pylint
+```
+
+If you want to enter the virtualenv created by tox to do additional
+testing/debugging (py27-flake8 env in this case):
+```
+source .tox/py27-flake8/bin/activate
+```
+
You will get errors if the log files already exist and can not be
written to by the current user (`/tmp/ansible.log` and
`/tmp/installer.txt`). *We're working on it.*
diff --git a/utils/setup.cfg b/utils/setup.cfg
index ea07eea9f..862dffd7b 100644
--- a/utils/setup.cfg
+++ b/utils/setup.cfg
@@ -5,7 +5,6 @@
universal=1
[nosetests]
-tests=../,../roles/openshift_master_facts/test/,test/
verbosity=2
with-coverage=1
cover-html=1
@@ -19,3 +18,6 @@ cover-branches=1
max-line-length=120
exclude=test/*,setup.py,oo-installenv
ignore=E501
+
+[lint]
+lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
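The new `[lint]` section configures setuptools-lint (added in test-requirements.txt below), which provides the `python setup.py lint` command used by the new pylint tox environments. Assuming the plugin passes `lint_disable` through as pylint's `--disable` option, the command should reduce to roughly this direct call (a sketch; `ooinstall` is the package this setup.cfg belongs to):

```
# Approximate equivalent of `python setup.py lint` under setuptools-lint,
# assuming lint_disable maps onto pylint's --disable flag.
import subprocess

subprocess.check_call([
    'pylint',
    '--disable=fixme,locally-disabled,file-ignored,duplicate-code',
    'ooinstall',
])
```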
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
index e5c5360c3..f6a7bde10 100644
--- a/utils/test-requirements.txt
+++ b/utils/test-requirements.txt
@@ -1,6 +1,7 @@
ansible
configparser
pylint
+setuptools-lint
nose
coverage
mock
@@ -11,3 +12,4 @@ backports.functools_lru_cache
pyOpenSSL
yamllint
tox
+detox
diff --git a/utils/test/openshift_ansible_tests.py b/utils/test/openshift_ansible_tests.py
new file mode 100644
index 000000000..5847fe37b
--- /dev/null
+++ b/utils/test/openshift_ansible_tests.py
@@ -0,0 +1,71 @@
+import os
+import unittest
+import tempfile
+import shutil
+import yaml
+
+from six.moves import configparser
+
+from ooinstall import openshift_ansible
+from ooinstall.oo_config import Host, OOConfig
+
+
+BASE_CONFIG = """
+---
+variant: openshift-enterprise
+variant_version: 3.3
+version: v2
+deployment:
+    ansible_ssh_user: cloud-user
+    hosts: []
+    roles:
+        master:
+        node:
+"""
+
+
+class TestOpenShiftAnsible(unittest.TestCase):
+
+    def setUp(self):
+        self.tempfiles = []
+        self.work_dir = tempfile.mkdtemp(prefix='openshift_ansible_tests')
+        self.configfile = os.path.join(self.work_dir, 'ooinstall.config')
+        with open(self.configfile, 'w') as config_file:
+            config_file.write(BASE_CONFIG)
+        self.inventory = os.path.join(self.work_dir, 'hosts')
+        config = OOConfig(self.configfile)
+        config.settings['ansible_inventory_path'] = self.inventory
+        openshift_ansible.set_config(config)
+
+    def tearDown(self):
+        shutil.rmtree(self.work_dir)
+
+    def generate_hosts(self, num_hosts, name_prefix, roles=None, new_host=False):
+        hosts = []
+        for num in range(1, num_hosts + 1):
+            hosts.append(Host(connect_to=name_prefix + str(num),
+                              roles=roles, new_host=new_host))
+        return hosts
+
+    def test_generate_inventory_new_nodes(self):
+        hosts = self.generate_hosts(1, 'master', roles=['master', 'etcd'])
+        hosts.extend(self.generate_hosts(1, 'node', roles=['node']))
+        hosts.extend(self.generate_hosts(1, 'new_node', roles=['node'], new_host=True))
+        openshift_ansible.generate_inventory(hosts)
+        inventory = configparser.ConfigParser(allow_no_value=True)
+        inventory.read(self.inventory)
+        self.assertTrue(inventory.has_section('new_nodes'))
+        self.assertTrue(inventory.has_option('new_nodes', 'new_node1'))
+
+    def test_write_inventory_vars_role_vars(self):
+        with open(self.inventory, 'w') as inv:
+            openshift_ansible.CFG.deployment.roles['master'].variables = {'color': 'blue'}
+            openshift_ansible.CFG.deployment.roles['node'].variables = {'color': 'green'}
+            openshift_ansible.write_inventory_vars(inv, None)
+
+        inventory = configparser.ConfigParser(allow_no_value=True)
+        inventory.read(self.inventory)
+        self.assertTrue(inventory.has_section('masters:vars'))
+        self.assertEqual('blue', inventory.get('masters:vars', 'color'))
+        self.assertTrue(inventory.has_section('nodes:vars'))
+        self.assertEqual('green', inventory.get('nodes:vars', 'color'))
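Both tests above construct the parser with `allow_no_value=True` because Ansible INI inventories list hosts as bare keys with no `=value`, which a default `ConfigParser` rejects. A minimal self-contained sketch of that parsing behavior (Python 3; the sample content mirrors the sections the assertions check):

```
# Why allow_no_value=True is needed: bare hostnames become options
# whose value is None instead of raising a parse error.
import configparser

SAMPLE_INVENTORY = """
[new_nodes]
new_node1

[masters:vars]
color=blue
"""

parser = configparser.ConfigParser(allow_no_value=True)
parser.read_string(SAMPLE_INVENTORY)
assert parser.has_option('new_nodes', 'new_node1')
assert parser.get('new_nodes', 'new_node1') is None
assert parser.get('masters:vars', 'color') == 'blue'
```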
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
index b18f85692..cbce64f7e 100644
--- a/utils/test/test_utils.py
+++ b/utils/test/test_utils.py
@@ -38,7 +38,6 @@ class TestUtils(unittest.TestCase):
with mock.patch('ooinstall.utils.installer_log') as _il:
debug_env(self.debug_all_params)
- print(_il.debug.call_args_list)
# Debug was called for each item we expect
self.assertEqual(
diff --git a/utils/tox.ini b/utils/tox.ini
index 747d79dfe..1308f7505 100644
--- a/utils/tox.ini
+++ b/utils/tox.ini
@@ -1,7 +1,7 @@
[tox]
minversion=2.3.1
envlist =
- py{27,35}-{flake8,unit}
+ py{27,35}-{flake8,unit,pylint}
skipsdist=True
skip_missing_interpreters=True
@@ -10,8 +10,7 @@ usedevelop=True
deps =
-rtest-requirements.txt
py35-flake8: flake8-bugbear
-
commands =
- flake8: flake8 --config=setup.cfg ../ --exclude="../utils,.tox,../inventory"
flake8: python setup.py flake8
unit: python setup.py nosetests
+ pylint: python setup.py lint
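For reference, tox expands the generative envlist `py{27,35}-{flake8,unit,pylint}` into the cross-product of its bracketed factors, so `tox -l` lists six environments. A quick sketch of that expansion:

```
# The six environments generated by the envlist above, computed as the
# cross-product of the py{27,35} and {flake8,unit,pylint} factors.
from itertools import product

envs = ['py%s-%s' % (py, task)
        for py, task in product(('27', '35'), ('flake8', 'unit', 'pylint'))]
print('\n'.join(envs))
# py27-flake8, py27-unit, py27-pylint, py35-flake8, py35-unit, py35-pylint
```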