31 files changed, 7950 insertions, 22 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9e7b18195..3343cc789 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.5-1 ./
+3.6.8-1 ./
diff --git a/Dockerfile b/Dockerfile
index c6593491d..eecf3630b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -14,6 +14,15 @@ LABEL name="openshift-ansible" \
       io.openshift.expose-services="" \
       io.openshift.tags="openshift,install,upgrade,ansible"
 
+USER root
+
+RUN INSTALL_PKGS="skopeo" && \
+    yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
+    rpm -V $INSTALL_PKGS && \
+    yum clean all
+
+USER ${USER_UID}
+
 # The playbook to be run is specified via the PLAYBOOK_FILE env var.
 # This sets a default of openshift_facts.yml as it's an informative playbook
 # that can help test that everything is set properly (inventory, sshkeys)
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 221740ff3..a80f72c07 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
 %global __requires_exclude ^/usr/bin/ansible-playbook$
 
 Name:           openshift-ansible
-Version:        3.6.5
+Version:        3.6.8
 Release:        1%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
@@ -21,7 +21,7 @@ Requires:      ansible >= 2.2.0.0-1
 Requires:      python2
 Requires:      python-six
 Requires:      tar
-Requires:      openshift-ansible-docs = %{version}-%{release}
+Requires:      openshift-ansible-docs = %{version}
 Requires:      java-1.8.0-openjdk-headless
 Requires:      httpd-tools
 Requires:      libselinux-python
@@ -250,7 +250,7 @@ BuildArch:     noarch
 %package -n atomic-openshift-utils
 Summary:       Atomic OpenShift Utilities
 BuildRequires: python-setuptools
-Requires:      %{name}-playbooks >= %{version}
+Requires:      %{name}-playbooks = %{version}
 Requires:      python-click
 Requires:      python-setuptools
 Requires:      PyYAML
@@ -270,6 +270,39 @@ Atomic OpenShift Utilities includes
 
 %changelog
+* Fri Mar 24 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.8-1
+- vendor patched upstream docker_container module. (jvallejo@redhat.com)
+- add docker_image_availability check (jvallejo@redhat.com)
+- Do not use auto_expand_replicas (lukas.vlcek@gmail.com)
+- Adding tests to increase TC. (kwoodson@redhat.com)
+- Adding a pvc create test case. (kwoodson@redhat.com)
+- Cherry picking from #3711 (ewolinet@redhat.com)
+
+* Thu Mar 23 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.7-1
+- openshift_logging calculate min_masters to fail early on split brain
+  (jcantril@redhat.com)
+- Fixed linting and configmap_name param (kwoodson@redhat.com)
+- Adding configmap support. (kwoodson@redhat.com)
+- Make /rootfs mount rslave (sdodson@redhat.com)
+- Update imageConfig.format on upgrades to match oreg_url (sdodson@redhat.com)
+- Adding configmap support and adding tests. (kwoodson@redhat.com)
+- Adding oc_volume to lib_openshift. (kwoodson@redhat.com)
+- upgrade: restart ovs-vswitchd and ovsdb-server (gscrivan@redhat.com)
+- Make atomic-openshift-utils require playbooks of the same version
+  (sdodson@redhat.com)
+
+* Wed Mar 22 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.6-1
+- Fix copy-pasta docstrings (rhcarvalho@gmail.com)
+- Rename _ns -> node_selector (rhcarvalho@gmail.com)
+- Reindent code (rhcarvalho@gmail.com)
+- Update the failure methods and add required variables/functions
+  (tbielawa@redhat.com)
+- Import the default ansible output callback on_failed methods
+  (tbielawa@redhat.com)
+- Switched Cassandra to use certificates generated by OpenShift
+  (juraci@kroehling.de)
+- Allow user to specify additions to ES config (jcantril@redhat.com)
+
 * Tue Mar 21 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.5-1
 - Attempt to match version of excluders to target version (sdodson@redhat.com)
 - Get rid of adjust.yml (sdodson@redhat.com)
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index e16a1f6d0..c6e799261 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -64,6 +64,7 @@
     static: yes
   roles:
   - openshift_facts
+  - lib_utils
   post_tasks:
 
   # Run the pre-upgrade hook if defined:
@@ -113,6 +114,13 @@
       state: link
     when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
 
+  - name: Update oreg value
+    yedit:
+      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+      key: 'imageConfig.format'
+      value: "{{ oreg_url }}"
+    when: oreg_url is defined
+
   # Run the upgrade hook prior to restarting services/system if defined:
   - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
     when: openshift_master_upgrade_hook is defined
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index 93cf34559..3a892971b 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -2061,7 +2061,7 @@ class Service(Yedit):
 # -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
 
 class Volume(object):
-    ''' Class to model an openshift volume object'''
+    ''' Class to represent an openshift volume object'''
     volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
                           "dc":  "spec.template.spec.containers[0].volumeMounts",
                           "rc":  "spec.template.spec.containers[0].volumeMounts",
@@ -2093,6 +2093,11 @@ class Volume(object):
         elif volume_type == 'hostpath':
             volume['hostPath'] = {}
             volume['hostPath']['path'] = volume_info['path']
+        elif volume_type == 'configmap':
+            volume['configMap'] = {}
+            volume['configMap']['name'] = volume_info['configmap_name']
+            volume_mount = {'mountPath': volume_info['path'],
+                            'name': volume_info['name']}
 
         return (volume, volume_mount)
 
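For context on the new 'configmap' branch above: given a volume_info mapping, lib/volume.py now returns a (volume, volume_mount) pair wired to the named ConfigMap. A minimal sketch of the result, using hypothetical names rather than anything from the commit:

    # Illustrative input a caller might pass (names are made up):
    volume_info = {'name': 'config-vol',
                   'path': '/etc/app',
                   'configmap_name': 'app-config'}
    # The configmap branch above then yields:
    volume = {'configMap': {'name': 'app-config'}}
    volume_mount = {'mountPath': '/etc/app', 'name': 'config-vol'}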
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
new file mode 100644
index 000000000..df0b0d86a
--- /dev/null
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -0,0 +1,1733 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+#     ___ ___ _  _ ___ ___    _ _____ ___ ___
+#    / __| __| \| | __| _ \  /_\_   _| __|   \
+#   | (_ | _|| .` | _||   / / _ \| | | _|| |) |
+#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _|
+#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | |
+#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+   OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+    import ruamel.yaml as yaml
+except ImportError:
+    import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/pvc -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_pvc
+short_description: Modify and idempotently manage openshift persistent volume claims
+description:
+  - Modify openshift persistent volume claims programmatically.
+options:
+  state:
+    description:
+    - Supported states are present, absent, and list.
+    - present - will ensure object is created or updated to the value specified
+    - list - will return a pvc
+    - absent - will remove a pvc
+    required: False
+    default: present
+    choices: ['present', 'absent', 'list']
+    aliases: []
+  kubeconfig:
+    description:
+    - The path for the kubeconfig file to use for authentication
+    required: false
+    default: /etc/origin/master/admin.kubeconfig
+    aliases: []
+  debug:
+    description:
+    - Turn on debug output.
+    required: false
+    default: False
+    aliases: []
+  name:
+    description:
+    - Name of the object that is being queried.
+    required: false
+    default: None
+    aliases: []
+  namespace:
+    description:
+    - The namespace where the object lives.
+    required: false +    default: str +    aliases: [] +  volume_capacity: +    description: +    - The requested volume capacity +    required: False +    default: 1G +    aliases: [] +  access_modes: +    description: +    - The access modes allowed for the pvc +    - Expects a list +    required: False +    default: ReadWriteOnce +    choices: +    - ReadWriteOnce +    - ReadOnlyMany +    - ReadWriteMany +    aliases: [] +author: +- "Kenny Woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: create a pvc +  oc_pvc: +    namespace: awesomeapp +    name: dbstorage +    access_modes: +    - ReadWriteOnce +    volume_capacity: 5G +  register: pvcout +''' + +# -*- -*- -*- End included fragment: doc/pvc -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- +# pylint: disable=undefined-variable,missing-docstring +# noqa: E301,E302 + + +class YeditException(Exception): +    ''' Exception class for Yedit ''' +    pass + + +# pylint: disable=too-many-public-methods +class Yedit(object): +    ''' Class to modify yaml files ''' +    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" +    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" +    com_sep = set(['.', '#', '|', ':']) + +    # pylint: disable=too-many-arguments +    def __init__(self, +                 filename=None, +                 content=None, +                 content_type='yaml', +                 separator='.', +                 backup=False): +        self.content = content +        self._separator = separator +        self.filename = filename +        self.__yaml_dict = content +        self.content_type = content_type +        self.backup = backup +        self.load(content_type=self.content_type) +        if self.__yaml_dict is None: +            self.__yaml_dict = {} + +    @property +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @separator.setter +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @property +    def yaml_dict(self): +        ''' getter method for yaml_dict ''' +        return self.__yaml_dict + +    @yaml_dict.setter +    def yaml_dict(self, value): +        ''' setter method for yaml_dict ''' +        self.__yaml_dict = value + +    @staticmethod +    def parse_key(key, sep='.'): +        '''parse the key allowing the appropriate separator''' +        common_separators = list(Yedit.com_sep - set([sep])) +        return re.findall(Yedit.re_key % ''.join(common_separators), key) + +    @staticmethod +    def valid_key(key, sep='.'): +        '''validate the incoming key''' +        common_separators = list(Yedit.com_sep - set([sep])) +        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key): +            return False + +        return True + +    @staticmethod +    def remove_entry(data, key, sep='.'): +        ''' remove data at location key ''' +        if key == '' and isinstance(data, dict): +            data.clear() +            return True +        elif key == '' and isinstance(data, list): +            del data[:] +            return True + +        if not (key and Yedit.valid_key(key, sep)) and \ +           isinstance(data, (list, dict)): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) 
+            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        # process last index for remove +        # expected list entry +        if key_indexes[-1][0]: +            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +                del data[int(key_indexes[-1][0])] +                return True + +        # expected dict entry +        elif key_indexes[-1][1]: +            if isinstance(data, dict): +                del data[key_indexes[-1][1]] +                return True + +    @staticmethod +    def add_entry(data, key, item=None, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a#b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key: +                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501 +                    data = data[dict_key] +                    continue + +                elif data and not isinstance(data, dict): +                    raise YeditException("Unexpected item type found while going through key " + +                                         "path: {} (at key: {})".format(key, dict_key)) + +                data[dict_key] = {} +                data = data[dict_key] + +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                raise YeditException("Unexpected item type found while going through key path: {}".format(key)) + +        if key == '': +            data = item + +        # process last index for add +        # expected list entry +        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +            data[int(key_indexes[-1][0])] = item + +        # expected dict entry +        elif key_indexes[-1][1] and isinstance(data, dict): +            data[key_indexes[-1][1]] = item + +        # didn't add/update to an existing list, nor add/update key to a dict +        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a +        # non-existent array +        else: +            raise YeditException("Error adding to object at path: {}".format(key)) + +        return data + +    @staticmethod +    def get_entry(data, key, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a.b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        return data + 
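Taken together, parse_key, get_entry, add_entry and remove_entry give Yedit dotted key-path access over nested dicts and lists. A minimal sketch of the semantics (illustrative only, not part of the commit; assumes the Yedit class above is in scope):

    data = {'a': {'b': {'c': 'd'}, 'list': [1, 2]}}
    Yedit.get_entry(data, 'a.b.c')       # -> 'd'
    Yedit.get_entry(data, 'a.list[1]')   # -> 2
    Yedit.add_entry(data, 'a.b.e', 'f')  # creates intermediate dicts as needed
    Yedit.remove_entry(data, 'a.b.c')    # -> True; deletes the 'c' key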
+    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. ''' + +        tmp_filename = filename + '.yedit' + +        with open(tmp_filename, 'w') as yfd: +            yfd.write(contents) + +        os.rename(tmp_filename, filename) + +    def write(self): +        ''' write to file ''' +        if not self.filename: +            raise YeditException('Please specify a filename.') + +        if self.backup and self.file_exists(): +            shutil.copy(self.filename, self.filename + '.orig') + +        # Try to set format attributes if supported +        try: +            self.yaml_dict.fa.set_block_style() +        except AttributeError: +            pass + +        # Try to use RoundTripDumper if supported. +        try: +            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) +        except AttributeError: +            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) + +        return (True, self.yaml_dict) + +    def read(self): +        ''' read from file ''' +        # check if it exists +        if self.filename is None or not self.file_exists(): +            return None + +        contents = None +        with open(self.filename) as yfd: +            contents = yfd.read() + +        return contents + +    def file_exists(self): +        ''' return whether file exists ''' +        if os.path.exists(self.filename): +            return True + +        return False + +    def load(self, content_type='yaml'): +        ''' return yaml file ''' +        contents = self.read() + +        if not contents and not self.content: +            return None + +        if self.content: +            if isinstance(self.content, dict): +                self.yaml_dict = self.content +                return self.yaml_dict +            elif isinstance(self.content, str): +                contents = self.content + +        # check if it is yaml +        try: +            if content_type == 'yaml' and contents: +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +                # Try to use RoundTripLoader if supported. +                try: +                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader) +                except AttributeError: +                    self.yaml_dict = yaml.safe_load(contents) + +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +            elif content_type == 'json' and contents: +                self.yaml_dict = json.loads(contents) +        except yaml.YAMLError as err: +            # Error loading yaml or json +            raise YeditException('Problem with loading yaml file. 
%s' % err) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # AUDIT:maybe-no-member makes sense due to loading data from +        # a serialized format. 
+        # pylint: disable=maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in ' + +                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501 + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        # Try to use ruamel.yaml and fallback to pyyaml +        try: +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                      default_flow_style=False), +                                 yaml.RoundTripLoader) +        except AttributeError: +            tmp_copy = copy.deepcopy(self.yaml_dict) + +        # set the format attributes if available +        try: +            tmp_copy.fa.set_block_style() +        except AttributeError: +            pass + +        result = Yedit.add_entry(tmp_copy, path, value, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            # Try to use ruamel.yaml and fallback to pyyaml +            try: +                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                          default_flow_style=False), +                                     yaml.RoundTripLoader) +            except AttributeError: +                tmp_copy = copy.deepcopy(self.yaml_dict) + +            # set the format attributes if 
available +            try: +                tmp_copy.fa.set_block_style() +            except AttributeError: +                pass + +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod +    def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]' +                                     % (inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # If vtype is not str then go ahead and attempt to yaml load it. +        if isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming ' + +                                     'value. value=[%s] vtype=[%s]' +                                     % (type(inc_value), vtype)) + +        return inc_value + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(module): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=module.params['src'], +                         backup=module.params['backup'], +                         separator=module.params['separator']) + +        if module.params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and \ +               module.params['state'] != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [%s].  
Verify that the ' +
+                               'file exists, that it has correct' +
+                               ' permissions, and is valid yaml.'}
+
+        if module.params['state'] == 'list':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['key']:
+                rval = yamlfile.get(module.params['key']) or {}
+
+            return {'changed': False, 'result': rval, 'state': "list"}
+
+        elif module.params['state'] == 'absent':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['update']:
+                rval = yamlfile.pop(module.params['key'],
+                                    module.params['value'])
+            else:
+                rval = yamlfile.delete(module.params['key'])
+
+            if rval[0] and module.params['src']:
+                yamlfile.write()
+
+            return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+        elif module.params['state'] == 'present':
+            # check if content is different than what is in the file
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+
+                # We had no edits to make and the contents are the same
+                if yamlfile.yaml_dict == content and \
+                   module.params['value'] is None:
+                    return {'changed': False,
+                            'result': yamlfile.yaml_dict,
+                            'state': "present"}
+
+                yamlfile.yaml_dict = content
+
+            # we were passed a value; parse it
+            if module.params['value']:
+                value = Yedit.parse_value(module.params['value'],
+                                          module.params['value_type'])
+                key = module.params['key']
+                if module.params['update']:
+                    # pylint: disable=line-too-long
+                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501
+                                                      module.params['curr_value_format'])  # noqa: E501
+
+                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501
+
+                elif module.params['append']:
+                    rval = yamlfile.append(key, value)
+                else:
+                    rval = yamlfile.put(key, value)
+
+                if rval[0] and module.params['src']:
+                    yamlfile.write()
+
+                return {'changed': rval[0],
+                        'result': rval[1], 'state': "present"}
+
+            # no edits to make
+            if module.params['src']:
+                # pylint: disable=redefined-variable-type
+                rval = yamlfile.write()
+                return {'changed': rval[0],
+                        'result': rval[1],
+                        'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
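To illustrate the dispatch contract of run_ansible above (a sketch, not part of the commit; FakeModule is a stand-in for the AnsibleModule instance the real module passes in):

    class FakeModule(object):
        # Only the params the 'list' branch reads are supplied here.
        params = {'src': None, 'backup': False, 'separator': '.',
                  'state': 'list', 'content': {'a': {'b': 'c'}},
                  'content_type': '', 'key': 'a.b', 'value': None}

    Yedit.run_ansible(FakeModule())
    # -> {'changed': False, 'result': 'c', 'state': 'list'}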
+
+# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+    '''Exception class for openshiftcli'''
+    pass
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+    ''' Find and return oc binary file '''
+    # https://github.com/openshift/openshift-ansible/issues/3410
+    # oc can be in /usr/local/bin in some cases, but that may not
+    # be in $PATH due to ansible/sudo
+    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+    oc_binary = 'oc'
+
+    # Use shutil.which if it is available, otherwise fallback to a naive path search
+    try:
+        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+        if which_result is not None:
+            oc_binary = which_result
+    except AttributeError:
+        for path in paths:
+            if os.path.exists(os.path.join(path, oc_binary)):
+                oc_binary = os.path.join(path, oc_binary)
+                break
+
+    return oc_binary
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+    ''' Class to wrap the command line tools '''
+    def __init__(self,
+                 namespace,
+                 kubeconfig='/etc/origin/master/admin.kubeconfig',
+                 verbose=False,
+                 all_namespaces=False):
+        ''' Constructor for OpenshiftCLI '''
+        self.namespace = namespace
+        self.verbose = verbose
+        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
+        self.all_namespaces = all_namespaces
+        self.oc_binary = locate_oc_binary()
+
+    # Pylint allows only 5 arguments to be passed.
+    # pylint: disable=too-many-arguments +    def _replace_content(self, resource, rname, content, force=False, sep='.'): +        ''' replace the current object with the content ''' +        res = self._get(resource, rname) +        if not res['results']: +            return res + +        fname = Utils.create_tmpfile(rname + '-') + +        yed = Yedit(fname, res['results'][0], separator=sep) +        changes = [] +        for key, value in content.items(): +            changes.append(yed.put(key, value)) + +        if any([change[0] for change in changes]): +            yed.write() + +            atexit.register(Utils.cleanup, [fname]) + +            return self._replace(fname, force) + +        return {'returncode': 0, 'updated': False} + +    def _replace(self, fname, force=False): +        '''replace the current object with oc replace''' +        cmd = ['replace', '-f', fname] +        if force: +            cmd.append('--force') +        return self.openshift_cmd(cmd) + +    def _create_from_content(self, rname, content): +        '''create a temporary file and then call oc create on it''' +        fname = Utils.create_tmpfile(rname + '-') +        yed = Yedit(fname, content=content) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self._create(fname) + +    def _create(self, fname): +        '''call oc create on a filename''' +        return self.openshift_cmd(['create', '-f', fname]) + +    def _delete(self, resource, rname, selector=None): +        '''call oc delete on a resource''' +        cmd = ['delete', resource, rname] +        if selector: +            cmd.append('--selector=%s' % selector) + +        return self.openshift_cmd(cmd) + +    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501 +        '''process a template + +           template_name: the name of the template to process +           create: whether to send to oc create after processing +           params: the parameters for the template +           template_data: the incoming template's data; instead of a file +        ''' +        cmd = ['process'] +        if template_data: +            cmd.extend(['-f', '-']) +        else: +            cmd.append(template_name) +        if params: +            param_str = ["%s=%s" % (key, value) for key, value in params.items()] +            cmd.append('-v') +            cmd.extend(param_str) + +        results = self.openshift_cmd(cmd, output=True, input_data=template_data) + +        if results['returncode'] != 0 or not create: +            return results + +        fname = Utils.create_tmpfile(template_name + '-') +        yed = Yedit(fname, results['results']) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self.openshift_cmd(['create', '-f', fname]) + +    def _get(self, resource, rname=None, selector=None): +        '''return a resource by name ''' +        cmd = ['get', resource] +        if selector: +            cmd.append('--selector=%s' % selector) +        elif rname: +            cmd.append(rname) + +        cmd.extend(['-o', 'json']) + +        rval = self.openshift_cmd(cmd, output=True) + +        # Ensure results are retuned in an array +        if 'items' in rval: +            rval['results'] = rval['items'] +        elif not isinstance(rval['results'], list): +            rval['results'] = [rval['results']] + +        return rval + +    def _schedulable(self, node=None, selector=None, schedulable=True): +        ''' perform oadm 
manage-node scheduable ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        cmd.append('--schedulable=%s' % schedulable) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501 + +    def _list_pods(self, node=None, selector=None, pod_selector=None): +        ''' perform oadm list pods + +            node: the node in which to list pods +            selector: the label selector filter if provided +            pod_selector: the pod selector filter if provided +        ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        cmd.extend(['--list-pods', '-o', 'json']) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    # pylint: disable=too-many-arguments +    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): +        ''' perform oadm manage-node evacuate ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if dry_run: +            cmd.append('--dry-run') + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        if grace_period: +            cmd.append('--grace-period=%s' % int(grace_period)) + +        if force: +            cmd.append('--force') + +        cmd.append('--evacuate') + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    def _version(self): +        ''' return the openshift version''' +        return self.openshift_cmd(['version'], output=True, output_type='raw') + +    def _import_image(self, url=None, name=None, tag=None): +        ''' perform image import ''' +        cmd = ['import-image'] + +        image = '{0}'.format(name) +        if tag: +            image += ':{0}'.format(tag) + +        cmd.append(image) + +        if url: +            cmd.append('--from={0}/{1}'.format(url, image)) + +        cmd.append('-n{0}'.format(self.namespace)) + +        cmd.append('--confirm') +        return self.openshift_cmd(cmd) + +    def _run(self, cmds, input_data): +        ''' Actually executes the command. This makes mocking easier. 
''' +        curr_env = os.environ.copy() +        curr_env.update({'KUBECONFIG': self.kubeconfig}) +        proc = subprocess.Popen(cmds, +                                stdin=subprocess.PIPE, +                                stdout=subprocess.PIPE, +                                stderr=subprocess.PIPE, +                                env=curr_env) + +        stdout, stderr = proc.communicate(input_data) + +        return proc.returncode, stdout.decode(), stderr.decode() + +    # pylint: disable=too-many-arguments,too-many-branches +    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): +        '''Base command for oc ''' +        cmds = [self.oc_binary] + +        if oadm: +            cmds.append('adm') + +        cmds.extend(cmd) + +        if self.all_namespaces: +            cmds.extend(['--all-namespaces']) +        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501 +            cmds.extend(['-n', self.namespace]) + +        rval = {} +        results = '' +        err = None + +        if self.verbose: +            print(' '.join(cmds)) + +        try: +            returncode, stdout, stderr = self._run(cmds, input_data) +        except OSError as ex: +            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) + +        rval = {"returncode": returncode, +                "results": results, +                "cmd": ' '.join(cmds)} + +        if returncode == 0: +            if output: +                if output_type == 'json': +                    try: +                        rval['results'] = json.loads(stdout) +                    except ValueError as verr: +                        if "No JSON object could be decoded" in verr.args: +                            err = verr.args +                elif output_type == 'raw': +                    rval['results'] = stdout + +            if self.verbose: +                print("STDOUT: {0}".format(stdout)) +                print("STDERR: {0}".format(stderr)) + +            if err: +                rval.update({"err": err, +                             "stderr": stderr, +                             "stdout": stdout, +                             "cmd": cmds}) + +        else: +            rval.update({"stderr": stderr, +                         "stdout": stdout, +                         "results": {}}) + +        return rval + + +class Utils(object): +    ''' utilities for openshiftcli modules ''' + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. 
''' + +        with open(filename, 'w') as sfd: +            sfd.write(contents) + +    @staticmethod +    def create_tmp_file_from_contents(rname, data, ftype='yaml'): +        ''' create a file in tmp with name and contents''' + +        tmp = Utils.create_tmpfile(prefix=rname) + +        if ftype == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripDumper'): +                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) +            else: +                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) + +        elif ftype == 'json': +            Utils._write(tmp, json.dumps(data)) +        else: +            Utils._write(tmp, data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [tmp]) +        return tmp + +    @staticmethod +    def create_tmpfile_copy(inc_file): +        '''create a temporary copy of a file''' +        tmpfile = Utils.create_tmpfile('lib_openshift-') +        Utils._write(tmpfile, open(inc_file).read()) + +        # Cleanup the tmpfile +        atexit.register(Utils.cleanup, [tmpfile]) + +        return tmpfile + +    @staticmethod +    def create_tmpfile(prefix='tmp'): +        ''' Generates and returns a temporary file name ''' + +        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: +            return tmp.name + +    @staticmethod +    def create_tmp_files_from_contents(content, content_type=None): +        '''Turn an array of dict: filename, content into a files array''' +        if not isinstance(content, list): +            content = [content] +        files = [] +        for item in content: +            path = Utils.create_tmp_file_from_contents(item['path'] + '-', +                                                       item['data'], +                                                       ftype=content_type) +            files.append({'name': os.path.basename(item['path']), +                          'path': path}) +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if 'metadata' in result and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripLoader'): +                contents = yaml.load(contents, yaml.RoundTripLoader) +            else: +                contents = 
yaml.safe_load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    @staticmethod +    def filter_versions(stdout): +        ''' filter the oc version output ''' + +        version_dict = {} +        version_search = ['oc', 'openshift', 'kubernetes'] + +        for line in stdout.strip().split('\n'): +            for term in version_search: +                if not line: +                    continue +                if line.startswith(term): +                    version_dict[term] = line.split()[-1] + +        # horrible hack to get openshift version in Openshift 3.2 +        #  By default "oc version in 3.2 does not return an "openshift" version +        if "openshift" not in version_dict: +            version_dict["openshift"] = version_dict["oc"] + +        return version_dict + +    @staticmethod +    def add_custom_versions(versions): +        ''' create custom versions strings ''' + +        versions_dict = {} + +        for tech, version in versions.items(): +            # clean up "-" from version +            if "-" in version: +                version = version.split("-")[0] + +            if version.startswith('v'): +                versions_dict[tech + '_numeric'] = version[1:].split('+')[0] +                # "v3.3.0.33" is what we have, we want "3.3" +                versions_dict[tech + '_short'] = version[1:4] + +        return versions_dict + +    @staticmethod +    def openshift_installed(): +        ''' check if openshift is installed ''' +        import yum + +        yum_base = yum.YumBase() +        if yum_base.rpmdb.searchNevra(name='atomic-openshift'): +            return True + +        return False + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements +    @staticmethod +    def check_def_equal(user_def, result_def, skip_keys=None, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  
''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['metadata', 'status'] +        if skip_keys: +            skip.extend(skip_keys) + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if key not in user_def: +                    if debug: +                        print('User data does not have key [%s]' % key) +                        print('User data: %s' % user_def) +                    return False + +                if not isinstance(user_def[key], list): +                    if debug: +                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) +                    return False + +                if len(user_def[key]) != len(value): +                    if debug: +                        print("List lengths are not equal.") +                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) +                        print("user_def: %s" % user_def[key]) +                        print("value: %s" % value) +                    return False + +                for values in zip(user_def[key], value): +                    if isinstance(values[0], dict) and isinstance(values[1], dict): +                        if debug: +                            print('sending list - list') +                            print(type(values[0])) +                            print(type(values[1])) +                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) +                        if not result: +                            print('list compare returned false') +                            return False + +                    elif value != user_def[key]: +                        if debug: +                            print('value should be identical') +                            print(user_def[key]) +                            print(value) +                        return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if key not in user_def: +                    if debug: +                        print("user_def does not have key [%s]" % key) +                    return False +                if not isinstance(user_def[key], dict): +                    if debug: +                        print("dict returned false: not instance of dict") +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print("keys are not equal in dict") +                        print(user_values) +                        print(api_values) +                    return False + +                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) +                if not result: +                    if debug: +                        print("dict returned false") +                        print(result) +                    return False + +            # Verify each key, value pair is the same +            else: +                if key not in user_def or value != user_def[key]: +                    if debug: +                        print("value not equal; 
user_def does not have key") +                        print(key) +                        print(value) +                        if key in user_def: +                            print(user_def[key]) +                    return False + +        if debug: +            print('returning true') +        return True + + +class OpenShiftCLIConfig(object): +    '''Generic Config''' +    def __init__(self, rname, namespace, kubeconfig, options): +        self.kubeconfig = kubeconfig +        self.name = rname +        self.namespace = namespace +        self._options = options + +    @property +    def config_options(self): +        ''' return config options ''' +        return self._options + +    def to_option_list(self): +        '''return all options as a string''' +        return self.stringify() + +    def stringify(self): +        ''' return the options hash as cli params in a string ''' +        rval = [] +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key] +            if data['include'] \ +               and (data['value'] or isinstance(data['value'], int)): +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) + +        return rval + + +# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/pvc.py -*- -*- -*- + + +# pylint: disable=too-many-instance-attributes +class PersistentVolumeClaimConfig(object): +    ''' Handle pvc options ''' +    # pylint: disable=too-many-arguments +    def __init__(self, +                 sname, +                 namespace, +                 kubeconfig, +                 access_modes=None, +                 vol_capacity='1G'): +        ''' constructor for handling pvc options ''' +        self.kubeconfig = kubeconfig +        self.name = sname +        self.namespace = namespace +        self.access_modes = access_modes +        self.vol_capacity = vol_capacity +        self.data = {} + +        self.create_dict() + +    def create_dict(self): +        ''' return a service as a dict ''' +        # version +        self.data['apiVersion'] = 'v1' +        # kind +        self.data['kind'] = 'PersistentVolumeClaim' +        # metadata +        self.data['metadata'] = {} +        self.data['metadata']['name'] = self.name +        # spec +        self.data['spec'] = {} +        self.data['spec']['accessModes'] = ['ReadWriteOnce'] +        if self.access_modes: +            self.data['spec']['accessModes'] = self.access_modes + +        # storage capacity +        self.data['spec']['resources'] = {} +        self.data['spec']['resources']['requests'] = {} +        self.data['spec']['resources']['requests']['storage'] = self.vol_capacity + + +# pylint: disable=too-many-instance-attributes,too-many-public-methods +class PersistentVolumeClaim(Yedit): +    ''' Class to wrap the oc command line tools ''' +    access_modes_path = "spec.accessModes" +    volume_capacity_path = "spec.requests.storage" +    volume_name_path = "spec.volumeName" +    bound_path = "status.phase" +    kind = 'PersistentVolumeClaim' + +    def __init__(self, content): +        '''RoleBinding constructor''' +        super(PersistentVolumeClaim, self).__init__(content=content) +        self._access_modes = None +        self._volume_capacity = None +        self._volume_name = None + +    @property +    def volume_name(self): +        ''' volume_name property ''' +        if self._volume_name is None: +            self._volume_name = self.get_volume_name() +        
return self._volume_name + +    @volume_name.setter +    def volume_name(self, data): +        ''' volume_name property setter''' +        self._volume_name = data + +    @property +    def access_modes(self): +        ''' access_modes property ''' +        if self._access_modes is None: +            self._access_modes = self.get_access_modes() +            if not isinstance(self._access_modes, list): +                self._access_modes = list(self._access_modes) + +        return self._access_modes + +    @access_modes.setter +    def access_modes(self, data): +        ''' access_modes property setter''' +        if not isinstance(data, list): +            data = list(data) + +        self._access_modes = data + +    @property +    def volume_capacity(self): +        ''' volume_capacity property ''' +        if self._volume_capacity is None: +            self._volume_capacity = self.get_volume_capacity() +        return self._volume_capacity + +    @volume_capacity.setter +    def volume_capacity(self, data): +        ''' volume_capacity property setter''' +        self._volume_capacity = data + +    def get_access_modes(self): +        '''get access_modes''' +        return self.get(PersistentVolumeClaim.access_modes_path) or [] + +    def get_volume_capacity(self): +        '''get volume_capacity''' +        return self.get(PersistentVolumeClaim.volume_capacity_path) or [] + +    def get_volume_name(self): +        '''get volume_name''' +        return self.get(PersistentVolumeClaim.volume_name_path) or [] + +    def is_bound(self): +        '''return whether volume is bound''' +        return self.get(PersistentVolumeClaim.bound_path) or [] + +    #### ADD ##### +    def add_access_mode(self, inc_mode): +        ''' add an access_mode''' +        if self.access_modes: +            self.access_modes.append(inc_mode) +        else: +            self.put(PersistentVolumeClaim.access_modes_path, [inc_mode]) + +        return True + +    #### /ADD ##### + +    #### Remove ##### +    def remove_access_mode(self, inc_mode): +        ''' remove an access_mode''' +        try: +            self.access_modes.remove(inc_mode) +        except ValueError as _: +            return False + +        return True + +    #### /REMOVE ##### + +    #### UPDATE ##### +    def update_access_mode(self, inc_mode): +        ''' update an access_mode''' +        try: +            index = self.access_modes.index(inc_mode) +        except ValueError as _: +            return self.add_access_mode(inc_mode) + +        self.access_modes[index] = inc_mode + +        return True + +    #### /UPDATE ##### + +    #### FIND #### +    def find_access_mode(self, inc_mode): +        ''' find a user ''' +        index = None +        try: +            index = self.access_modes.index(inc_mode) +        except ValueError as _: +            return index + +        return index + +# -*- -*- -*- End included fragment: lib/pvc.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_pvc.py -*- -*- -*- + + +# pylint: disable=too-many-instance-attributes +class OCPVC(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' +    kind = 'pvc' + +    # pylint allows 5 +    # pylint: disable=too-many-arguments +    def __init__(self, +                 config, +                 verbose=False): +        ''' Constructor for OCVolume ''' +        super(OCPVC, self).__init__(config.namespace, config.kubeconfig) +        self.config = config +        self.namespace = config.namespace +        self._pvc = None + +    
+
+    @property
+    def pvc(self):
+        ''' property function pvc'''
+        if not self._pvc:
+            self.get()
+        return self._pvc
+
+    @pvc.setter
+    def pvc(self, data):
+        ''' setter function for pvc '''
+        self._pvc = data
+
+    def bound(self):
+        '''return whether the pvc is bound'''
+        if self.pvc.get_volume_name():
+            return True
+
+        return False
+
+    def exists(self):
+        ''' return whether a pvc exists '''
+        if self.pvc:
+            return True
+
+        return False
+
+    def get(self):
+        '''return pvc information '''
+        result = self._get(self.kind, self.config.name)
+        if result['returncode'] == 0:
+            self.pvc = PersistentVolumeClaim(content=result['results'][0])
+        elif '\"%s\" not found' % self.config.name in result['stderr']:
+            result['returncode'] = 0
+            result['results'] = [{}]
+
+        return result
+
+    def delete(self):
+        '''delete the object'''
+        return self._delete(self.kind, self.config.name)
+
+    def create(self):
+        '''create the object'''
+        return self._create_from_content(self.config.name, self.config.data)
+
+    def update(self):
+        '''update the object'''
+        # replace the existing pvc definition with the desired content
+        return self._replace_content(self.kind, self.config.name, self.config.data)
+
+    def needs_update(self):
+        ''' verify an update is needed '''
+        if self.pvc.get_volume_name() or self.pvc.is_bound():
+            return False
+
+        skip = []
+        return not Utils.check_def_equal(self.config.data, self.pvc.yaml_dict, skip_keys=skip, debug=True)
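+
+    # Illustrative note: a bound claim's spec is effectively immutable, so
+    # needs_update() short-circuits to False for bound claims and only
+    # compares unbound claims against the desired definition.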
+
+    # pylint: disable=too-many-branches,too-many-return-statements
+    @staticmethod
+    def run_ansible(params, check_mode):
+        '''run the idempotent ansible code'''
+        pconfig = PersistentVolumeClaimConfig(params['name'],
+                                              params['namespace'],
+                                              params['kubeconfig'],
+                                              params['access_modes'],
+                                              params['volume_capacity'],
+                                             )
+        oc_pvc = OCPVC(pconfig, verbose=params['debug'])
+
+        state = params['state']
+
+        api_rval = oc_pvc.get()
+        if api_rval['returncode'] != 0:
+            return {'failed': True, 'msg': api_rval}
+
+        #####
+        # Get
+        #####
+        if state == 'list':
+            return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+        ########
+        # Delete
+        ########
+        if state == 'absent':
+            if oc_pvc.exists():
+
+                if check_mode:
+                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+                api_rval = oc_pvc.delete()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            return {'changed': False, 'state': state}
+
+        if state == 'present':
+            ########
+            # Create
+            ########
+            if not oc_pvc.exists():
+
+                if check_mode:
+                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+                # Create it here
+                api_rval = oc_pvc.create()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                # return the created object
+                api_rval = oc_pvc.get()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            ########
+            # Update
+            ########
+            if oc_pvc.pvc.is_bound() or oc_pvc.pvc.get_volume_name():
+                api_rval['msg'] = '##### - This volume is currently bound. Will not update - #####'
+                return {'changed': False, 'results': api_rval, 'state': state}
+
+            if oc_pvc.needs_update():
+                api_rval = oc_pvc.update()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                # return the updated object
+                api_rval = oc_pvc.get()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            return {'changed': False, 'results': api_rval, 'state': state}
+
+        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
+
+# -*- -*- -*- End included fragment: class/oc_pvc.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_pvc.py -*- -*- -*-
+
+# pylint: disable=too-many-branches
+def main():
+    '''
+    ansible oc module for pvc
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+            state=dict(default='present', type='str',
+                       choices=['present', 'absent', 'list']),
+            debug=dict(default=False, type='bool'),
+            name=dict(default=None, required=True, type='str'),
+            namespace=dict(default=None, required=True, type='str'),
+            volume_capacity=dict(default='1G', type='str'),
+            access_modes=dict(default='ReadWriteOnce',
+                              choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'],
+                              type='str'),
+        ),
+        supports_check_mode=True,
+    )
+
+    rval = OCPVC.run_ansible(module.params, module.check_mode)
+
+    if 'failed' in rval:
+        module.fail_json(**rval)
+
+    return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+    main()
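+
+# A minimal sketch of invoking this module from a playbook (illustrative
+# values; access_modes takes a single mode from the documented choices):
+#
+#   - name: create a 5G pvc for the app
+#     oc_pvc:
+#       namespace: awesomeapp
+#       name: dbstorage
+#       access_modes: ReadWriteOnce
+#       volume_capacity: 5G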
+
+# -*- -*- -*- End included fragment: ansible/oc_pvc.py -*- -*- -*-
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
new file mode 100644
index 000000000..e9e29468a
--- /dev/null
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -0,0 +1,2024 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+#     ___ ___ _  _ ___ ___    _ _____ ___ ___
+#    / __| __| \| | __| _ \  /_\_   _| __|   \
+#   | (_ | _|| .` | _||   / / _ \| | | _|| |) |
+#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _|
+#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | |
+#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
+'''
+   OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import copy
+import json
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+# pylint: disable=import-error
+try:
+    import ruamel.yaml as yaml
+except ImportError:
+    import yaml
+
+from ansible.module_utils.basic import AnsibleModule
+
+# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: doc/volume -*- -*- -*-
+
+DOCUMENTATION = '''
+---
+module: oc_volume
+short_description: Create, modify, and idempotently manage openshift volumes.
+description:
+  - Modify openshift volumes programmatically.
+options:
+  state:
+    description:
+    - State controls the action that will be taken with the resource
+    - 'present' will create or update an object to the desired state
+    - 'absent' will ensure volumes are removed
+    - 'list' will read the volumes
+    default: present
+    choices: ["present", "absent", "list"]
+    aliases: []
+  kubeconfig:
+    description:
+    - The path for the kubeconfig file to use for authentication
+    required: false
+    default: /etc/origin/master/admin.kubeconfig
+    aliases: []
+  debug:
+    description:
+    - Turn on debug output.
+    required: false
+    default: False
+    aliases: []
+  namespace:
+    description:
+    - The name of the namespace where the object lives
+    required: false
+    default: default
+    aliases: []
+  kind:
+    description:
+    - The kind of object that can be managed.
+    default: dc
+    choices:
+    - dc
+    - rc
+    - pods
+    aliases: []
+  mount_type:
+    description:
+    - The type of volume to be used
+    required: false
+    default: None
+    choices:
+    - emptydir
+    - hostpath
+    - secret
+    - pvc
+    - configmap
+    aliases: []
+  mount_path:
+    description:
+    - The path to where the mount will be attached
+    required: false
+    default: None
+    aliases: []
+  secret_name:
+    description:
+    - The name of the secret. Used when mount_type is secret.
+    required: false
+    default: None
+    aliases: []
+  claim_size:
+    description:
+    - The size of the pv claim, e.g. 100G
+    required: false
+    default: None
+    aliases: []
+  claim_name:
+    description:
+    - The name of the pv claim
+    required: false
+    default: None
+    aliases: []
+  configmap_name:
+    description:
+    - The name of the configmap
+    required: false
+    default: None
+    aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: attach storage volumes to deploymentconfig
+  oc_volume:
+    namespace: logging
+    kind: dc
+    name: name_of_the_dc
+    mount_type: pvc
+    claim_name: loggingclaim
+    claim_size: 100G
+    vol_name: logging-storage
+  run_once: true
+'''
+
+# -*- -*- -*- End included fragment: doc/volume -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
+# pylint: disable=undefined-variable,missing-docstring
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+    ''' Exception class for Yedit '''
+    pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+    ''' Class to modify yaml files '''
+    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+    com_sep = set(['.', '#', '|', ':'])
+
+    # pylint: disable=too-many-arguments
+    def __init__(self,
+                 filename=None,
+                 content=None,
+                 content_type='yaml',
+                 separator='.',
+                 backup=False):
+        self.content = content
+        self._separator = separator
+        self.filename = filename
+        self.__yaml_dict = content
+        self.content_type = content_type
+        self.backup = backup
+        self.load(content_type=self.content_type)
+        if self.__yaml_dict is None:
+            self.__yaml_dict = {}
+
+    @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, inc_sep):
+        ''' setter method for separator '''
+        self._separator = inc_sep
+
+    @property
+    def yaml_dict(self):
+        ''' getter method for yaml_dict '''
+        return self.__yaml_dict
+
+    @yaml_dict.setter
+    def yaml_dict(self, value):
+        ''' setter method for yaml_dict '''
+        self.__yaml_dict = value
+
+    @staticmethod
+    def parse_key(key, sep='.'):
+        '''parse the key allowing the appropriate separator'''
+        common_separators = list(Yedit.com_sep - set([sep]))
+        return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+    @staticmethod
+    def valid_key(key, sep='.'):
+        '''validate the incoming key'''
+        common_separators = list(Yedit.com_sep - set([sep]))
+        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+            return False
+
+        return True
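+
+    # Illustrative: with the default '.' separator, parse_key('a.b[0].c')
+    # yields [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]; dict keys land in
+    # the second slot of each tuple, list indexes in the first.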
+    @staticmethod
+    def remove_entry(data, key, sep='.'):
+        ''' remove data at location key '''
+        if key == '' and isinstance(data, dict):
+            data.clear()
+            return True
+        elif key == '' and isinstance(data, list):
+            del data[:]
+            return True
+
+        if not (key and Yedit.valid_key(key, sep)) and \
+           isinstance(data, (list, dict)):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes[:-1]:
+            if dict_key and isinstance(data, dict):
+                data = data.get(dict_key, None)
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data = data[int(arr_ind)]
+            else:
+                return None
+
+        # process last index for remove
+        # expected list entry
+        if key_indexes[-1][0]:
+            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
+                del data[int(key_indexes[-1][0])]
+                return True
+
+        # expected dict entry
+        elif key_indexes[-1][1]:
+            if isinstance(data, dict):
+                del data[key_indexes[-1][1]]
+                return True
+
+    @staticmethod
+    def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a#b
+            return c
+        '''
+        if key == '':
+            pass
+        elif (not (key and Yedit.valid_key(key, sep)) and
+              isinstance(data, (list, dict))):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes[:-1]:
+            if dict_key:
+                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
+                    data = data[dict_key]
+                    continue
+
+                elif data and not isinstance(data, dict):
+                    raise YeditException("Unexpected item type found while going through key " +
+                                         "path: {} (at key: {})".format(key, dict_key))
+
+                data[dict_key] = {}
+                data = data[dict_key]
+
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data = data[int(arr_ind)]
+            else:
+                raise YeditException("Unexpected item type found while going through key path: {}".format(key))
+
+        if key == '':
+            data = item
+
+        # process last index for add
+        # expected list entry
+        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
+            data[int(key_indexes[-1][0])] = item
+
+        # expected dict entry
+        elif key_indexes[-1][1] and isinstance(data, dict):
+            data[key_indexes[-1][1]] = item
+
+        # didn't add/update to an existing list, nor add/update key to a dict
+        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
+        # non-existent array
+        else:
+            raise YeditException("Error adding to object at path: {}".format(key))
+
+        return data
+
+    @staticmethod
+    def get_entry(data, key, sep='.'):
+        ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a.b
+            return c
+        '''
+        if key == '':
+            pass
+        elif (not (key and Yedit.valid_key(key, sep)) and
+              isinstance(data, (list, dict))):
+            return None
+
+        key_indexes = Yedit.parse_key(key, sep)
+        for arr_ind, dict_key in key_indexes:
+            if dict_key and isinstance(data, dict):
+                data = data.get(dict_key, None)
+            elif (arr_ind and isinstance(data, list) and
+                  int(arr_ind) <= len(data) - 1):
+                data = data[int(arr_ind)]
+            else:
+                return None
+
+        return data
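+
+    # Key-notation sketch (illustrative, mirroring the docstrings above):
+    #   y = Yedit(content={'a': {'b': {'c': 'd'}}})
+    #   y.get('a.b.c')       -> 'd'
+    #   y.put('a.b.e', 'f')  -> (True, {'a': {'b': {'c': 'd', 'e': 'f'}}})
+    # List indexes use brackets, e.g. 'spec.containers[0].name'.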
+
+    @staticmethod
+    def _write(filename, contents):
+        ''' Actually write the file contents to disk. This helps with mocking. '''
+
+        tmp_filename = filename + '.yedit'
+
+        with open(tmp_filename, 'w') as yfd:
+            yfd.write(contents)
+
+        os.rename(tmp_filename, filename)
+
+    def write(self):
+        ''' write to file '''
+        if not self.filename:
+            raise YeditException('Please specify a filename.')
+
+        if self.backup and self.file_exists():
+            shutil.copy(self.filename, self.filename + '.orig')
+
+        # Try to set format attributes if supported
+        try:
+            self.yaml_dict.fa.set_block_style()
+        except AttributeError:
+            pass
+
+        # Try to use RoundTripDumper if supported.
+        try:
+            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+        except AttributeError:
+            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+        return (True, self.yaml_dict)
+
+    def read(self):
+        ''' read from file '''
+        # check if it exists
+        if self.filename is None or not self.file_exists():
+            return None
+
+        contents = None
+        with open(self.filename) as yfd:
+            contents = yfd.read()
+
+        return contents
+
+    def file_exists(self):
+        ''' return whether file exists '''
+        if os.path.exists(self.filename):
+            return True
+
+        return False
+
+    def load(self, content_type='yaml'):
+        ''' return yaml file '''
+        contents = self.read()
+
+        if not contents and not self.content:
+            return None
+
+        if self.content:
+            if isinstance(self.content, dict):
+                self.yaml_dict = self.content
+                return self.yaml_dict
+            elif isinstance(self.content, str):
+                contents = self.content
+
+        # check if it is yaml
+        try:
+            if content_type == 'yaml' and contents:
+                # Try to set format attributes if supported
+                try:
+                    self.yaml_dict.fa.set_block_style()
+                except AttributeError:
+                    pass
+
+                # Try to use RoundTripLoader if supported; safe_load does not
+                # accept a Loader argument, so fall back to it under PyYAML.
+                try:
+                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+                except AttributeError:
+                    self.yaml_dict = yaml.safe_load(contents)
+
+                # Try to set format attributes if supported
+                try:
+                    self.yaml_dict.fa.set_block_style()
+                except AttributeError:
+                    pass
+
+            elif content_type == 'json' and contents:
+                self.yaml_dict = json.loads(contents)
+        except yaml.YAMLError as err:
+            # Error loading yaml or json
+            raise YeditException('Problem with loading yaml file. 
%s' % err) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # AUDIT:maybe-no-member makes sense due to loading data from +        # a serialized format. 
+        # pylint: disable=maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in ' + +                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501 + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        # Try to use ruamel.yaml and fallback to pyyaml +        try: +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                      default_flow_style=False), +                                 yaml.RoundTripLoader) +        except AttributeError: +            tmp_copy = copy.deepcopy(self.yaml_dict) + +        # set the format attributes if available +        try: +            tmp_copy.fa.set_block_style() +        except AttributeError: +            pass + +        result = Yedit.add_entry(tmp_copy, path, value, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            # Try to use ruamel.yaml and fallback to pyyaml +            try: +                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                          default_flow_style=False), +                                     yaml.RoundTripLoader) +            except AttributeError: +                tmp_copy = copy.deepcopy(self.yaml_dict) + +            # set the format attributes if 
available +            try: +                tmp_copy.fa.set_block_style() +            except AttributeError: +                pass + +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod +    def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]' +                                     % (inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # If vtype is not str then go ahead and attempt to yaml load it. +        if isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming ' + +                                     'value. value=[%s] vtype=[%s]' +                                     % (type(inc_value), vtype)) + +        return inc_value + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(module): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=module.params['src'], +                         backup=module.params['backup'], +                         separator=module.params['separator']) + +        if module.params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and \ +               module.params['state'] != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [%s].  
Verify that the ' +
+                               'file exists, that it has correct' +
+                               ' permissions, and is valid yaml.'}
+
+        if module.params['state'] == 'list':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['key']:
+                rval = yamlfile.get(module.params['key']) or {}
+
+            return {'changed': False, 'result': rval, 'state': "list"}
+
+        elif module.params['state'] == 'absent':
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+                yamlfile.yaml_dict = content
+
+            if module.params['update']:
+                rval = yamlfile.pop(module.params['key'],
+                                    module.params['value'])
+            else:
+                rval = yamlfile.delete(module.params['key'])
+
+            if rval[0] and module.params['src']:
+                yamlfile.write()
+
+            return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+        elif module.params['state'] == 'present':
+            # check if content is different than what is in the file
+            if module.params['content']:
+                content = Yedit.parse_value(module.params['content'],
+                                            module.params['content_type'])
+
+                # We had no edits to make and the contents are the same
+                if yamlfile.yaml_dict == content and \
+                   module.params['value'] is None:
+                    return {'changed': False,
+                            'result': yamlfile.yaml_dict,
+                            'state': "present"}
+
+                yamlfile.yaml_dict = content
+
+            # we were passed a value; parse it
+            if module.params['value']:
+                value = Yedit.parse_value(module.params['value'],
+                                          module.params['value_type'])
+                key = module.params['key']
+                if module.params['update']:
+                    # pylint: disable=line-too-long
+                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501
+                                                      module.params['curr_value_format'])  # noqa: E501
+
+                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501
+
+                elif module.params['append']:
+                    rval = yamlfile.append(key, value)
+                else:
+                    rval = yamlfile.put(key, value)
+
+                if rval[0] and module.params['src']:
+                    yamlfile.write()
+
+                return {'changed': rval[0],
+                        'result': rval[1], 'state': "present"}
+
+            # no edits to make
+            if module.params['src']:
+                # pylint: disable=redefined-variable-type
+                rval = yamlfile.write()
+                return {'changed': rval[0],
+                        'result': rval[1],
+                        'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+
+# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- 
-*- -*- + +# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- +# pylint: disable=too-many-lines +# noqa: E301,E302,E303,T001 + + +class OpenShiftCLIError(Exception): +    '''Exception class for openshiftcli''' +    pass + + +ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] + + +def locate_oc_binary(): +    ''' Find and return oc binary file ''' +    # https://github.com/openshift/openshift-ansible/issues/3410 +    # oc can be in /usr/local/bin in some cases, but that may not +    # be in $PATH due to ansible/sudo +    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS + +    oc_binary = 'oc' + +    # Use shutil.which if it is available, otherwise fallback to a naive path search +    try: +        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) +        if which_result is not None: +            oc_binary = which_result +    except AttributeError: +        for path in paths: +            if os.path.exists(os.path.join(path, oc_binary)): +                oc_binary = os.path.join(path, oc_binary) +                break + +    return oc_binary + + +# pylint: disable=too-few-public-methods +class OpenShiftCLI(object): +    ''' Class to wrap the command line tools ''' +    def __init__(self, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False, +                 all_namespaces=False): +        ''' Constructor for OpenshiftCLI ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) +        self.all_namespaces = all_namespaces +        self.oc_binary = locate_oc_binary() + +    # Pylint allows only 5 arguments to be passed. 
+    # pylint: disable=too-many-arguments
+    def _replace_content(self, resource, rname, content, force=False, sep='.'):
+        ''' replace the current object with the content '''
+        res = self._get(resource, rname)
+        if not res['results']:
+            return res
+
+        fname = Utils.create_tmpfile(rname + '-')
+
+        yed = Yedit(fname, res['results'][0], separator=sep)
+        changes = []
+        for key, value in content.items():
+            changes.append(yed.put(key, value))
+
+        if any([change[0] for change in changes]):
+            yed.write()
+
+            atexit.register(Utils.cleanup, [fname])
+
+            return self._replace(fname, force)
+
+        return {'returncode': 0, 'updated': False}
+
+    def _replace(self, fname, force=False):
+        '''replace the current object with oc replace'''
+        cmd = ['replace', '-f', fname]
+        if force:
+            cmd.append('--force')
+        return self.openshift_cmd(cmd)
+
+    def _create_from_content(self, rname, content):
+        '''create a temporary file and then call oc create on it'''
+        fname = Utils.create_tmpfile(rname + '-')
+        yed = Yedit(fname, content=content)
+        yed.write()
+
+        atexit.register(Utils.cleanup, [fname])
+
+        return self._create(fname)
+
+    def _create(self, fname):
+        '''call oc create on a filename'''
+        return self.openshift_cmd(['create', '-f', fname])
+
+    def _delete(self, resource, rname, selector=None):
+        '''call oc delete on a resource'''
+        cmd = ['delete', resource, rname]
+        if selector:
+            cmd.append('--selector=%s' % selector)
+
+        return self.openshift_cmd(cmd)
+
+    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
+        '''process a template
+
+           template_name: the name of the template to process
+           create: whether to send to oc create after processing
+           params: the parameters for the template
+           template_data: the incoming template's data; instead of a file
+        '''
+        cmd = ['process']
+        if template_data:
+            cmd.extend(['-f', '-'])
+        else:
+            cmd.append(template_name)
+        if params:
+            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+            cmd.append('-v')
+            cmd.extend(param_str)
+
+        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+        if results['returncode'] != 0 or not create:
+            return results
+
+        fname = Utils.create_tmpfile(template_name + '-')
+        yed = Yedit(fname, results['results'])
+        yed.write()
+
+        atexit.register(Utils.cleanup, [fname])
+
+        return self.openshift_cmd(['create', '-f', fname])
+
+    def _get(self, resource, rname=None, selector=None):
+        '''return a resource by name '''
+        cmd = ['get', resource]
+        if selector:
+            cmd.append('--selector=%s' % selector)
+        elif rname:
+            cmd.append(rname)
+
+        cmd.extend(['-o', 'json'])
+
+        rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+        if 'items' in rval:
+            rval['results'] = rval['items']
+        elif not isinstance(rval['results'], list):
+            rval['results'] = [rval['results']]
+
+        return rval
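+
+    # Illustrative flow: _replace_content() above layers the desired keys
+    # onto the live object with Yedit.put(); only when at least one put
+    # reports a change does it shell out to `oc replace -f <tmpfile>`.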
+
+    def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+        cmd = ['manage-node']
+        if node:
+            cmd.extend(node)
+        else:
+            cmd.append('--selector=%s' % selector)
+
+        cmd.append('--schedulable=%s' % schedulable)
+
+        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501
+
+    def _list_pods(self, node=None, selector=None, pod_selector=None):
+        ''' perform oadm list pods
+
+            node: the node in which to list pods
+            selector: the label selector filter if provided
+            pod_selector: the pod selector filter if provided
+        '''
+        cmd = ['manage-node']
+        if node:
+            cmd.extend(node)
+        else:
+            cmd.append('--selector=%s' % selector)
+
+        if pod_selector:
+            cmd.append('--pod-selector=%s' % pod_selector)
+
+        cmd.extend(['--list-pods', '-o', 'json'])
+
+        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+    # pylint: disable=too-many-arguments
+    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+        ''' perform oadm manage-node evacuate '''
+        cmd = ['manage-node']
+        if node:
+            cmd.extend(node)
+        else:
+            cmd.append('--selector=%s' % selector)
+
+        if dry_run:
+            cmd.append('--dry-run')
+
+        if pod_selector:
+            cmd.append('--pod-selector=%s' % pod_selector)
+
+        if grace_period:
+            cmd.append('--grace-period=%s' % int(grace_period))
+
+        if force:
+            cmd.append('--force')
+
+        cmd.append('--evacuate')
+
+        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+    def _version(self):
+        ''' return the openshift version'''
+        return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+    def _import_image(self, url=None, name=None, tag=None):
+        ''' perform image import '''
+        cmd = ['import-image']
+
+        image = '{0}'.format(name)
+        if tag:
+            image += ':{0}'.format(tag)
+
+        cmd.append(image)
+
+        if url:
+            cmd.append('--from={0}/{1}'.format(url, image))
+
+        cmd.append('-n{0}'.format(self.namespace))
+
+        cmd.append('--confirm')
+        return self.openshift_cmd(cmd)
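+
+    # Note: openshift_cmd() below builds the argv as
+    # [oc, (adm,) <cmd...>, -n <namespace>] and injects KUBECONFIG via the
+    # environment in _run(); nothing is passed through a shell.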
+    def _run(self, cmds, input_data):
+        ''' Actually executes the command. This makes mocking easier. '''
+        curr_env = os.environ.copy()
+        curr_env.update({'KUBECONFIG': self.kubeconfig})
+        proc = subprocess.Popen(cmds,
+                                stdin=subprocess.PIPE,
+                                stdout=subprocess.PIPE,
+                                stderr=subprocess.PIPE,
+                                env=curr_env)
+
+        stdout, stderr = proc.communicate(input_data)
+
+        return proc.returncode, stdout.decode(), stderr.decode()
+
+    # pylint: disable=too-many-arguments,too-many-branches
+    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+        '''Base command for oc '''
+        cmds = [self.oc_binary]
+
+        if oadm:
+            cmds.append('adm')
+
+        cmds.extend(cmd)
+
+        if self.all_namespaces:
+            cmds.extend(['--all-namespaces'])
+        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
+            cmds.extend(['-n', self.namespace])
+
+        rval = {}
+        results = ''
+        err = None
+
+        if self.verbose:
+            print(' '.join(cmds))
+
+        try:
+            returncode, stdout, stderr = self._run(cmds, input_data)
+        except OSError as ex:
+            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
+
+        rval = {"returncode": returncode,
+                "results": results,
+                "cmd": ' '.join(cmds)}
+
+        if returncode == 0:
+            if output:
+                if output_type == 'json':
+                    try:
+                        rval['results'] = json.loads(stdout)
+                    except ValueError as verr:
+                        if "No JSON object could be decoded" in verr.args:
+                            err = verr.args
+                elif output_type == 'raw':
+                    rval['results'] = stdout
+
+            if self.verbose:
+                print("STDOUT: {0}".format(stdout))
+                print("STDERR: {0}".format(stderr))
+
+            if err:
+                rval.update({"err": err,
+                             "stderr": stderr,
+                             "stdout": stdout,
+                             "cmd": cmds})
+
+        else:
+            rval.update({"stderr": stderr,
+                         "stdout": stdout,
+                         "results": {}})
+
+        return rval
+
+
+class Utils(object):
+    ''' utilities for openshiftcli modules '''
+
+    @staticmethod
+    def _write(filename, contents):
+        ''' Actually write the file contents to disk. This helps with mocking. 
''' + +        with open(filename, 'w') as sfd: +            sfd.write(contents) + +    @staticmethod +    def create_tmp_file_from_contents(rname, data, ftype='yaml'): +        ''' create a file in tmp with name and contents''' + +        tmp = Utils.create_tmpfile(prefix=rname) + +        if ftype == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripDumper'): +                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) +            else: +                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) + +        elif ftype == 'json': +            Utils._write(tmp, json.dumps(data)) +        else: +            Utils._write(tmp, data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [tmp]) +        return tmp + +    @staticmethod +    def create_tmpfile_copy(inc_file): +        '''create a temporary copy of a file''' +        tmpfile = Utils.create_tmpfile('lib_openshift-') +        Utils._write(tmpfile, open(inc_file).read()) + +        # Cleanup the tmpfile +        atexit.register(Utils.cleanup, [tmpfile]) + +        return tmpfile + +    @staticmethod +    def create_tmpfile(prefix='tmp'): +        ''' Generates and returns a temporary file name ''' + +        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: +            return tmp.name + +    @staticmethod +    def create_tmp_files_from_contents(content, content_type=None): +        '''Turn an array of dict: filename, content into a files array''' +        if not isinstance(content, list): +            content = [content] +        files = [] +        for item in content: +            path = Utils.create_tmp_file_from_contents(item['path'] + '-', +                                                       item['data'], +                                                       ftype=content_type) +            files.append({'name': os.path.basename(item['path']), +                          'path': path}) +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if 'metadata' in result and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripLoader'): +                contents = yaml.load(contents, yaml.RoundTripLoader) +            else: +                contents = 
yaml.safe_load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    @staticmethod +    def filter_versions(stdout): +        ''' filter the oc version output ''' + +        version_dict = {} +        version_search = ['oc', 'openshift', 'kubernetes'] + +        for line in stdout.strip().split('\n'): +            for term in version_search: +                if not line: +                    continue +                if line.startswith(term): +                    version_dict[term] = line.split()[-1] + +        # horrible hack to get openshift version in Openshift 3.2 +        #  By default "oc version in 3.2 does not return an "openshift" version +        if "openshift" not in version_dict: +            version_dict["openshift"] = version_dict["oc"] + +        return version_dict + +    @staticmethod +    def add_custom_versions(versions): +        ''' create custom versions strings ''' + +        versions_dict = {} + +        for tech, version in versions.items(): +            # clean up "-" from version +            if "-" in version: +                version = version.split("-")[0] + +            if version.startswith('v'): +                versions_dict[tech + '_numeric'] = version[1:].split('+')[0] +                # "v3.3.0.33" is what we have, we want "3.3" +                versions_dict[tech + '_short'] = version[1:4] + +        return versions_dict + +    @staticmethod +    def openshift_installed(): +        ''' check if openshift is installed ''' +        import yum + +        yum_base = yum.YumBase() +        if yum_base.rpmdb.searchNevra(name='atomic-openshift'): +            return True + +        return False + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements +    @staticmethod +    def check_def_equal(user_def, result_def, skip_keys=None, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  
''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['metadata', 'status'] +        if skip_keys: +            skip.extend(skip_keys) + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if key not in user_def: +                    if debug: +                        print('User data does not have key [%s]' % key) +                        print('User data: %s' % user_def) +                    return False + +                if not isinstance(user_def[key], list): +                    if debug: +                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) +                    return False + +                if len(user_def[key]) != len(value): +                    if debug: +                        print("List lengths are not equal.") +                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) +                        print("user_def: %s" % user_def[key]) +                        print("value: %s" % value) +                    return False + +                for values in zip(user_def[key], value): +                    if isinstance(values[0], dict) and isinstance(values[1], dict): +                        if debug: +                            print('sending list - list') +                            print(type(values[0])) +                            print(type(values[1])) +                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) +                        if not result: +                            print('list compare returned false') +                            return False + +                    elif value != user_def[key]: +                        if debug: +                            print('value should be identical') +                            print(user_def[key]) +                            print(value) +                        return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if key not in user_def: +                    if debug: +                        print("user_def does not have key [%s]" % key) +                    return False +                if not isinstance(user_def[key], dict): +                    if debug: +                        print("dict returned false: not instance of dict") +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print("keys are not equal in dict") +                        print(user_values) +                        print(api_values) +                    return False + +                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) +                if not result: +                    if debug: +                        print("dict returned false") +                        print(result) +                    return False + +            # Verify each key, value pair is the same +            else: +                if key not in user_def or value != user_def[key]: +                    if debug: +                        print("value not equal; 
user_def does not have key") +                        print(key) +                        print(value) +                        if key in user_def: +                            print(user_def[key]) +                    return False + +        if debug: +            print('returning true') +        return True + + +class OpenShiftCLIConfig(object): +    '''Generic Config''' +    def __init__(self, rname, namespace, kubeconfig, options): +        self.kubeconfig = kubeconfig +        self.name = rname +        self.namespace = namespace +        self._options = options + +    @property +    def config_options(self): +        ''' return config options ''' +        return self._options + +    def to_option_list(self): +        '''return all options as a string''' +        return self.stringify() + +    def stringify(self): +        ''' return the options hash as cli params in a string ''' +        rval = [] +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key] +            if data['include'] \ +               and (data['value'] or isinstance(data['value'], int)): +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value'])) + +        return rval + + +# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*- + + +# pylint: disable=too-many-public-methods +class DeploymentConfig(Yedit): +    ''' Class to model an openshift DeploymentConfig''' +    default_deployment_config = ''' +apiVersion: v1 +kind: DeploymentConfig +metadata: +  name: default_dc +  namespace: default +spec: +  replicas: 0 +  selector: +    default_dc: default_dc +  strategy: +    resources: {} +    rollingParams: +      intervalSeconds: 1 +      maxSurge: 0 +      maxUnavailable: 25% +      timeoutSeconds: 600 +      updatePercent: -25 +      updatePeriodSeconds: 1 +    type: Rolling +  template: +    metadata: +    spec: +      containers: +      - env: +        - name: default +          value: default +        image: default +        imagePullPolicy: IfNotPresent +        name: default_dc +        ports: +        - containerPort: 8000 +          hostPort: 8000 +          protocol: TCP +          name: default_port +        resources: {} +        terminationMessagePath: /dev/termination-log +      dnsPolicy: ClusterFirst +      hostNetwork: true +      nodeSelector: +        type: compute +      restartPolicy: Always +      securityContext: {} +      serviceAccount: default +      serviceAccountName: default +      terminationGracePeriodSeconds: 30 +  triggers: +  - type: ConfigChange +''' + +    replicas_path = "spec.replicas" +    env_path = "spec.template.spec.containers[0].env" +    volumes_path = "spec.template.spec.volumes" +    container_path = "spec.template.spec.containers" +    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts" + +    def __init__(self, content=None): +        ''' Constructor for deploymentconfig ''' +        if not content: +            content = DeploymentConfig.default_deployment_config + +        super(DeploymentConfig, self).__init__(content=content) + +    def add_env_value(self, key, value): +        ''' add key, value pair to env array ''' +        rval = False +        env = self.get_env_vars() +        if env: +            env.append({'name': key, 'value': value}) +            rval = True +        else: +            result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value}) +            rval = result[0] 
+
+        return rval
+
+    def exists_env_value(self, key, value):
+        ''' return whether a key, value pair exists '''
+        results = self.get_env_vars()
+        if not results:
+            return False
+
+        for result in results:
+            if result['name'] == key and result['value'] == value:
+                return True
+
+        return False
+
+    def exists_env_key(self, key):
+        ''' return whether a key exists '''
+        results = self.get_env_vars()
+        if not results:
+            return False
+
+        for result in results:
+            if result['name'] == key:
+                return True
+
+        return False
+
+    def get_env_var(self, key):
+        '''return an environment variable by name '''
+        results = self.get(DeploymentConfig.env_path) or []
+        if not results:
+            return None
+
+        for env_var in results:
+            if env_var['name'] == key:
+                return env_var
+
+        return None
+
+    def get_env_vars(self):
+        '''return the list of environment variables '''
+        return self.get(DeploymentConfig.env_path) or []
+
+    def delete_env_var(self, keys):
+        '''delete a list of keys '''
+        if not isinstance(keys, list):
+            keys = [keys]
+
+        env_vars_array = self.get_env_vars()
+        modified = False
+        for key in keys:
+            idx = None
+            for env_idx, env_var in enumerate(env_vars_array):
+                if env_var['name'] == key:
+                    idx = env_idx
+                    break
+
+            # idx can legitimately be 0, so test against None explicitly
+            if idx is not None:
+                modified = True
+                del env_vars_array[idx]
+
+        if modified:
+            return True
+
+        return False
+
+    def update_env_var(self, key, value):
+        '''place an env in the env var list'''
+
+        env_vars_array = self.get_env_vars()
+        idx = None
+        for env_idx, env_var in enumerate(env_vars_array):
+            if env_var['name'] == key:
+                idx = env_idx
+                break
+
+        if idx is not None:
+            env_vars_array[idx]['value'] = value
+        else:
+            self.add_env_value(key, value)
+
+        return True
+
+    def exists_volume_mount(self, volume_mount):
+        ''' return whether a volume mount exists '''
+        exist_volume_mounts = self.get_volume_mounts()
+
+        if not exist_volume_mounts:
+            return False
+
+        volume_mount_found = False
+        for exist_volume_mount in exist_volume_mounts:
+            if exist_volume_mount['name'] == volume_mount['name']:
+                volume_mount_found = True
+                break
+
+        return volume_mount_found
+
+    def exists_volume(self, volume):
+        ''' return whether a volume exists '''
+        exist_volumes = self.get_volumes()
+
+        volume_found = False
+        for exist_volume in exist_volumes:
+            if exist_volume['name'] == volume['name']:
+                volume_found = True
+                break
+
+        return volume_found
+
+    def find_volume_by_name(self, volume, mounts=False):
+        ''' return the volume with the matching name, or None '''
+        volumes = []
+        if mounts:
+            volumes = self.get_volume_mounts()
+        else:
+            volumes = self.get_volumes()
+        for exist_volume in volumes:
+            if exist_volume['name'] == volume['name']:
+                return exist_volume
+
+        return None
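+
+    # Illustrative: add_env_value('LOG_LEVEL', 'debug') appends
+    # {'name': 'LOG_LEVEL', 'value': 'debug'} to the first container's env
+    # array (spec.template.spec.containers[0].env), creating it if absent.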
+        return self.get(DeploymentConfig.replicas_path)
+
+    def get_volume_mounts(self):
+        '''return volume mount information '''
+        return self.get_volumes(mounts=True)
+
+    def get_volumes(self, mounts=False):
+        '''return volume or volume mount information '''
+        if mounts:
+            return self.get(DeploymentConfig.volume_mounts_path) or []
+
+        return self.get(DeploymentConfig.volumes_path) or []
+
+    def delete_volume_by_name(self, volume):
+        '''delete a volume '''
+        modified = False
+        exist_volume_mounts = self.get_volume_mounts()
+        exist_volumes = self.get_volumes()
+        del_idx = None
+        for idx, exist_volume in enumerate(exist_volumes):
+            if 'name' in exist_volume and exist_volume['name'] == volume['name']:
+                del_idx = idx
+                break
+
+        if del_idx is not None:
+            del exist_volumes[del_idx]
+            modified = True
+
+        del_idx = None
+        for idx, exist_volume_mount in enumerate(exist_volume_mounts):
+            if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
+                del_idx = idx
+                break
+
+        if del_idx is not None:
+            del exist_volume_mounts[del_idx]
+            modified = True
+
+        return modified
+
+    def add_volume_mount(self, volume_mount):
+        ''' add a volume mount to the proper location '''
+        exist_volume_mounts = self.get_volume_mounts()
+
+        if not exist_volume_mounts and volume_mount:
+            self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
+        else:
+            exist_volume_mounts.append(volume_mount)
+
+    def add_volume(self, volume):
+        ''' add a volume to the proper location '''
+        exist_volumes = self.get_volumes()
+        if not volume:
+            return
+
+        if not exist_volumes:
+            self.put(DeploymentConfig.volumes_path, [volume])
+        else:
+            exist_volumes.append(volume)
+
+    def update_replicas(self, replicas):
+        ''' update replicas value '''
+        self.put(DeploymentConfig.replicas_path, replicas)
+
+    def update_volume(self, volume):
+        '''update (or add) a volume in the volumes list'''
+        exist_volumes = self.get_volumes()
+
+        if not volume:
+            return False
+
+        # update the volume
+        update_idx = None
+        for idx, exist_vol in enumerate(exist_volumes):
+            if exist_vol['name'] == volume['name']:
+                update_idx = idx
+                break
+
+        if update_idx is not None:
+            exist_volumes[update_idx] = volume
+        else:
+            self.add_volume(volume)
+
+        return True
+
+    def update_volume_mount(self, volume_mount):
+        '''update (or add) a volume mount in the volume mounts list'''
+        modified = False
+        found = False
+
+        exist_volume_mounts = self.get_volume_mounts()
+
+        if not volume_mount:
+            return False
+
+        # update the volume mount
+        for exist_vol_mount in exist_volume_mounts:
+            if exist_vol_mount['name'] == volume_mount['name']:
+                # track the name match separately from the mountPath change;
+                # otherwise an identical existing mount gets appended again
+                found = True
+                if 'mountPath' in exist_vol_mount and \
+                   str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
+                    exist_vol_mount['mountPath'] = volume_mount['mountPath']
+                    modified = True
+                break
+
+        if not found:
+            self.add_volume_mount(volume_mount)
+            modified = True
+
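+        # Either path converges on the requested mount, so callers can treat
+        # the return value as "something changed client-side".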
+        return modified
+
+    def needs_update_volume(self, volume, volume_mount):
+        ''' verify a volume update is needed '''
+        exist_volume = self.find_volume_by_name(volume)
+        exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
+        results = []
+        results.append(exist_volume['name'] == volume['name'])
+
+        if 'secret' in volume:
+            results.append('secret' in exist_volume)
+            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
+            results.append(exist_volume_mount['name'] == volume_mount['name'])
+            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+        elif 'emptyDir' in volume:
+            results.append(exist_volume_mount['name'] == volume['name'])
+            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+        elif 'persistentVolumeClaim' in volume:
+            pvc = 'persistentVolumeClaim'
+            results.append(pvc in exist_volume)
+            if results[-1]:
+                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
+
+                if 'claimSize' in volume[pvc]:
+                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
+
+        elif 'hostPath' in volume:
+            # the volume dict is built with the camelCase 'hostPath' key, so
+            # match on that and compare the node-local path itself
+            results.append('hostPath' in exist_volume)
+            results.append(exist_volume['hostPath']['path'] == volume['hostPath']['path'])
+
+        elif 'configMap' in volume:
+            # configmap volumes mirror the secret checks
+            results.append('configMap' in exist_volume)
+            results.append(exist_volume['configMap']['name'] == volume['configMap']['name'])
+            results.append(exist_volume_mount['name'] == volume_mount['name'])
+            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+        return not all(results)
+
+    def needs_update_replicas(self, replicas):
+        ''' verify whether a replica update is needed '''
+        current_reps = self.get(DeploymentConfig.replicas_path)
+        return current_reps != replicas
+
+# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
+
+class Volume(object):
+    ''' Class to represent an openshift volume object'''
+    volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
+                          "dc":  "spec.template.spec.containers[0].volumeMounts",
+                          "rc":  "spec.template.spec.containers[0].volumeMounts",
+                         }
+    volumes_path = {"pod": "spec.volumes",
+                    "dc":  "spec.template.spec.volumes",
+                    "rc":  "spec.template.spec.volumes",
+                   }
+
+    @staticmethod
+    def create_volume_structure(volume_info):
+        ''' return a properly structured volume '''
+        volume_mount = None
+        volume = {'name': volume_info['name']}
+        volume_type = volume_info['type'].lower()
+        if volume_type == 'secret':
+            volume['secret'] = {'secretName': volume_info['secret_name']}
+            volume_mount = {'mountPath': volume_info['path'],
+                            'name': volume_info['name']}
+        elif volume_type == 'emptydir':
+            volume['emptyDir'] = {}
+            volume_mount = {'mountPath': volume_info['path'],
+                            'name': volume_info['name']}
+        elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
+            volume['persistentVolumeClaim'] = {}
+            volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
+            volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
+        elif volume_type == 'hostpath':
+            volume['hostPath'] = {}
+            
volume['hostPath']['path'] = volume_info['path'] +        elif volume_type == 'configmap': +            volume['configMap'] = {} +            volume['configMap']['name'] = volume_info['configmap_name'] +            volume_mount = {'mountPath': volume_info['path'], +                            'name': volume_info['name']} + +        return (volume, volume_mount) + +# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_volume.py -*- -*- -*- + + +# pylint: disable=too-many-instance-attributes +class OCVolume(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' +    volume_mounts_path = {"pod": "spec.containers[0].volumeMounts", +                          "dc":  "spec.template.spec.containers[0].volumeMounts", +                          "rc":  "spec.template.spec.containers[0].volumeMounts", +                         } +    volumes_path = {"pod": "spec.volumes", +                    "dc":  "spec.template.spec.volumes", +                    "rc":  "spec.template.spec.volumes", +                   } + +    # pylint allows 5 +    # pylint: disable=too-many-arguments +    def __init__(self, +                 kind, +                 resource_name, +                 namespace, +                 vol_name, +                 mount_path, +                 mount_type, +                 secret_name, +                 claim_size, +                 claim_name, +                 configmap_name, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for OCVolume ''' +        super(OCVolume, self).__init__(namespace, kubeconfig) +        self.kind = kind +        self.volume_info = {'name': vol_name, +                            'secret_name': secret_name, +                            'path': mount_path, +                            'type': mount_type, +                            'claimSize': claim_size, +                            'claimName': claim_name, +                            'configmap_name': configmap_name} +        self.volume, self.volume_mount = Volume.create_volume_structure(self.volume_info) +        self.name = resource_name +        self.namespace = namespace +        self.kubeconfig = kubeconfig +        self.verbose = verbose +        self._resource = None + +    @property +    def resource(self): +        ''' property function for resource var ''' +        if not self._resource: +            self.get() +        return self._resource + +    @resource.setter +    def resource(self, data): +        ''' setter function for resource var ''' +        self._resource = data + +    def exists(self): +        ''' return whether a volume exists ''' +        volume_mount_found = False +        volume_found = self.resource.exists_volume(self.volume) +        if not self.volume_mount and volume_found: +            return True + +        if self.volume_mount: +            volume_mount_found = self.resource.exists_volume_mount(self.volume_mount) + +        if volume_found and self.volume_mount and volume_mount_found: +            return True + +        return False + +    def get(self): +        '''return volume information ''' +        vol = self._get(self.kind, self.name) +        if vol['returncode'] == 0: +            if self.kind == 'dc': +                self.resource = DeploymentConfig(content=vol['results'][0]) +                vol['results'] = self.resource.get_volumes() + +        return vol + +    def delete(self): +        '''remove a volume''' +  
      self.resource.delete_volume_by_name(self.volume)
+        return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
+
+    def put(self):
+        '''place volume into dc '''
+        self.resource.update_volume(self.volume)
+        self.resource.get_volumes()
+        self.resource.update_volume_mount(self.volume_mount)
+        return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
+
+    def needs_update(self):
+        ''' verify an update is needed '''
+        return self.resource.needs_update_volume(self.volume, self.volume_mount)
+
+    # pylint: disable=too-many-branches,too-many-return-statements
+    @staticmethod
+    def run_ansible(params, check_mode=False):
+        '''run the idempotent ansible code'''
+        oc_volume = OCVolume(params['kind'],
+                             params['name'],
+                             params['namespace'],
+                             params['vol_name'],
+                             params['mount_path'],
+                             params['mount_type'],
+                             # secrets
+                             params['secret_name'],
+                             # pvc
+                             params['claim_size'],
+                             params['claim_name'],
+                             # configmap
+                             params['configmap_name'],
+                             kubeconfig=params['kubeconfig'],
+                             verbose=params['debug'])
+
+        state = params['state']
+
+        api_rval = oc_volume.get()
+
+        if api_rval['returncode'] != 0:
+            return {'failed': True, 'msg': api_rval}
+
+        #####
+        # Get
+        #####
+        if state == 'list':
+            return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+        ########
+        # Delete
+        ########
+        if state == 'absent':
+            if oc_volume.exists():
+
+                if check_mode:
+                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+                api_rval = oc_volume.delete()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            return {'changed': False, 'state': state}
+
+        if state == 'present':
+            ########
+            # Create
+            ########
+            if not oc_volume.exists():
+
+                if check_mode:
+                    # return a dict like every other branch; exit_json is not
+                    # available in this scope
+                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+                # Create it here
+                api_rval = oc_volume.put()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                # return the created object
+                api_rval = oc_volume.get()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            ########
+            # Update
+            ########
+            if oc_volume.needs_update():
+                api_rval = oc_volume.put()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                # return the created object
+                api_rval = oc_volume.get()
+
+                if api_rval['returncode'] != 0:
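+                    # non-zero return codes from the oc wrapper are surfaced
+                    # verbatim so the module can fail with the raw CLI output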
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            return {'changed': False, 'results': api_rval, 'state': state}
+
+        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
+
+# -*- -*- -*- End included fragment: class/oc_volume.py -*- -*- -*-
+
+# -*- -*- -*- Begin included fragment: ansible/oc_volume.py -*- -*- -*-
+
+def main():
+    '''
+    ansible oc module for volumes
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+            state=dict(default='present', type='str',
+                       choices=['present', 'absent', 'list']),
+            debug=dict(default=False, type='bool'),
+            kind=dict(default='dc', choices=['dc', 'rc', 'pods'], type='str'),
+            namespace=dict(default='default', type='str'),
+            vol_name=dict(default=None, type='str'),
+            name=dict(default=None, type='str'),
+            mount_type=dict(default=None,
+                            choices=['emptydir', 'hostpath', 'secret', 'pvc', 'configmap'],
+                            type='str'),
+            mount_path=dict(default=None, type='str'),
+            # secrets require a name
+            secret_name=dict(default=None, type='str'),
+            # pvc requires a size
+            claim_size=dict(default=None, type='str'),
+            claim_name=dict(default=None, type='str'),
+            # configmap requires a name
+            configmap_name=dict(default=None, type='str'),
+        ),
+        supports_check_mode=True,
+    )
+    rval = OCVolume.run_ansible(module.params, module.check_mode)
+    if 'failed' in rval:
+        module.fail_json(**rval)
+
+    module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+    main()
+
+# -*- -*- -*- End included fragment: ansible/oc_volume.py -*- -*- -*-
diff --git a/roles/lib_openshift/src/ansible/oc_pvc.py b/roles/lib_openshift/src/ansible/oc_pvc.py
new file mode 100644
index 000000000..a5181e281
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_pvc.py
@@ -0,0 +1,35 @@
+# pylint: skip-file
+# flake8: noqa
+
+#pylint: disable=too-many-branches
+def main():
+    '''
+    ansible oc module for pvc
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+            state=dict(default='present', type='str',
+                       choices=['present', 'absent', 'list']),
+            debug=dict(default=False, type='bool'),
+            name=dict(default=None, required=True, type='str'),
+            namespace=dict(default=None, required=True, type='str'),
+            volume_capacity=dict(default='1G', type='str'),
+            access_modes=dict(default='ReadWriteOnce',
+                              choices=['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany'],
+                              type='str'),
+        ),
+        supports_check_mode=True,
+    )
+
+    rval = OCPVC.run_ansible(module.params, module.check_mode)
+
+    if 'failed' in rval:
+        module.fail_json(**rval)
+
+    return module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/roles/lib_openshift/src/ansible/oc_volume.py b/roles/lib_openshift/src/ansible/oc_volume.py
new file mode 100644
index 000000000..660376d2f
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_volume.py
@@ -0,0 +1,41 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+    '''
+    ansible oc module for volumes
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+            state=dict(default='present', type='str',
+                       choices=['present', 'absent', 'list']),
+            debug=dict(default=False, type='bool'),
+            kind=dict(default='dc', choices=['dc', 'rc', 'pods'], type='str'),
+            namespace=dict(default='default', type='str'),
+            vol_name=dict(default=None, type='str'),
+            name=dict(default=None, type='str'),
+            mount_type=dict(default=None,
+                            choices=['emptydir', 'hostpath', 'secret', 'pvc', 'configmap'],
+                            type='str'),
+            mount_path=dict(default=None, type='str'),
+            # secrets require a name
+            secret_name=dict(default=None, type='str'),
+            # pvc requires a size
+            claim_size=dict(default=None, type='str'),
+            claim_name=dict(default=None, type='str'),
+            # configmap requires a name
+            configmap_name=dict(default=None, type='str'),
+        ),
+        supports_check_mode=True,
+    )
+    rval = OCVolume.run_ansible(module.params, module.check_mode)
+    if 'failed' in rval:
+        module.fail_json(**rval)
+
+    module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/roles/lib_openshift/src/class/oc_pvc.py b/roles/lib_openshift/src/class/oc_pvc.py
new file mode 100644
index 000000000..c73abc47c
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_pvc.py
@@ -0,0 +1,167 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCPVC(OpenShiftCLI):
+    ''' Class to wrap the oc command line tools '''
+    kind = 'pvc'
+
+    # pylint allows 5
+    # pylint: disable=too-many-arguments
+    def __init__(self,
+                 config,
+                 verbose=False):
+        ''' Constructor for OCPVC '''
+        super(OCPVC, self).__init__(config.namespace, config.kubeconfig)
+        self.config = config
+        self.namespace = config.namespace
+        self._pvc = None
+
+    @property
+    def pvc(self):
+        ''' property function pvc'''
+        if not self._pvc:
+            self.get()
+        return self._pvc
+
+    @pvc.setter
+    def pvc(self, data):
+        ''' setter function for pvc var '''
+        self._pvc = data
+
+    def bound(self):
+        '''return whether the pvc is bound'''
+        if self.pvc.get_volume_name():
+            return True
+
+        return False
+
+    def exists(self):
+        ''' return whether a pvc exists '''
+        if self.pvc:
+            return True
+
+        return False
+
+    def get(self):
+        '''return pvc information '''
+        result = self._get(self.kind, self.config.name)
+        if result['returncode'] == 0:
+            self.pvc = PersistentVolumeClaim(content=result['results'][0])
+        elif '\"%s\" not found' % self.config.name in result['stderr']:
+            result['returncode'] = 0
+            result['results'] = [{}]
+
+        return result
+
+    def delete(self):
+        '''delete the object'''
+        return self._delete(self.kind, self.config.name)
+
+    def create(self):
+        '''create the object'''
+        return self._create_from_content(self.config.name, self.config.data)
+
+    def update(self):
+        '''update the object'''
+        # replace the existing pvc definition with the rendered config data
+        return self._replace_content(self.kind, self.config.name, self.config.data)
+
+    def needs_update(self):
+        ''' verify an update is needed '''
+        if self.pvc.get_volume_name() or self.pvc.is_bound():
+            return False
+
+        skip = []
+        return not Utils.check_def_equal(self.config.data, self.pvc.yaml_dict, skip_keys=skip, debug=True)
+
+    # pylint: disable=too-many-branches,too-many-return-statements
+    @staticmethod
+    def run_ansible(params, check_mode):
+        '''run the idempotent ansible code'''
+        pconfig = PersistentVolumeClaimConfig(params['name'],
+                                              params['namespace'],
+                                              params['kubeconfig'],
+                                              params['access_modes'],
+                                              params['volume_capacity'],
+                                             )
+        oc_pvc = OCPVC(pconfig, verbose=params['debug'])
+
+        state = params['state']
+
+        api_rval = oc_pvc.get()
+        if api_rval['returncode'] != 0:
+            return {'failed': True, 'msg': api_rval}
+
+        #####
+        # Get
+        #####
+        if state == 'list':
+            return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+        ########
+        # Delete
+        ########
+        if state == 'absent':
+            if oc_pvc.exists():
+
+                if check_mode:
+                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+                api_rval = oc_pvc.delete()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            return {'changed': False, 'state': state}
+
+        if state == 'present':
+            ########
+            # Create
+            ########
+            if not oc_pvc.exists():
+
+                if check_mode:
+                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+                # Create it here
+                api_rval = oc_pvc.create()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                # return the created object
+                api_rval = oc_pvc.get()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            ########
+            # Update
+            ########
+            if oc_pvc.pvc.is_bound() or oc_pvc.pvc.get_volume_name():
+                api_rval['msg'] = '##### - This volume is currently bound. 
Will not update - ####' +                return {'changed': False, 'results': api_rval, 'state': state} + +            if oc_pvc.needs_update(): +                api_rval = oc_pvc.update() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                # return the created object +                api_rval = oc_pvc.get() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': state} + +            return {'changed': False, 'results': api_rval, 'state': state} + +        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)} diff --git a/roles/lib_openshift/src/class/oc_volume.py b/roles/lib_openshift/src/class/oc_volume.py new file mode 100644 index 000000000..5211a1afd --- /dev/null +++ b/roles/lib_openshift/src/class/oc_volume.py @@ -0,0 +1,195 @@ +# pylint: skip-file +# flake8: noqa + + +# pylint: disable=too-many-instance-attributes +class OCVolume(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' +    volume_mounts_path = {"pod": "spec.containers[0].volumeMounts", +                          "dc":  "spec.template.spec.containers[0].volumeMounts", +                          "rc":  "spec.template.spec.containers[0].volumeMounts", +                         } +    volumes_path = {"pod": "spec.volumes", +                    "dc":  "spec.template.spec.volumes", +                    "rc":  "spec.template.spec.volumes", +                   } + +    # pylint allows 5 +    # pylint: disable=too-many-arguments +    def __init__(self, +                 kind, +                 resource_name, +                 namespace, +                 vol_name, +                 mount_path, +                 mount_type, +                 secret_name, +                 claim_size, +                 claim_name, +                 configmap_name, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False): +        ''' Constructor for OCVolume ''' +        super(OCVolume, self).__init__(namespace, kubeconfig) +        self.kind = kind +        self.volume_info = {'name': vol_name, +                            'secret_name': secret_name, +                            'path': mount_path, +                            'type': mount_type, +                            'claimSize': claim_size, +                            'claimName': claim_name, +                            'configmap_name': configmap_name} +        self.volume, self.volume_mount = Volume.create_volume_structure(self.volume_info) +        self.name = resource_name +        self.namespace = namespace +        self.kubeconfig = kubeconfig +        self.verbose = verbose +        self._resource = None + +    @property +    def resource(self): +        ''' property function for resource var ''' +        if not self._resource: +            self.get() +        return self._resource + +    @resource.setter +    def resource(self, data): +        ''' setter function for resource var ''' +        self._resource = data + +    def exists(self): +        ''' return whether a volume exists ''' +        volume_mount_found = False +        volume_found = self.resource.exists_volume(self.volume) +        if not self.volume_mount and volume_found: +            return True + +        if self.volume_mount: +            volume_mount_found = self.resource.exists_volume_mount(self.volume_mount) + +        if 
volume_found and self.volume_mount and volume_mount_found:
+            return True
+
+        return False
+
+    def get(self):
+        '''return volume information '''
+        vol = self._get(self.kind, self.name)
+        if vol['returncode'] == 0:
+            if self.kind == 'dc':
+                self.resource = DeploymentConfig(content=vol['results'][0])
+                vol['results'] = self.resource.get_volumes()
+
+        return vol
+
+    def delete(self):
+        '''remove a volume'''
+        self.resource.delete_volume_by_name(self.volume)
+        return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
+
+    def put(self):
+        '''place volume into dc '''
+        self.resource.update_volume(self.volume)
+        self.resource.get_volumes()
+        self.resource.update_volume_mount(self.volume_mount)
+        return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
+
+    def needs_update(self):
+        ''' verify an update is needed '''
+        return self.resource.needs_update_volume(self.volume, self.volume_mount)
+
+    # pylint: disable=too-many-branches,too-many-return-statements
+    @staticmethod
+    def run_ansible(params, check_mode=False):
+        '''run the idempotent ansible code'''
+        oc_volume = OCVolume(params['kind'],
+                             params['name'],
+                             params['namespace'],
+                             params['vol_name'],
+                             params['mount_path'],
+                             params['mount_type'],
+                             # secrets
+                             params['secret_name'],
+                             # pvc
+                             params['claim_size'],
+                             params['claim_name'],
+                             # configmap
+                             params['configmap_name'],
+                             kubeconfig=params['kubeconfig'],
+                             verbose=params['debug'])
+
+        state = params['state']
+
+        api_rval = oc_volume.get()
+
+        if api_rval['returncode'] != 0:
+            return {'failed': True, 'msg': api_rval}
+
+        #####
+        # Get
+        #####
+        if state == 'list':
+            return {'changed': False, 'results': api_rval['results'], 'state': state}
+
+        ########
+        # Delete
+        ########
+        if state == 'absent':
+            if oc_volume.exists():
+
+                if check_mode:
+                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'}
+
+                api_rval = oc_volume.delete()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            return {'changed': False, 'state': state}
+
+        if state == 'present':
+            ########
+            # Create
+            ########
+            if not oc_volume.exists():
+
+                if check_mode:
+                    # return a dict like every other branch; exit_json is not
+                    # available in this scope
+                    return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a create.'}
+
+                # Create it here
+                api_rval = oc_volume.put()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                # return the created object
+                api_rval = oc_volume.get()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            ########
+            # Update
+            ########
+            if oc_volume.needs_update():
+                api_rval = oc_volume.put()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                # return the created object
+                api_rval = oc_volume.get()
+
+                if api_rval['returncode'] != 0:
+                    return {'failed': True, 'msg': api_rval}
+
+                return {'changed': True, 'results': api_rval, 'state': state}
+
+            return {'changed': False, 'results': api_rval, 'state': state}
+
+        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
diff --git a/roles/lib_openshift/src/doc/pvc b/roles/lib_openshift/src/doc/pvc
new file mode 100644
index 000000000..9240f2a0f
--- /dev/null
+++ b/roles/lib_openshift/src/doc/pvc
@@ -0,0 +1,76 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_pvc
+short_description: Modify and idempotently manage openshift persistent volume claims
+description:
+  - Modify openshift persistent volume claims programmatically.
+options:
+  state:
+    description:
+    - "Supported states: present, absent, list"
+    - present - will ensure object is created or updated to the value specified
+    - list - will return a pvc
+    - absent - will remove a pvc
+    required: False
+    default: present
+    choices: ["present", "absent", "list"]
+    aliases: []
+  kubeconfig:
+    description:
+    - The path for the kubeconfig file to use for authentication
+    required: false
+    default: /etc/origin/master/admin.kubeconfig
+    aliases: []
+  debug:
+    description:
+    - Turn on debug output.
+    required: false
+    default: False
+    aliases: []
+  name:
+    description:
+    - Name of the object that is being queried.
+    required: true
+    default: None
+    aliases: []
+  namespace:
+    description:
+    - The namespace where the object lives.
+    required: true
+    default: None
+    aliases: []
+  volume_capacity:
+    description:
+    - The requested volume capacity
+    required: False
+    default: 1G
+    aliases: []
+  access_modes:
+    description:
+    - The access mode allowed for the pvc
+    required: False
+    default: ReadWriteOnce
+    choices:
+    - ReadWriteOnce
+    - ReadOnlyMany
+    - ReadWriteMany
+    aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''

EXAMPLES = '''
+- name: create a pvc
+  oc_pvc:
+    namespace: awesomeapp
+    name: dbstorage
+    access_modes: ReadWriteOnce
+    volume_capacity: 5G
+  register: pvcout
+'''
diff --git a/roles/lib_openshift/src/doc/volume b/roles/lib_openshift/src/doc/volume
new file mode 100644
index 000000000..1d04afeef
--- /dev/null
+++ b/roles/lib_openshift/src/doc/volume
@@ -0,0 +1,105 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_volume
+short_description: Create, modify, and idempotently manage openshift volumes.
+description:
+  - Modify openshift volumes programmatically.
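+  # note: mount_type selects which companion option is consulted below:
+  # secret -> secret_name, pvc -> claim_name/claim_size, configmap -> configmap_name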
+options:
+  state:
+    description:
+    - State controls the action that will be taken with the resource
+    - 'present' will create or update an object to the desired state
+    - 'absent' will ensure volumes are removed
+    - 'list' will read the volumes
+    default: present
+    choices: ["present", "absent", "list"]
+    aliases: []
+  kubeconfig:
+    description:
+    - The path for the kubeconfig file to use for authentication
+    required: false
+    default: /etc/origin/master/admin.kubeconfig
+    aliases: []
+  debug:
+    description:
+    - Turn on debug output.
+    required: false
+    default: False
+    aliases: []
+  namespace:
+    description:
+    - The name of the namespace where the object lives
+    required: false
+    default: default
+    aliases: []
+  kind:
+    description:
+    - The kind of object that can be managed.
+    default: dc
+    choices:
+    - dc
+    - rc
+    - pods
+    aliases: []
+  mount_type:
+    description:
+    - The type of volume to be used
+    required: false
+    default: None
+    choices:
+    - emptydir
+    - hostpath
+    - secret
+    - pvc
+    - configmap
+    aliases: []
+  mount_path:
+    description:
+    - The path to where the mount will be attached
+    required: false
+    default: None
+    aliases: []
+  secret_name:
+    description:
+    - The name of the secret. Used when mount_type is secret.
+    required: false
+    default: None
+    aliases: []
+  claim_size:
+    description:
+    - The size in GB of the pv claim. e.g. 100G
+    required: false
+    default: None
+    aliases: []
+  claim_name:
+    description:
+    - The name of the pv claim
+    required: false
+    default: None
+    aliases: []
+  configmap_name:
+    description:
+    - The name of the configmap
+    required: false
+    default: None
+    aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''

EXAMPLES = '''
+- name: attach storage volumes to deploymentconfig
+  oc_volume:
+    namespace: logging
+    kind: dc
+    name: name_of_the_dc
+    mount_type: pvc
+    claim_name: loggingclaim
+    claim_size: 100G
+    vol_name: logging-storage
+  run_once: true
+'''
diff --git a/roles/lib_openshift/src/lib/pvc.py b/roles/lib_openshift/src/lib/pvc.py
new file mode 100644
index 000000000..929b50990
--- /dev/null
+++ b/roles/lib_openshift/src/lib/pvc.py
@@ -0,0 +1,167 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class PersistentVolumeClaimConfig(object):
+    ''' Handle pvc options '''
+    # pylint: disable=too-many-arguments
+    def __init__(self,
+                 sname,
+                 namespace,
+                 kubeconfig,
+                 access_modes=None,
+                 vol_capacity='1G'):
+        ''' constructor for handling pvc options '''
+        self.kubeconfig = kubeconfig
+        self.name = sname
+        self.namespace = namespace
+        self.access_modes = access_modes
+        self.vol_capacity = vol_capacity
+        self.data = {}
+
+        self.create_dict()
+
+    def create_dict(self):
+        ''' return a pvc as a dict '''
+        # version
+        self.data['apiVersion'] = 'v1'
+        # kind
+        self.data['kind'] = 'PersistentVolumeClaim'
+        # metadata
+        self.data['metadata'] = {}
+        self.data['metadata']['name'] = self.name
+        # spec
+        self.data['spec'] = {}
+        self.data['spec']['accessModes'] = ['ReadWriteOnce']
+        if self.access_modes:
+            # a bare string (the module's type='str' input) is wrapped so the
+            # resulting spec always carries a list
+            if not isinstance(self.access_modes, list):
+                self.access_modes = [self.access_modes]
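+            # caller-supplied access modes replace the ReadWriteOnce default
+            # seeded above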
+            self.data['spec']['accessModes'] = self.access_modes
+
+        # storage capacity
+        self.data['spec']['resources'] = {}
+        self.data['spec']['resources']['requests'] = {}
+        self.data['spec']['resources']['requests']['storage'] = self.vol_capacity
+
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class PersistentVolumeClaim(Yedit):
+    ''' Class to model an openshift PersistentVolumeClaim'''
+    access_modes_path = "spec.accessModes"
+    volume_capacity_path = "spec.resources.requests.storage"
+    volume_name_path = "spec.volumeName"
+    bound_path = "status.phase"
+    kind = 'PersistentVolumeClaim'
+
+    def __init__(self, content):
+        '''PersistentVolumeClaim constructor'''
+        super(PersistentVolumeClaim, self).__init__(content=content)
+        self._access_modes = None
+        self._volume_capacity = None
+        self._volume_name = None
+
+    @property
+    def volume_name(self):
+        ''' volume_name property '''
+        if self._volume_name is None:
+            self._volume_name = self.get_volume_name()
+        return self._volume_name
+
+    @volume_name.setter
+    def volume_name(self, data):
+        ''' volume_name property setter'''
+        self._volume_name = data
+
+    @property
+    def access_modes(self):
+        ''' access_modes property '''
+        if self._access_modes is None:
+            self._access_modes = self.get_access_modes()
+            if not isinstance(self._access_modes, list):
+                self._access_modes = list(self._access_modes)
+
+        return self._access_modes
+
+    @access_modes.setter
+    def access_modes(self, data):
+        ''' access_modes property setter'''
+        if not isinstance(data, list):
+            data = list(data)
+
+        self._access_modes = data
+
+    @property
+    def volume_capacity(self):
+        ''' volume_capacity property '''
+        if self._volume_capacity is None:
+            self._volume_capacity = self.get_volume_capacity()
+        return self._volume_capacity
+
+    @volume_capacity.setter
+    def volume_capacity(self, data):
+        ''' volume_capacity property setter'''
+        self._volume_capacity = data
+
+    def get_access_modes(self):
+        '''get access_modes'''
+        return self.get(PersistentVolumeClaim.access_modes_path) or []
+
+    def get_volume_capacity(self):
+        '''get volume_capacity'''
+        return self.get(PersistentVolumeClaim.volume_capacity_path) or []
+
+    def get_volume_name(self):
+        '''get volume_name'''
+        return self.get(PersistentVolumeClaim.volume_name_path) or []
+
+    def is_bound(self):
+        '''return whether the claim's phase is Bound'''
+        return self.get(PersistentVolumeClaim.bound_path) == 'Bound'
+
+    #### ADD #####
+    def add_access_mode(self, inc_mode):
+        ''' add an access_mode'''
+        if self.access_modes:
+            self.access_modes.append(inc_mode)
+        else:
+            self.put(PersistentVolumeClaim.access_modes_path, [inc_mode])
+
+        return True
+
+    #### /ADD #####
+
+    #### Remove #####
+    def remove_access_mode(self, inc_mode):
+        ''' remove an access_mode'''
+        try:
+            self.access_modes.remove(inc_mode)
+        except ValueError as _:
+            return False
+
+        return True
+
+    #### /REMOVE #####
+
+    #### UPDATE #####
+    def update_access_mode(self, inc_mode):
+        ''' update an access_mode'''
+        try:
+            index = self.access_modes.index(inc_mode)
+        except 
ValueError as _: +            return self.add_access_mode(inc_mode) + +        self.access_modes[index] = inc_mode + +        return True + +    #### /UPDATE ##### + +    #### FIND #### +    def find_access_mode(self, inc_mode): +        ''' find a user ''' +        index = None +        try: +            index = self.access_modes.index(inc_mode) +        except ValueError as _: +            return index + +        return index diff --git a/roles/lib_openshift/src/lib/volume.py b/roles/lib_openshift/src/lib/volume.py index e0abb1d1b..c049c8b49 100644 --- a/roles/lib_openshift/src/lib/volume.py +++ b/roles/lib_openshift/src/lib/volume.py @@ -2,7 +2,7 @@  # flake8: noqa  class Volume(object): -    ''' Class to model an openshift volume object''' +    ''' Class to represent an openshift volume object'''      volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",                            "dc":  "spec.template.spec.containers[0].volumeMounts",                            "rc":  "spec.template.spec.containers[0].volumeMounts", @@ -34,5 +34,10 @@ class Volume(object):          elif volume_type == 'hostpath':              volume['hostPath'] = {}              volume['hostPath']['path'] = volume_info['path'] +        elif volume_type == 'configmap': +            volume['configMap'] = {} +            volume['configMap']['name'] = volume_info['configmap_name'] +            volume_mount = {'mountPath': volume_info['path'], +                            'name': volume_info['name']}          return (volume, volume_mount) diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml index 91ee86750..0dba6016b 100644 --- a/roles/lib_openshift/src/sources.yml +++ b/roles/lib_openshift/src/sources.yml @@ -152,6 +152,17 @@ oc_project.py:  - class/oc_project.py  - ansible/oc_project.py +oc_pvc.py: +- doc/generated +- doc/license +- lib/import.py +- doc/pvc +- ../../lib_utils/src/class/yedit.py +- lib/base.py +- lib/pvc.py +- class/oc_pvc.py +- ansible/oc_pvc.py +  oc_route.py:  - doc/generated  - doc/license @@ -229,6 +240,18 @@ oc_version.py:  - class/oc_version.py  - ansible/oc_version.py +oc_volume.py: +- doc/generated +- doc/license +- lib/import.py +- doc/volume +- ../../lib_utils/src/class/yedit.py +- lib/base.py +- lib/deploymentconfig.py +- lib/volume.py +- class/oc_volume.py +- ansible/oc_volume.py +  oc_objectvalidator.py:  - doc/generated  - doc/license diff --git a/roles/lib_openshift/src/test/unit/test_oc_pvc.py b/roles/lib_openshift/src/test/unit/test_oc_pvc.py new file mode 100755 index 000000000..82187917d --- /dev/null +++ b/roles/lib_openshift/src/test/unit/test_oc_pvc.py @@ -0,0 +1,366 @@ +''' + Unit tests for oc pvc +''' + +import copy +import os +import six +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error,wrong-import-position +# place class in our python path +module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501 +sys.path.insert(0, module_path) +from oc_pvc import OCPVC, locate_oc_binary  # noqa: E402 + + +class OCPVCTest(unittest.TestCase): +    ''' +     Test class for OCPVC +    ''' +    params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig', +              'state': 'present', +              'debug': False, +              'name': 'mypvc', +              'namespace': 'test', + 
             'volume_capacity': '1G', +              'access_modes': 'ReadWriteMany'} + +    @mock.patch('oc_pvc.Utils.create_tmpfile_copy') +    @mock.patch('oc_pvc.OCPVC._run') +    def test_create_pvc(self, mock_run, mock_tmpfile_copy): +        ''' Testing a pvc create ''' +        params = copy.deepcopy(OCPVCTest.params) + +        pvc = '''{"kind": "PersistentVolumeClaim", +               "apiVersion": "v1", +               "metadata": { +                   "name": "mypvc", +                   "namespace": "test", +                   "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc", +                   "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889", +                   "resourceVersion": "126510787", +                   "creationTimestamp": "2017-01-12T15:04:50Z", +                   "labels": { +                       "mypvc": "database" +                   }, +                   "annotations": { +                       "pv.kubernetes.io/bind-completed": "yes", +                       "pv.kubernetes.io/bound-by-controller": "yes", +                       "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed" +                   } +               }, +               "spec": { +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                   "resources": { +                       "requests": { +                           "storage": "1Gi" +                       } +                   }, +                   "volumeName": "pv-aws-ow5vl" +               }, +               "status": { +                  "phase": "Bound", +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                    "capacity": { +                      "storage": "1Gi" +                    } +               } +              }''' + +        mock_run.side_effect = [ +            (1, '', 'Error from server: persistentvolumeclaims "mypvc" not found'), +            (1, '', 'Error from server: persistentvolumeclaims "mypvc" not found'), +            (0, '', ''), +            (0, pvc, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        results = OCPVC.run_ansible(params, False) + +        self.assertTrue(results['changed']) +        self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mypvc') + +    @mock.patch('oc_pvc.Utils.create_tmpfile_copy') +    @mock.patch('oc_pvc.OCPVC._run') +    def test_update_pvc(self, mock_run, mock_tmpfile_copy): +        ''' Testing a pvc create ''' +        params = copy.deepcopy(OCPVCTest.params) +        params['access_modes'] = 'ReadWriteMany' + +        pvc = '''{"kind": "PersistentVolumeClaim", +               "apiVersion": "v1", +               "metadata": { +                   "name": "mypvc", +                   "namespace": "test", +                   "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc", +                   "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889", +                   "resourceVersion": "126510787", +                   "creationTimestamp": "2017-01-12T15:04:50Z", +                   "labels": { +                       "mypvc": "database" +                   }, +                   "annotations": { +                       "pv.kubernetes.io/bind-completed": "yes", +                       "pv.kubernetes.io/bound-by-controller": "yes", +                       
"v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed" +                   } +               }, +               "spec": { +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                   "resources": { +                       "requests": { +                           "storage": "1Gi" +                       } +                   }, +                   "volumeName": "pv-aws-ow5vl" +               }, +               "status": { +                  "phase": "Bound", +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                    "capacity": { +                      "storage": "1Gi" +                    } +               } +              }''' + +        mod_pvc = '''{"kind": "PersistentVolumeClaim", +               "apiVersion": "v1", +               "metadata": { +                   "name": "mypvc", +                   "namespace": "test", +                   "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc", +                   "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889", +                   "resourceVersion": "126510787", +                   "creationTimestamp": "2017-01-12T15:04:50Z", +                   "labels": { +                       "mypvc": "database" +                   }, +                   "annotations": { +                       "pv.kubernetes.io/bind-completed": "yes", +                       "pv.kubernetes.io/bound-by-controller": "yes", +                       "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed" +                   } +               }, +               "spec": { +                   "accessModes": [ +                       "ReadWriteMany" +                   ], +                   "resources": { +                       "requests": { +                           "storage": "1Gi" +                       } +                   }, +                   "volumeName": "pv-aws-ow5vl" +               }, +               "status": { +                  "phase": "Bound", +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                    "capacity": { +                      "storage": "1Gi" +                    } +               } +              }''' + +        mock_run.side_effect = [ +            (0, pvc, ''), +            (0, pvc, ''), +            (0, '', ''), +            (0, mod_pvc, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        results = OCPVC.run_ansible(params, False) + +        self.assertFalse(results['changed']) +        self.assertEqual(results['results']['msg'], '##### - This volume is currently bound.  
Will not update - ####') + +    @mock.patch('oc_pvc.Utils.create_tmpfile_copy') +    @mock.patch('oc_pvc.OCPVC._run') +    def test_delete_pvc(self, mock_run, mock_tmpfile_copy): +        ''' Testing a pvc create ''' +        params = copy.deepcopy(OCPVCTest.params) +        params['state'] = 'absent' + +        pvc = '''{"kind": "PersistentVolumeClaim", +               "apiVersion": "v1", +               "metadata": { +                   "name": "mypvc", +                   "namespace": "test", +                   "selfLink": "/api/v1/namespaces/test/persistentvolumeclaims/mypvc", +                   "uid": "77597898-d8d8-11e6-aea5-0e3c0c633889", +                   "resourceVersion": "126510787", +                   "creationTimestamp": "2017-01-12T15:04:50Z", +                   "labels": { +                       "mypvc": "database" +                   }, +                   "annotations": { +                       "pv.kubernetes.io/bind-completed": "yes", +                       "pv.kubernetes.io/bound-by-controller": "yes", +                       "v1.2-volume.experimental.kubernetes.io/provisioning-required": "volume.experimental.kubernetes.io/provisioning-completed" +                   } +               }, +               "spec": { +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                   "resources": { +                       "requests": { +                           "storage": "1Gi" +                       } +                   }, +                   "volumeName": "pv-aws-ow5vl" +               }, +               "status": { +                  "phase": "Bound", +                   "accessModes": [ +                       "ReadWriteOnce" +                   ], +                    "capacity": { +                      "storage": "1Gi" +                    } +               } +              }''' + +        mock_run.side_effect = [ +            (0, pvc, ''), +            (0, '', ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        results = OCPVC.run_ansible(params, False) + +        self.assertTrue(results['changed']) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_path_exists.side_effect = lambda _: False + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) 
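+
+    # Taken together, the lookups above pin down locate_oc_binary's search
+    # order: $PATH first, then /usr/local/bin and ~/bin as fallbacks, and the
+    # bare string 'oc' only when nothing matches. A quick interactive check
+    # (hypothetical session):
+    #   >>> locate_oc_binary()
+    #   '/usr/bin/oc'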
+ +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_shutil_which.side_effect = lambda _f, path=None: None + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) diff --git a/roles/lib_openshift/src/test/unit/test_oc_volume.py b/roles/lib_openshift/src/test/unit/test_oc_volume.py new file mode 100755 index 000000000..d91e22bc7 --- /dev/null +++ b/roles/lib_openshift/src/test/unit/test_oc_volume.py @@ -0,0 +1,633 @@ +''' + Unit tests for oc volume +''' + +import copy +import os +import six +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error +# place class in our python path +module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501 +sys.path.insert(0, module_path) +from oc_volume import OCVolume, locate_oc_binary  # noqa: E402 + + +class OCVolumeTest(unittest.TestCase): +    ''' +     Test class for OCVolume +    ''' +    params = {'name': 'oso-rhel7-zagg-web', +              'kubeconfig': '/etc/origin/master/admin.kubeconfig', +              'namespace': 'test', +              'labels': None, +              'state': 'present', +              'kind': 'dc', 
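+              # only the pvc-related keys are populated below; secret_name and
+              # configmap_name stay None because mount_type is 'pvc'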
+              'mount_path': None,
+              'secret_name': None,
+              'mount_type': 'pvc',
+              'claim_name': 'testclaim',
+              'claim_size': '1G',
+              'configmap_name': None,
+              'vol_name': 'test-volume',
+              'debug': False}
+
+    @mock.patch('oc_volume.Utils.create_tmpfile_copy')
+    @mock.patch('oc_volume.OCVolume._run')
+    def test_create_pvc(self, mock_cmd, mock_tmpfile_copy):
+        ''' Test creating a pvc volume on a deployment config '''
+        params = copy.deepcopy(OCVolumeTest.params)
+
+        dc = '''{
+                "kind": "DeploymentConfig",
+                "apiVersion": "v1",
+                "metadata": {
+                    "name": "oso-rhel7-zagg-web",
+                    "namespace": "new-monitoring",
+                    "selfLink": "/oapi/v1/namespaces/new-monitoring/deploymentconfigs/oso-rhel7-zagg-web",
+                    "uid": "f56e9dd2-7c13-11e6-b046-0e8844de0587",
+                    "resourceVersion": "137095771",
+                    "generation": 4,
+                    "creationTimestamp": "2016-09-16T13:46:24Z",
+                    "labels": {
+                        "app": "oso-rhel7-ops-base",
+                        "name": "oso-rhel7-zagg-web"
+                    },
+                    "annotations": {
+                        "openshift.io/generated-by": "OpenShiftNewApp"
+                    }
+                },
+                "spec": {
+                    "strategy": {
+                        "type": "Rolling",
+                        "rollingParams": {
+                            "updatePeriodSeconds": 1,
+                            "intervalSeconds": 1,
+                            "timeoutSeconds": 600,
+                            "maxUnavailable": "25%",
+                            "maxSurge": "25%"
+                        },
+                        "resources": {}
+                    },
+                    "triggers": [
+                        {
+                            "type": "ConfigChange"
+                        },
+                        {
+                            "type": "ImageChange",
+                            "imageChangeParams": {
+                                "automatic": true,
+                                "containerNames": [
+                                    "oso-rhel7-zagg-web"
+                                ],
+                                "from": {
+                                    "kind": "ImageStreamTag",
+                                    "namespace": "new-monitoring",
+                                    "name": "oso-rhel7-zagg-web:latest"
+                                },
+                                "lastTriggeredImage": "notused"
+                            }
+                        }
+                    ],
+                    "replicas": 10,
+                    "test": false,
+                    "selector": {
+                        "deploymentconfig": "oso-rhel7-zagg-web"
+                    },
+                    "template": {
+                        "metadata": {
+                            "creationTimestamp": null,
+                            "labels": {
+                                "app": "oso-rhel7-ops-base",
+                                "deploymentconfig": "oso-rhel7-zagg-web"
+                            },
+                            "annotations": {
+                                "openshift.io/generated-by": "OpenShiftNewApp"
+                            }
+                  
      }, +                        "spec": { +                            "volumes": [ +                                { +                                    "name": "monitoring-secrets", +                                    "secret": { +                                        "secretName": "monitoring-secrets" +                                    } +                                } +                            ], +                            "containers": [ +                                { +                                    "name": "oso-rhel7-zagg-web", +                                    "image": "notused", +                                    "resources": {}, +                                    "volumeMounts": [ +                                        { +                                            "name": "monitoring-secrets", +                                            "mountPath": "/secrets" +                                        } +                                    ], +                                    "terminationMessagePath": "/dev/termination-log", +                                    "imagePullPolicy": "Always", +                                    "securityContext": { +                                        "capabilities": {}, +                                        "privileged": false +                                    } +                                } +                            ], +                            "restartPolicy": "Always", +                            "terminationGracePeriodSeconds": 30, +                            "dnsPolicy": "ClusterFirst", +                            "securityContext": {} +                        } +                    } +                } +            }''' + +        post_dc = '''{ +                "kind": "DeploymentConfig", +                "apiVersion": "v1", +                "metadata": { +                    "name": "oso-rhel7-zagg-web", +                    "namespace": "new-monitoring", +                    "selfLink": "/oapi/v1/namespaces/new-monitoring/deploymentconfigs/oso-rhel7-zagg-web", +                    "uid": "f56e9dd2-7c13-11e6-b046-0e8844de0587", +                    "resourceVersion": "137095771", +                    "generation": 4, +                    "creationTimestamp": "2016-09-16T13:46:24Z", +                    "labels": { +                        "app": "oso-rhel7-ops-base", +                        "name": "oso-rhel7-zagg-web" +                    }, +                    "annotations": { +                        "openshift.io/generated-by": "OpenShiftNewApp" +                    } +                }, +                "spec": { +                    "strategy": { +                        "type": "Rolling", +                        "rollingParams": { +                            "updatePeriodSeconds": 1, +                            "intervalSeconds": 1, +                            "timeoutSeconds": 600, +                            "maxUnavailable": "25%", +                            "maxSurge": "25%" +                        }, +                        "resources": {} +                    }, +                    "triggers": [ +                        { +                            "type": "ConfigChange" +                        }, +                        { +                            "type": "ImageChange", +                            "imageChangeParams": { +                                "automatic": true, +                                "containerNames": [ +                      
              "oso-rhel7-zagg-web" +                                ], +                                "from": { +                                    "kind": "ImageStreamTag", +                                    "namespace": "new-monitoring", +                                    "name": "oso-rhel7-zagg-web:latest" +                                }, +                                "lastTriggeredImage": "notused" +                            } +                        } +                    ], +                    "replicas": 10, +                    "test": false, +                    "selector": { +                        "deploymentconfig": "oso-rhel7-zagg-web" +                    }, +                    "template": { +                        "metadata": { +                            "creationTimestamp": null, +                            "labels": { +                                "app": "oso-rhel7-ops-base", +                                "deploymentconfig": "oso-rhel7-zagg-web" +                            }, +                            "annotations": { +                                "openshift.io/generated-by": "OpenShiftNewApp" +                            } +                        }, +                        "spec": { +                            "volumes": [ +                                { +                                    "name": "monitoring-secrets", +                                    "secret": { +                                        "secretName": "monitoring-secrets" +                                    } +                                }, +                                { +                                    "name": "test-volume", +                                    "persistentVolumeClaim": { +                                        "claimName": "testclass", +                                        "claimSize": "1G" +                                    } +                                } +                            ], +                            "containers": [ +                                { +                                    "name": "oso-rhel7-zagg-web", +                                    "image": "notused", +                                    "resources": {}, +                                    "volumeMounts": [ +                                        { +                                            "name": "monitoring-secrets", +                                            "mountPath": "/secrets" +                                        }, +                                        { +                                            "name": "test-volume", +                                            "mountPath": "/data" +                                        } +                                    ], +                                    "terminationMessagePath": "/dev/termination-log", +                                    "imagePullPolicy": "Always", +                                    "securityContext": { +                                        "capabilities": {}, +                                        "privileged": false +                                    } +                                } +                            ], +                            "restartPolicy": "Always", +                            "terminationGracePeriodSeconds": 30, +                            "dnsPolicy": "ClusterFirst", +                            "securityContext": {} +                        } +                    } +                } +            }''' 
+
+        mock_cmd.side_effect = [
+            (0, dc, ''),
+            (0, dc, ''),
+            (0, '', ''),
+            (0, post_dc, ''),
+        ]
+
+        mock_tmpfile_copy.side_effect = [
+            '/tmp/mocked_kubeconfig',
+        ]
+
+        results = OCVolume.run_ansible(params, False)
+
+        self.assertTrue(results['changed'])
+        self.assertTrue(results['results']['results'][-1]['name'] == 'test-volume')
+
+    @mock.patch('oc_volume.Utils.create_tmpfile_copy')
+    @mock.patch('oc_volume.OCVolume._run')
+    def test_create_configmap(self, mock_cmd, mock_tmpfile_copy):
+        ''' Test mounting a configmap volume on a deployment config '''
+        params = copy.deepcopy(OCVolumeTest.params)
+        params.update({'mount_path': '/configmap',
+                       'mount_type': 'configmap',
+                       'configmap_name': 'configtest',
+                       'vol_name': 'configvol'})
+
+        dc = '''{
+                "kind": "DeploymentConfig",
+                "apiVersion": "v1",
+                "metadata": {
+                    "name": "oso-rhel7-zagg-web",
+                    "namespace": "new-monitoring",
+                    "selfLink": "/oapi/v1/namespaces/new-monitoring/deploymentconfigs/oso-rhel7-zagg-web",
+                    "uid": "f56e9dd2-7c13-11e6-b046-0e8844de0587",
+                    "resourceVersion": "137095771",
+                    "generation": 4,
+                    "creationTimestamp": "2016-09-16T13:46:24Z",
+                    "labels": {
+                        "app": "oso-rhel7-ops-base",
+                        "name": "oso-rhel7-zagg-web"
+                    },
+                    "annotations": {
+                        "openshift.io/generated-by": "OpenShiftNewApp"
+                    }
+                },
+                "spec": {
+                    "strategy": {
+                        "type": "Rolling",
+                        "rollingParams": {
+                            "updatePeriodSeconds": 1,
+                            "intervalSeconds": 1,
+                            "timeoutSeconds": 600,
+                            "maxUnavailable": "25%",
+                            "maxSurge": "25%"
+                        },
+                        "resources": {}
+                    },
+                    "triggers": [
+                        {
+                            "type": "ConfigChange"
+                        },
+                        {
+                            "type": "ImageChange",
+                            "imageChangeParams": {
+                                "automatic": true,
+                                "containerNames": [
+                                    "oso-rhel7-zagg-web"
+                                ],
+                                "from": {
+                                    "kind": "ImageStreamTag",
+                                    "namespace": "new-monitoring",
+                                    "name": "oso-rhel7-zagg-web:latest"
+                                },
+                                "lastTriggeredImage": "notused"
+                            }
+                        }
+                    ],
+                    "replicas": 10,
+                    "test": false,
+                    "selector": {
+                        "deploymentconfig": "oso-rhel7-zagg-web"
+                    },
+                    "template": {
+                        "metadata": {
+                            "creationTimestamp": null,
+                         
   "labels": { +                                "app": "oso-rhel7-ops-base", +                                "deploymentconfig": "oso-rhel7-zagg-web" +                            }, +                            "annotations": { +                                "openshift.io/generated-by": "OpenShiftNewApp" +                            } +                        }, +                        "spec": { +                            "volumes": [ +                                { +                                    "name": "monitoring-secrets", +                                    "secret": { +                                        "secretName": "monitoring-secrets" +                                    } +                                } +                            ], +                            "containers": [ +                                { +                                    "name": "oso-rhel7-zagg-web", +                                    "image": "notused", +                                    "resources": {}, +                                    "volumeMounts": [ +                                        { +                                            "name": "monitoring-secrets", +                                            "mountPath": "/secrets" +                                        } +                                    ], +                                    "terminationMessagePath": "/dev/termination-log", +                                    "imagePullPolicy": "Always", +                                    "securityContext": { +                                        "capabilities": {}, +                                        "privileged": false +                                    } +                                } +                            ], +                            "restartPolicy": "Always", +                            "terminationGracePeriodSeconds": 30, +                            "dnsPolicy": "ClusterFirst", +                            "securityContext": {} +                        } +                    } +                } +            }''' + +        post_dc = '''{ +                "kind": "DeploymentConfig", +                "apiVersion": "v1", +                "metadata": { +                    "name": "oso-rhel7-zagg-web", +                    "namespace": "new-monitoring", +                    "selfLink": "/oapi/v1/namespaces/new-monitoring/deploymentconfigs/oso-rhel7-zagg-web", +                    "uid": "f56e9dd2-7c13-11e6-b046-0e8844de0587", +                    "resourceVersion": "137095771", +                    "generation": 4, +                    "creationTimestamp": "2016-09-16T13:46:24Z", +                    "labels": { +                        "app": "oso-rhel7-ops-base", +                        "name": "oso-rhel7-zagg-web" +                    }, +                    "annotations": { +                        "openshift.io/generated-by": "OpenShiftNewApp" +                    } +                }, +                "spec": { +                    "strategy": { +                        "type": "Rolling", +                        "rollingParams": { +                            "updatePeriodSeconds": 1, +                            "intervalSeconds": 1, +                            "timeoutSeconds": 600, +                            "maxUnavailable": "25%", +                            "maxSurge": "25%" +                        }, +                        "resources": {} +                    }, +                    "triggers": [ +      
                  { +                            "type": "ConfigChange" +                        }, +                        { +                            "type": "ImageChange", +                            "imageChangeParams": { +                                "automatic": true, +                                "containerNames": [ +                                    "oso-rhel7-zagg-web" +                                ], +                                "from": { +                                    "kind": "ImageStreamTag", +                                    "namespace": "new-monitoring", +                                    "name": "oso-rhel7-zagg-web:latest" +                                }, +                                "lastTriggeredImage": "notused" +                            } +                        } +                    ], +                    "replicas": 10, +                    "test": false, +                    "selector": { +                        "deploymentconfig": "oso-rhel7-zagg-web" +                    }, +                    "template": { +                        "metadata": { +                            "creationTimestamp": null, +                            "labels": { +                                "app": "oso-rhel7-ops-base", +                                "deploymentconfig": "oso-rhel7-zagg-web" +                            }, +                            "annotations": { +                                "openshift.io/generated-by": "OpenShiftNewApp" +                            } +                        }, +                        "spec": { +                            "volumes": [ +                                { +                                    "name": "monitoring-secrets", +                                    "secret": { +                                        "secretName": "monitoring-secrets" +                                    } +                                }, +                                { +                                    "name": "configvol", +                                    "configMap": { +                                        "name": "configtest" +                                    } +                                } +                            ], +                            "containers": [ +                                { +                                    "name": "oso-rhel7-zagg-web", +                                    "image": "notused", +                                    "resources": {}, +                                    "volumeMounts": [ +                                        { +                                            "name": "monitoring-secrets", +                                            "mountPath": "/secrets" +                                        }, +                                        { +                                            "name": "configvol", +                                            "mountPath": "/configmap" +                                        } +                                    ], +                                    "terminationMessagePath": "/dev/termination-log", +                                    "imagePullPolicy": "Always", +                                    "securityContext": { +                                        "capabilities": {}, +                                        "privileged": false +                                    } +                                } +                            ], +                            
"restartPolicy": "Always", +                            "terminationGracePeriodSeconds": 30, +                            "dnsPolicy": "ClusterFirst", +                            "securityContext": {} +                        } +                    } +                } +            }''' + +        mock_cmd.side_effect = [ +            (0, dc, ''), +            (0, dc, ''), +            (0, '', ''), +            (0, post_dc, ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        results = OCVolume.run_ansible(params, False) + +        self.assertTrue(results['changed']) +        self.assertTrue(results['results']['results'][-1]['name'] == 'configvol') + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_path_exists.side_effect = lambda _: False + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_shutil_which.side_effect = lambda _f, path=None: None + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    
@mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py index 0411797b1..8b23533c8 100644 --- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py +++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py @@ -74,6 +74,7 @@ class ActionModule(ActionBase):                  result["failed"] = True                  result["msg"] = "One or more checks failed" +        result["changed"] = any(r.get("changed", False) for r in check_results.values())          return result      def load_known_checks(self): diff --git a/roles/openshift_health_checker/library/docker_container.py b/roles/openshift_health_checker/library/docker_container.py new file mode 100644 index 000000000..f81b4ec01 --- /dev/null +++ b/roles/openshift_health_checker/library/docker_container.py @@ -0,0 +1,2036 @@ +#!/usr/bin/python +# pylint: skip-file +# flake8: noqa + +# TODO: remove this file once openshift-ansible requires ansible >= 2.3. +# This file is a copy of +# https://github.com/ansible/ansible/blob/20bf02f/lib/ansible/modules/cloud/docker/docker_container.py. +# It has been temporarily vendored here due to issue https://github.com/ansible/ansible/issues/22323. + + +# Copyright 2016 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible.  If not, see <http://www.gnu.org/licenses/>. + +ANSIBLE_METADATA = {'status': ['preview'], +                    'supported_by': 'committer', +                    'version': '1.0'} + +DOCUMENTATION = ''' +--- +module: docker_container + +short_description: manage docker containers + +description: +  - Manage the life cycle of docker containers. +  - Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken. + +version_added: "2.1" + +options: +  blkio_weight: +    description: +      - Block IO (relative weight), between 10 and 1000. 
+    default: null +    required: false +  capabilities: +    description: +      - List of capabilities to add to the container. +    default: null +    required: false +  cleanup: +    description: +      - Use with I(detach) to remove the container after successful execution. +    default: false +    required: false +    version_added: "2.2" +  command: +    description: +      - Command to execute when the container starts. +    default: null +    required: false +  cpu_period: +    description: +      - Limit CPU CFS (Completely Fair Scheduler) period +    default: 0 +    required: false +  cpu_quota: +    description: +      - Limit CPU CFS (Completely Fair Scheduler) quota +    default: 0 +    required: false +  cpuset_cpus: +    description: +      - CPUs in which to allow execution C(1,3) or C(1-3). +    default: null +    required: false +  cpuset_mems: +    description: +      - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1) +    default: null +    required: false +  cpu_shares: +    description: +      - CPU shares (relative weight). +    default: null +    required: false +  detach: +    description: +      - Enable detached mode to leave the container running in background. +        If disabled, the task will reflect the status of the container run (failed if the command failed). +    default: true +    required: false +  devices: +    description: +      - "List of host device bindings to add to the container. Each binding is a mapping expressed +        in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>" +    default: null +    required: false +  dns_servers: +    description: +      - List of custom DNS servers. +    default: null +    required: false +  dns_search_domains: +    description: +      - List of custom DNS search domains. +    default: null +    required: false +  env: +    description: +      - Dictionary of key,value pairs. +    default: null +    required: false +  env_file: +    version_added: "2.2" +    description: +      - Path to a file containing environment variables I(FOO=BAR). +      - If variable also present in C(env), then C(env) value will override. +      - Requires docker-py >= 1.4.0. +    default: null +    required: false +  entrypoint: +    description: +      - Command that overwrites the default ENTRYPOINT of the image. +    default: null +    required: false +  etc_hosts: +    description: +      - Dict of host-to-IP mappings, where each host name is a key in the dictionary. +        Each host name will be added to the container's /etc/hosts file. +    default: null +    required: false +  exposed_ports: +    description: +      - List of additional container ports which informs Docker that the container +        listens on the specified network ports at runtime. +        If the port is already exposed using EXPOSE in a Dockerfile, it does not +        need to be exposed again. +    default: null +    required: false +    aliases: +      - exposed +  force_kill: +    description: +      - Use the kill command when stopping a running container. +    default: false +    required: false +  groups: +    description: +      - List of additional group names and/or IDs that the container process will run as. +    default: null +    required: false +  hostname: +    description: +      - Container hostname. 
+    default: null
+    required: false
+  ignore_image:
+    description:
+      - When C(state) is I(present) or I(started) the module compares the configuration of an existing
+        container to requested configuration. The evaluation includes the image version. If
+        the image version in the registry does not match the container, the container will be
+        recreated. Stop this behavior by setting C(ignore_image) to I(True).
+    default: false
+    required: false
+    version_added: "2.2"
+  image:
+    description:
+      - Repository path and tag used to create the container. If an image is not found or pull is true, the image
+        will be pulled from the registry. If no tag is included, 'latest' will be used.
+    default: null
+    required: false
+  interactive:
+    description:
+      - Keep stdin open after a container is launched, even if not attached.
+    default: false
+    required: false
+  ipc_mode:
+    description:
+      - Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another
+        container's IPC namespace or 'host' to use the host's IPC namespace within the container.
+    default: null
+    required: false
+  keep_volumes:
+    description:
+      - Retain volumes associated with a removed container.
+    default: true
+    required: false
+  kill_signal:
+    description:
+      - Override default signal used to kill a running container.
+    default: null
+    required: false
+  kernel_memory:
+    description:
+      - "Kernel memory limit (format: <number>[<unit>]). Number is a positive integer.
+        Unit can be one of b, k, m, or g. Minimum is 4M."
+    default: 0
+    required: false
+  labels:
+     description:
+       - Dictionary of key value pairs.
+     default: null
+     required: false
+  links:
+    description:
+      - List of name aliases for linked containers in the format C(container_name:alias)
+    default: null
+    required: false
+  log_driver:
+    description:
+      - Specify the logging driver. Docker uses json-file by default.
+    choices:
+      - none
+      - json-file
+      - syslog
+      - journald
+      - gelf
+      - fluentd
+      - awslogs
+      - splunk
+    default: null
+    required: false
+  log_options:
+    description:
+      - Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/
+        for details.
+    required: false
+    default: null
+  mac_address:
+    description:
+      - Container MAC address (e.g. 92:d0:c6:0a:29:33)
+    default: null
+    required: false
+  memory:
+    description:
+      - "Memory limit (format: <number>[<unit>]). Number is a positive integer.
+        Unit can be one of b, k, m, or g"
+    default: 0
+    required: false
+  memory_reservation:
+    description:
+      - "Memory soft limit (format: <number>[<unit>]). Number is a positive integer.
+        Unit can be one of b, k, m, or g"
+    default: 0
+    required: false
+  memory_swap:
+    description:
+      - Total memory limit (memory + swap, format:<number>[<unit>]).
+        Number is a positive integer. Unit can be one of b, k, m, or g.
+    default: 0
+    required: false
+  memory_swappiness:
+    description:
+        - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+    default: 0
+    required: false
+  name:
+    description:
+      - Assign a name to a new container or match an existing container.
+      - When identifying an existing container name may be a name or a long or short container ID. +    required: true +  network_mode: +    description: +      - Connect the container to a network. +    choices: +      - bridge +      - container:<name|id> +      - host +      - none +    default: null +    required: false +  networks: +     description: +       - List of networks the container belongs to. +       - Each network is a dict with keys C(name), C(ipv4_address), C(ipv6_address), C(links), C(aliases). +       - For each network C(name) is required, all other keys are optional. +       - If included, C(links) or C(aliases) are lists. +       - For examples of the data structure and usage see EXAMPLES below. +       - To remove a container from one or more networks, use the C(purge_networks) option. +     default: null +     required: false +     version_added: "2.2" +  oom_killer: +    description: +      - Whether or not to disable OOM Killer for the container. +    default: false +    required: false +  oom_score_adj: +    description: +      - An integer value containing the score given to the container in order to tune OOM killer preferences. +    default: 0 +    required: false +    version_added: "2.2" +  paused: +    description: +      - Use with the started state to pause running processes inside the container. +    default: false +    required: false +  pid_mode: +    description: +      - Set the PID namespace mode for the container. Currently only supports 'host'. +    default: null +    required: false +  privileged: +    description: +      - Give extended privileges to the container. +    default: false +    required: false +  published_ports: +    description: +      - List of ports to publish from the container to the host. +      - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a +        container port, 9000 is a host port, and 0.0.0.0 is a host interface." +      - Container ports must be exposed either in the Dockerfile or via the C(expose) option. +      - A value of all will publish all exposed container ports to random host ports, ignoring +        any other mappings. +      - If C(networks) parameter is provided, will inspect each network to see if there exists +        a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4. +        If such a network is found, then published ports where no host IP address is specified +        will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4. +        Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4 +        value encountered in the list of C(networks) is the one that will be used. +    aliases: +      - ports +    required: false +    default: null +  pull: +    description: +       - If true, always pull the latest version of an image. Otherwise, will only pull an image when missing. +    default: false +    required: false +  purge_networks: +    description: +       - Remove the container from ALL networks not included in C(networks) parameter. +       - Any default networks such as I(bridge), if not found in C(networks), will be removed as well. +    default: false +    required: false +    version_added: "2.2" +  read_only: +    description: +      - Mount the container's root file system as read-only. +    default: false +    required: false +  recreate: +    description: +      - Use with present and started states to force the re-creation of an existing container. 
+    default: false +    required: false +  restart: +    description: +      - Use with started state to force a matching container to be stopped and restarted. +    default: false +    required: false +  restart_policy: +    description: +      - Container restart policy. Place quotes around I(no) option. +    choices: +      - always +      - no +      - on-failure +      - unless-stopped +    default: on-failure +    required: false +  restart_retries: +    description: +       - Use with restart policy to control maximum number of restart attempts. +    default: 0 +    required: false +  shm_size: +    description: +      - Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`. +        Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). +      - Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`. +    default: null +    required: false +  security_opts: +    description: +      - List of security options in the form of C("label:user:User") +    default: null +    required: false +  state: +    description: +      - 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container +         rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.' +      - 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no +        container matches the name, a container will be created. If a container matches the name but the provided configuration +        does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created +        with the requested config. Image version will be taken into account when comparing configuration. To ignore image +        version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use +        force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed +        container.' +      - 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container +        matches the name, a container will be created and started. If a container matching the name is found but the +        configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed +        and a new container will be created with the requested configuration and started. Image version will be taken into +        account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always +        re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and +        restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated +        with a removed container.' +      - 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped +        state. Use force_kill to kill a container rather than stopping it.' +    required: false +    default: started +    choices: +      - absent +      - present +      - stopped +      - started +  stop_signal: +    description: +      - Override default signal used to stop the container. 
+    default: null
+    required: false
+  stop_timeout:
+    description:
+      - Number of seconds to wait for the container to stop before sending SIGKILL.
+    required: false
+    default: null
+  trust_image_content:
+    description:
+      - If true, skip image verification.
+    default: false
+    required: false
+  tty:
+    description:
+      - Allocate a pseudo-TTY.
+    default: false
+    required: false
+  ulimits:
+    description:
+      - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)"
+    default: null
+    required: false
+  user:
+    description:
+      - Sets the username or UID used and optionally the groupname or GID for the specified command.
+      - "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
+    default: null
+    required: false
+  uts:
+    description:
+      - Set the UTS namespace mode for the container.
+    default: null
+    required: false
+  volumes:
+    description:
+      - List of volumes to mount within the container.
+      - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+      - You can specify a read mode for the mount with either C(ro) or C(rw).
+      - SELinux hosts can additionally use C(z) or C(Z) to use a shared or
+        private label for the volume.
+    default: null
+    required: false
+  volume_driver:
+    description:
+      - The container volume driver.
+    default: none
+    required: false
+  volumes_from:
+    description:
+      - List of container names or IDs to get volumes from.
+    default: null
+    required: false
+extends_documentation_fragment:
+    - docker
+
+author:
+    - "Cove Schneider (@cove)"
+    - "Joshua Conner (@joshuaconner)"
+    - "Pavel Antonov (@softzilla)"
+    - "Thomas Steinbach (@ThomasSteinbach)"
+    - "Philippe Jandot (@zfil)"
+    - "Daan Oosterveld (@dusdanig)"
+    - "James Tanner (@jctanner)"
+    - "Chris Houseknecht (@chouseknecht)"
+
+requirements:
+    - "python >= 2.6"
+    - "docker-py >= 1.7.0"
+    - "Docker API >= 1.20"
+'''
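+
+# A rough sketch of how the options documented above flow through this module:
+# each option arrives in client.module.params and is copied onto a
+# TaskParameters instance (defined below), after which the size-style options
+# listed in REQUIRES_CONVERSION_TO_BYTES are converted via human_to_bytes,
+# e.g. (illustrative values):
+#
+#     human_to_bytes('256m')  # -> 268435456
+#     human_to_bytes('1g')    # -> 1073741824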
"syslog-tag" option was renamed to "tag" for +      # older docker installs, use "syslog-tag" instead +      tag: myservice + +- name: Create db container and connect to network +  docker_container: +    name: db_test +    image: "postgres:latest" +    networks: +      - name: "{{ docker_network_name }}" + +- name: Start container, connect to network and link +  docker_container: +    name: sleeper +    image: ubuntu:14.04 +    networks: +      - name: TestingNet +        ipv4_address: "172.1.1.100" +        aliases: +          - sleepyzz +        links: +          - db_test:db +      - name: TestingNet2 + +- name: Start a container with a command +  docker_container: +    name: sleepy +    image: ubuntu:14.04 +    command: sleep infinity + +- name: Add container to networks +  docker_container: +    name: sleepy +    networks: +      - name: TestingNet +        ipv4_address: 172.1.1.18 +        links: +          - sleeper +      - name: TestingNet2 +        ipv4_address: 172.1.10.20 + +- name: Update network with aliases +  docker_container: +    name: sleepy +    networks: +      - name: TestingNet +        aliases: +          - sleepyz +          - zzzz + +- name: Remove container from one network +  docker_container: +    name: sleepy +    networks: +      - name: TestingNet2 +    purge_networks: yes + +- name: Remove container from all networks +  docker_container: +    name: sleepy +    purge_networks: yes + +''' + +RETURN = ''' +docker_container: +    description: +      - Before 2.3 this was 'ansible_docker_container' but was renamed due to conflicts with the connection plugin. +      - Facts representing the current state of the container. Matches the docker inspection output. +      - Note that facts are not part of registered vars but accessible directly. +      - Empty if C(state) is I(absent) +      - If detached is I(False), will include Output attribute containing any output from container run. +    returned: always +    type: dict +    sample: '{ +        "AppArmorProfile": "", +        "Args": [], +        "Config": { +            "AttachStderr": false, +            "AttachStdin": false, +            "AttachStdout": false, +            "Cmd": [ +                "/usr/bin/supervisord" +            ], +            "Domainname": "", +            "Entrypoint": null, +            "Env": [ +                "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" +            ], +            "ExposedPorts": { +                "443/tcp": {}, +                "80/tcp": {} +            }, +            "Hostname": "8e47bf643eb9", +            "Image": "lnmp_nginx:v1", +            "Labels": {}, +            "OnBuild": null, +            "OpenStdin": false, +            "StdinOnce": false, +            "Tty": false, +            "User": "", +            "Volumes": { +                "/tmp/lnmp/nginx-sites/logs/": {} +            }, +            ... 
+    }' +''' + +import re + +from ansible.module_utils.docker_common import * + +try: +    from docker import utils +    if HAS_DOCKER_PY_2: +        from docker.types import Ulimit +    else: +        from docker.utils.types import Ulimit +except: +    # missing docker-py handled in ansible.module_utils.docker +    pass + + +REQUIRES_CONVERSION_TO_BYTES = [ +    'memory', +    'memory_reservation', +    'memory_swap', +    'shm_size' +] + +VOLUME_PERMISSIONS = ('rw', 'ro', 'z', 'Z') + +class TaskParameters(DockerBaseClass): +    ''' +    Access and parse module parameters +    ''' + +    def __init__(self, client): +        super(TaskParameters, self).__init__() +        self.client = client + +        self.blkio_weight = None +        self.capabilities = None +        self.cleanup = None +        self.command = None +        self.cpu_period = None +        self.cpu_quota = None +        self.cpuset_cpus = None +        self.cpuset_mems = None +        self.cpu_shares = None +        self.detach = None +        self.debug = None +        self.devices = None +        self.dns_servers = None +        self.dns_opts = None +        self.dns_search_domains = None +        self.env = None +        self.env_file = None +        self.entrypoint = None +        self.etc_hosts = None +        self.exposed_ports = None +        self.force_kill = None +        self.groups = None +        self.hostname = None +        self.ignore_image = None +        self.image = None +        self.interactive = None +        self.ipc_mode = None +        self.keep_volumes = None +        self.kernel_memory = None +        self.kill_signal = None +        self.labels = None +        self.links = None +        self.log_driver = None +        self.log_options = None +        self.mac_address = None +        self.memory = None +        self.memory_reservation = None +        self.memory_swap = None +        self.memory_swappiness = None +        self.name = None +        self.network_mode = None +        self.networks = None +        self.oom_killer = None +        self.oom_score_adj = None +        self.paused = None +        self.pid_mode = None +        self.privileged = None +        self.purge_networks = None +        self.pull = None +        self.read_only = None +        self.recreate = None +        self.restart = None +        self.restart_retries = None +        self.restart_policy = None +        self.shm_size = None +        self.security_opts = None +        self.state = None +        self.stop_signal = None +        self.stop_timeout = None +        self.trust_image_content = None +        self.tty = None +        self.user = None +        self.uts = None +        self.volumes = None +        self.volume_binds = dict() +        self.volumes_from = None +        self.volume_driver = None + +        for key, value in client.module.params.items(): +            setattr(self, key, value) + +        for param_name in REQUIRES_CONVERSION_TO_BYTES: +            if client.module.params.get(param_name): +                try: +                    setattr(self, param_name, human_to_bytes(client.module.params.get(param_name))) +                except ValueError as exc: +                    self.fail("Failed to convert %s to bytes: %s" % (param_name, exc)) + +        self.publish_all_ports = False +        self.published_ports = self._parse_publish_ports() +        if self.published_ports in ('all', 'ALL'): +            self.publish_all_ports = True +            self.published_ports = None + +        self.ports = 
self._parse_exposed_ports(self.published_ports) +        self.log("expose ports:") +        self.log(self.ports, pretty_print=True) + +        self.links = self._parse_links(self.links) + +        if self.volumes: +            self.volumes = self._expand_host_paths() + +        self.env = self._get_environment() +        self.ulimits = self._parse_ulimits() +        self.log_config = self._parse_log_config() +        self.exp_links = None +        self.volume_binds = self._get_volume_binds(self.volumes) + +        self.log("volumes:") +        self.log(self.volumes, pretty_print=True) +        self.log("volume binds:") +        self.log(self.volume_binds, pretty_print=True) + +        if self.networks: +            for network in self.networks: +                if not network.get('name'): +                    self.fail("Parameter error: network must have a name attribute.") +                network['id'] = self._get_network_id(network['name']) +                if not network['id']: +                    self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name']) +                if network.get('links'): +                    network['links'] = self._parse_links(network['links']) + +    def fail(self, msg): +        self.client.module.fail_json(msg=msg) + +    @property +    def update_parameters(self): +        ''' +        Returns parameters used to update a container +        ''' + +        update_parameters = dict( +            blkio_weight='blkio_weight', +            cpu_period='cpu_period', +            cpu_quota='cpu_quota', +            cpu_shares='cpu_shares', +            cpuset_cpus='cpuset_cpus', +            mem_limit='memory', +            mem_reservation='mem_reservation', +            memswap_limit='memory_swap', +            kernel_memory='kernel_memory' +        ) +        result = dict() +        for key, value in update_parameters.items(): +            if getattr(self, value, None) is not None: +                result[key] = getattr(self, value) +        return result + +    @property +    def create_parameters(self): +        ''' +        Returns parameters used to create a container +        ''' +        create_params = dict( +            command='command', +            hostname='hostname', +            user='user', +            detach='detach', +            stdin_open='interactive', +            tty='tty', +            ports='ports', +            environment='env', +            name='name', +            entrypoint='entrypoint', +            cpu_shares='cpu_shares', +            mac_address='mac_address', +            labels='labels', +            stop_signal='stop_signal', +            volume_driver='volume_driver', +        ) + +        result = dict( +            host_config=self._host_config(), +            volumes=self._get_mounts(), +        ) + +        for key, value in create_params.items(): +            if getattr(self, value, None) is not None: +                result[key] = getattr(self, value) +        return result + +    def _expand_host_paths(self): +        new_vols = [] +        for vol in self.volumes: +            if ':' in vol: +                if len(vol.split(':')) == 3: +                    host, container, mode = vol.split(':') +                    if re.match(r'[\.~]', host): +                        host = os.path.abspath(host) +                    new_vols.append("%s:%s:%s" % (host, container, mode)) +                    continue +                elif len(vol.split(':')) == 2: +                    parts 
= vol.split(':') +                    if parts[1] not in VOLUME_PERMISSIONS and re.match(r'[\.~]', parts[0]): +                        host = os.path.abspath(parts[0]) +                        new_vols.append("%s:%s:rw" % (host, parts[1])) +                        continue +            new_vols.append(vol) +        return new_vols + +    def _get_mounts(self): +        ''' +        Return a list of container mounts. +        :return: +        ''' +        result = [] +        if self.volumes: +            for vol in self.volumes: +                if ':' in vol: +                    if len(vol.split(':')) == 3: +                        host, container, _ = vol.split(':') +                        result.append(container) +                        continue +                    if len(vol.split(':')) == 2: +                        parts = vol.split(':') +                        if parts[1] not in VOLUME_PERMISSIONS: +                            result.append(parts[1]) +                            continue +                result.append(vol) +        self.log("mounts:") +        self.log(result, pretty_print=True) +        return result + +    def _host_config(self): +        ''' +        Returns parameters used to create a HostConfig object +        ''' + +        host_config_params=dict( +            port_bindings='published_ports', +            publish_all_ports='publish_all_ports', +            links='links', +            privileged='privileged', +            dns='dns_servers', +            dns_search='dns_search_domains', +            binds='volume_binds', +            volumes_from='volumes_from', +            network_mode='network_mode', +            cap_add='capabilities', +            extra_hosts='etc_hosts', +            read_only='read_only', +            ipc_mode='ipc_mode', +            security_opt='security_opts', +            ulimits='ulimits', +            log_config='log_config', +            mem_limit='memory', +            memswap_limit='memory_swap', +            mem_swappiness='memory_swappiness', +            oom_score_adj='oom_score_adj', +            shm_size='shm_size', +            group_add='groups', +            devices='devices', +            pid_mode='pid_mode' +        ) +        params = dict() +        for key, value in host_config_params.items(): +            if getattr(self, value, None) is not None: +                params[key] = getattr(self, value) + +        if self.restart_policy: +            params['restart_policy'] = dict(Name=self.restart_policy, +                                            MaximumRetryCount=self.restart_retries) + +        return self.client.create_host_config(**params) + +    @property +    def default_host_ip(self): +        ip = '0.0.0.0' +        if not self.networks: +            return ip +        for net in self.networks: +            if net.get('name'): +                network = self.client.inspect_network(net['name']) +                if network.get('Driver') == 'bridge' and \ +                   network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'): +                    ip = network['Options']['com.docker.network.bridge.host_binding_ipv4'] +                    break +        return ip + +    def _parse_publish_ports(self): +        ''' +        Parse ports from docker CLI syntax +        ''' +        if self.published_ports is None: +            return None + +        if 'all' in self.published_ports: +            return 'all' + +        default_ip = self.default_host_ip + +        binds = {} +       
 for port in self.published_ports: +            parts = str(port).split(':') +            container_port = parts[-1] +            if '/' not in container_port: +                container_port = int(parts[-1]) + +            p_len = len(parts) +            if p_len == 1: +                bind = (default_ip,) +            elif p_len == 2: +                bind = (default_ip, int(parts[0])) +            elif p_len == 3: +                bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],) + +            if container_port in binds: +                old_bind = binds[container_port] +                if isinstance(old_bind, list): +                    old_bind.append(bind) +                else: +                    binds[container_port] = [binds[container_port], bind] +            else: +                binds[container_port] = bind +        return binds + +    @staticmethod +    def _get_volume_binds(volumes): +        ''' +        Extract host bindings, if any, from list of volume mapping strings. + +        :return: dictionary of bind mappings +        ''' +        result = dict() +        if volumes: +            for vol in volumes: +                host = None +                if ':' in vol: +                    if len(vol.split(':')) == 3: +                        host, container, mode = vol.split(':') +                    if len(vol.split(':')) == 2: +                        parts = vol.split(':') +                        if parts[1] not in VOLUME_PERMISSIONS: +                            host, container, mode = (vol.split(':') + ['rw']) +                if host is not None: +                    result[host] = dict( +                        bind=container, +                        mode=mode +                    ) +        return result + +    def _parse_exposed_ports(self, published_ports): +        ''' +        Parse exposed ports from docker CLI-style ports syntax. 
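+        Entries may be plain ports ("8080") or include a protocol ("8080/udp"); any published port not already covered is appended to the exposed list.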
+        ''' +        exposed = [] +        if self.exposed_ports: +            for port in self.exposed_ports: +                port = str(port).strip() +                protocol = 'tcp' +                match = re.search(r'(/.+$)', port) +                if match: +                    protocol = match.group(1).replace('/', '') +                    port = re.sub(r'/.+$', '', port) +                exposed.append((port, protocol)) +        if published_ports: +            # Any published port should also be exposed +            for publish_port in published_ports: +                match = False +                if isinstance(publish_port, basestring) and '/' in publish_port: +                    port, protocol = publish_port.split('/') +                    port = int(port) +                else: +                    protocol = 'tcp' +                    port = int(publish_port) +                for exposed_port in exposed: +                    if isinstance(exposed_port[0], basestring) and '-' in exposed_port[0]: +                        start_port, end_port = exposed_port[0].split('-') +                        if int(start_port) <= port <= int(end_port): +                            match = True +                    elif exposed_port[0] == port: +                        match = True +                if not match: +                    exposed.append((port, protocol)) +        return exposed + +    @staticmethod +    def _parse_links(links): +        ''' +        Turn links into a dictionary +        ''' +        if links is None: +            return None + +        result = {} +        for link in links: +            parsed_link = link.split(':', 1) +            if len(parsed_link) == 2: +                result[parsed_link[0]] = parsed_link[1] +            else: +                result[parsed_link[0]] = parsed_link[0] +        return result + +    def _parse_ulimits(self): +        ''' +        Turn ulimits into an array of Ulimit objects +        ''' +        if self.ulimits is None: +            return None + +        results = [] +        for limit in self.ulimits: +            limits = dict() +            pieces = limit.split(':') +            if len(pieces) >= 2: +                limits['name'] = pieces[0] +                limits['soft'] = int(pieces[1]) +                limits['hard'] = int(pieces[1]) +            if len(pieces) == 3: +                limits['hard'] = int(pieces[2]) +            try: +                results.append(Ulimit(**limits)) +            except ValueError as exc: +                self.fail("Error parsing ulimits value %s - %s" % (limit, exc)) +        return results + +    def _parse_log_config(self): +        ''' +        Create a LogConfig object +        ''' +        if self.log_driver is None: +            return None + +        options = dict( +            Type=self.log_driver, +            Config = dict() +        ) + +        if self.log_options is not None: +            options['Config'] = self.log_options + +        try: +            return LogConfig(**options) +        except ValueError as exc: +            self.fail('Error parsing logging options - %s' % (exc)) + +    def _get_environment(self): +        """ +        If environment file is combined with explicit environment variables, the explicit environment variables +        take precedence. 
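+        All values are coerced to strings before being handed to the container.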
+        """ +        final_env = {} +        if self.env_file: +            parsed_env_file = utils.parse_env_file(self.env_file) +            for name, value in parsed_env_file.items(): +                final_env[name] = str(value) +        if self.env: +            for name, value in self.env.items(): +                final_env[name] = str(value) +        return final_env + +    def _get_network_id(self, network_name): +        network_id = None +        try: +            for network in self.client.networks(names=[network_name]): +                if network['Name'] == network_name: +                    network_id = network['Id'] +                    break +        except Exception as exc: +            self.fail("Error getting network id for %s - %s" % (network_name, str(exc))) +        return network_id + + + +class Container(DockerBaseClass): + +    def __init__(self, container, parameters): +        super(Container, self).__init__() +        self.raw = container +        self.Id = None +        self.container = container +        if container: +            self.Id = container['Id'] +            self.Image = container['Image'] +        self.log(self.container, pretty_print=True) +        self.parameters = parameters +        self.parameters.expected_links = None +        self.parameters.expected_ports = None +        self.parameters.expected_exposed = None +        self.parameters.expected_volumes = None +        self.parameters.expected_ulimits = None +        self.parameters.expected_etc_hosts = None +        self.parameters.expected_env = None + +    def fail(self, msg): +        self.parameters.client.module.fail_json(msg=msg) + +    @property +    def exists(self): +        return True if self.container else False + +    @property +    def running(self): +        if self.container and self.container.get('State'): +            if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False): +                return True +        return False + +    def has_different_configuration(self, image): +        ''' +        Diff parameters vs existing container config. Returns tuple: (True | False, List of differences) +        ''' +        self.log('Starting has_different_configuration') +        self.parameters.expected_entrypoint = self._get_expected_entrypoint() +        self.parameters.expected_links = self._get_expected_links() +        self.parameters.expected_ports = self._get_expected_ports() +        self.parameters.expected_exposed = self._get_expected_exposed(image) +        self.parameters.expected_volumes = self._get_expected_volumes(image) +        self.parameters.expected_binds = self._get_expected_binds(image) +        self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits) +        self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts') +        self.parameters.expected_env = self._get_expected_env(image) +        self.parameters.expected_cmd = self._get_expected_cmd() +        self.parameters.expected_devices = self._get_expected_devices() + +        if not self.container.get('HostConfig'): +            self.fail("has_config_diff: Error parsing container properties. HostConfig missing.") +        if not self.container.get('Config'): +            self.fail("has_config_diff: Error parsing container properties. Config missing.") +        if not self.container.get('NetworkSettings'): +            self.fail("has_config_diff: Error parsing container properties. 
NetworkSettings missing.") + +        host_config = self.container['HostConfig'] +        log_config = host_config.get('LogConfig', dict()) +        restart_policy = host_config.get('RestartPolicy', dict()) +        config = self.container['Config'] +        network = self.container['NetworkSettings'] + +        # The previous version of the docker module ignored the detach state by +        # assuming if the container was running, it must have been detached. +        detach = not (config.get('AttachStderr') and config.get('AttachStdout')) + +        # "ExposedPorts": null returns None type & causes AttributeError - PR #5517 +        if config.get('ExposedPorts') is not None: +            expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()] +        else: +            expected_exposed = [] + +        # Map parameters to container inspect results +        config_mapping = dict( +            image=config.get('Image'), +            expected_cmd=config.get('Cmd'), +            hostname=config.get('Hostname'), +            user=config.get('User'), +            detach=detach, +            interactive=config.get('OpenStdin'), +            capabilities=host_config.get('CapAdd'), +            expected_devices=host_config.get('Devices'), +            dns_servers=host_config.get('Dns'), +            dns_opts=host_config.get('DnsOptions'), +            dns_search_domains=host_config.get('DnsSearch'), +            expected_env=(config.get('Env') or []), +            expected_entrypoint=config.get('Entrypoint'), +            expected_etc_hosts=host_config['ExtraHosts'], +            expected_exposed=expected_exposed, +            groups=host_config.get('GroupAdd'), +            ipc_mode=host_config.get("IpcMode"), +            labels=config.get('Labels'), +            expected_links=host_config.get('Links'), +            log_driver=log_config.get('Type'), +            log_options=log_config.get('Config'), +            mac_address=network.get('MacAddress'), +            memory_swappiness=host_config.get('MemorySwappiness'), +            network_mode=host_config.get('NetworkMode'), +            oom_killer=host_config.get('OomKillDisable'), +            oom_score_adj=host_config.get('OomScoreAdj'), +            pid_mode=host_config.get('PidMode'), +            privileged=host_config.get('Privileged'), +            expected_ports=host_config.get('PortBindings'), +            read_only=host_config.get('ReadonlyRootfs'), +            restart_policy=restart_policy.get('Name'), +            restart_retries=restart_policy.get('MaximumRetryCount'), +            # Cannot test shm_size, as shm_size is not included in container inspection results. 
+            # shm_size=host_config.get('ShmSize'),
+            security_opts=host_config.get("SecurityOpt"),
+            stop_signal=config.get("StopSignal"),
+            tty=config.get('Tty'),
+            expected_ulimits=host_config.get('Ulimits'),
+            uts=host_config.get('UTSMode'),
+            expected_volumes=config.get('Volumes'),
+            expected_binds=host_config.get('Binds'),
+            volumes_from=host_config.get('VolumesFrom'),
+            volume_driver=host_config.get('VolumeDriver')
+        )
+
+        differences = []
+        for key, value in config_mapping.items():
+            self.log('check differences %s %s vs %s' % (key, getattr(self.parameters, key), str(value)))
+            if getattr(self.parameters, key, None) is not None:
+                if isinstance(getattr(self.parameters, key), list) and isinstance(value, list):
+                    if len(getattr(self.parameters, key)) > 0 and isinstance(getattr(self.parameters, key)[0], dict):
+                        # compare list of dictionaries
+                        self.log("comparing list of dict: %s" % key)
+                        match = self._compare_dictionary_lists(getattr(self.parameters, key), value)
+                    else:
+                        # compare two lists. Is list_a in list_b?
+                        self.log("comparing lists: %s" % key)
+                        set_a = set(getattr(self.parameters, key))
+                        set_b = set(value)
+                        match = (set_a <= set_b)
+                elif isinstance(getattr(self.parameters, key), dict) and isinstance(value, dict):
+                    # compare two dicts
+                    self.log("comparing two dicts: %s" % key)
+                    match = self._compare_dicts(getattr(self.parameters, key), value)
+                else:
+                    # primitive compare
+                    self.log("primitive compare: %s" % key)
+                    match = (getattr(self.parameters, key) == value)
+
+                if not match:
+                    # no match.
record the differences +                    item = dict() +                    item[key] = dict( +                        parameter=getattr(self.parameters, key), +                        container=value +                    ) +                    differences.append(item) + +        has_differences = True if len(differences) > 0 else False +        return has_differences, differences + +    def _compare_dictionary_lists(self, list_a, list_b): +        ''' +        If all of list_a exists in list_b, return True +        ''' +        if not isinstance(list_a, list) or not isinstance(list_b, list): +            return False +        matches = 0 +        for dict_a in list_a: +            for dict_b in list_b: +                if self._compare_dicts(dict_a, dict_b): +                    matches += 1 +                    break +        result = (matches == len(list_a)) +        return result + +    def _compare_dicts(self, dict_a, dict_b): +        ''' +        If dict_a in dict_b, return True +        ''' +        if not isinstance(dict_a, dict) or not isinstance(dict_b, dict): +            return False +        for key, value in dict_a.items(): +            if isinstance(value, dict): +                match = self._compare_dicts(value, dict_b.get(key)) +            elif isinstance(value, list): +                if len(value) > 0 and isinstance(value[0], dict): +                    match = self._compare_dictionary_lists(value, dict_b.get(key)) +                else: +                    set_a = set(value) +                    set_b = set(dict_b.get(key)) +                    match = (set_a == set_b) +            else: +                match = (value == dict_b.get(key)) +            if not match: +                return False +        return True + +    def has_different_resource_limits(self): +        ''' +        Diff parameters and container resource limits +        ''' +        if not self.container.get('HostConfig'): +            self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.") + +        host_config = self.container['HostConfig'] + +        config_mapping = dict( +            cpu_period=host_config.get('CpuPeriod'), +            cpu_quota=host_config.get('CpuQuota'), +            cpuset_cpus=host_config.get('CpusetCpus'), +            cpuset_mems=host_config.get('CpusetMems'), +            cpu_shares=host_config.get('CpuShares'), +            kernel_memory=host_config.get("KernelMemory"), +            memory=host_config.get('Memory'), +            memory_reservation=host_config.get('MemoryReservation'), +            memory_swap=host_config.get('MemorySwap'), +            oom_score_adj=host_config.get('OomScoreAdj'), +        ) + +        differences = [] +        for key, value in config_mapping.items(): +            if getattr(self.parameters, key, None) and getattr(self.parameters, key) != value: +                # no match. 
record the differences +                item = dict() +                item[key] = dict( +                    parameter=getattr(self.parameters, key), +                    container=value +                ) +                differences.append(item) +        different = (len(differences) > 0) +        return different, differences + +    def has_network_differences(self): +        ''' +        Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6 +        ''' +        different = False +        differences = [] + +        if not self.parameters.networks: +            return different, differences + +        if not self.container.get('NetworkSettings'): +            self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.") + +        connected_networks = self.container['NetworkSettings']['Networks'] +        for network in self.parameters.networks: +            if connected_networks.get(network['name'], None) is None: +                different = True +                differences.append(dict( +                    parameter=network, +                    container=None +                )) +            else: +                diff = False +                if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'): +                    diff = True +                if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'): +                    diff = True +                if network.get('aliases') and not connected_networks[network['name']].get('Aliases'): +                    diff = True +                if network.get('aliases') and connected_networks[network['name']].get('Aliases'): +                    for alias in network.get('aliases'): +                        if alias not in connected_networks[network['name']].get('Aliases', []): +                            diff = True +                if network.get('links') and not connected_networks[network['name']].get('Links'): +                    diff = True +                if network.get('links') and connected_networks[network['name']].get('Links'): +                    expected_links = [] +                    for link, alias in network['links'].items(): +                        expected_links.append("%s:%s" % (link, alias)) +                    for link in expected_links: +                        if link not in connected_networks[network['name']].get('Links', []): +                            diff = True +                if diff: +                    different = True +                    differences.append(dict( +                        parameter=network, +                        container=dict( +                            name=network['name'], +                            ipv4_address=connected_networks[network['name']].get('IPAddress'), +                            ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'), +                            aliases=connected_networks[network['name']].get('Aliases'), +                            links=connected_networks[network['name']].get('Links') +                        ) +                    )) +        return different, differences + +    def has_extra_networks(self): +        ''' +        Check if the container is connected to non-requested networks +        ''' +        extra_networks = [] +        extra = False + +        if not 
self.container.get('NetworkSettings'): +            self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.") + +        connected_networks = self.container['NetworkSettings'].get('Networks') +        if connected_networks: +            for network, network_config in connected_networks.items(): +                keep = False +                if self.parameters.networks: +                    for expected_network in self.parameters.networks: +                        if expected_network['name'] == network: +                            keep = True +                if not keep: +                    extra = True +                    extra_networks.append(dict(name=network, id=network_config['NetworkID'])) +        return extra, extra_networks + +    def _get_expected_devices(self): +        if not self.parameters.devices: +            return None +        expected_devices = [] +        for device in self.parameters.devices: +            parts = device.split(':') +            if len(parts) == 1: +                expected_devices.append( +                    dict( +                        CgroupPermissions='rwm', +                        PathInContainer=parts[0], +                        PathOnHost=parts[0] +                    )) +            elif len(parts) == 2: +                parts = device.split(':') +                expected_devices.append( +                    dict( +                        CgroupPermissions='rwm', +                        PathInContainer=parts[1], +                        PathOnHost=parts[0] +                    ) +                ) +            else: +                expected_devices.append( +                    dict( +                        CgroupPermissions=parts[2], +                        PathInContainer=parts[1], +                        PathOnHost=parts[0] +                        )) +        return expected_devices + +    def _get_expected_entrypoint(self): +        self.log('_get_expected_entrypoint') +        if not self.parameters.entrypoint: +            return None +        return shlex.split(self.parameters.entrypoint) + +    def _get_expected_ports(self): +        if not self.parameters.published_ports: +            return None +        expected_bound_ports = {} +        for container_port, config in self.parameters.published_ports.items(): +            if isinstance(container_port, int): +                container_port = "%s/tcp" % container_port +            if len(config) == 1: +                expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] +            elif isinstance(config[0], tuple): +                expected_bound_ports[container_port] = [] +                for host_ip, host_port in config: +                    expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)}) +            else: +                expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] +        return expected_bound_ports + +    def _get_expected_links(self): +        if self.parameters.links is None: +            return None +        self.log('parameter links:') +        self.log(self.parameters.links, pretty_print=True) +        exp_links = [] +        for link, alias in self.parameters.links.items(): +            exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias)) +        return exp_links + +    def _get_expected_binds(self, image): +        self.log('_get_expected_binds') +        image_vols = [] +        if 
image: +            image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes')) +        param_vols = [] +        if self.parameters.volumes: +            for vol in self.parameters.volumes: +                host = None +                if ':' in vol: +                    if len(vol.split(':')) == 3: +                        host, container, mode = vol.split(':') +                    if len(vol.split(':')) == 2: +                        parts = vol.split(':') +                        if parts[1] not in VOLUME_PERMISSIONS: +                            host, container, mode = vol.split(':') + ['rw'] +                if host: +                    param_vols.append("%s:%s:%s" % (host, container, mode)) +        result = list(set(image_vols + param_vols)) +        self.log("expected_binds:") +        self.log(result, pretty_print=True) +        return result + +    def _get_image_binds(self, volumes): +        ''' +        Convert array of binds to array of strings with format host_path:container_path:mode + +        :param volumes: array of bind dicts +        :return: array of strings +        ''' +        results = [] +        if isinstance(volumes, dict): +            results += self._get_bind_from_dict(volumes) +        elif isinstance(volumes, list): +            for vol in volumes: +                results += self._get_bind_from_dict(vol) +        return results + +    @staticmethod +    def _get_bind_from_dict(volume_dict): +        results = [] +        if volume_dict: +            for host_path, config in volume_dict.items(): +                if isinstance(config, dict) and config.get('bind'): +                    container_path = config.get('bind') +                    mode = config.get('mode', 'rw') +                    results.append("%s:%s:%s" % (host_path, container_path, mode)) +        return results + +    def _get_expected_volumes(self, image): +        self.log('_get_expected_volumes') +        expected_vols = dict() +        if image and image['ContainerConfig'].get('Volumes'): +            expected_vols.update(image['ContainerConfig'].get('Volumes')) + +        if self.parameters.volumes: +            for vol in self.parameters.volumes: +                container = None +                if ':' in vol: +                    if len(vol.split(':')) == 3: +                        host, container, mode = vol.split(':') +                    if len(vol.split(':')) == 2: +                        parts = vol.split(':') +                        if parts[1] not in VOLUME_PERMISSIONS: +                            host, container, mode = vol.split(':') + ['rw'] +                new_vol = dict() +                if container: +                    new_vol[container] = dict() +                else: +                    new_vol[vol] = dict() +                expected_vols.update(new_vol) + +        if not expected_vols: +            expected_vols = None +        self.log("expected_volumes:") +        self.log(expected_vols, pretty_print=True) +        return expected_vols + +    def _get_expected_env(self, image): +        self.log('_get_expected_env') +        expected_env = dict() +        if image and image['ContainerConfig'].get('Env'): +            for env_var in image['ContainerConfig']['Env']: +                parts = env_var.split('=', 1) +                expected_env[parts[0]] = parts[1] +        if self.parameters.env: +            expected_env.update(self.parameters.env) +        param_env = [] +        for key, value in expected_env.items(): +            
param_env.append("%s=%s" % (key, value)) +        return param_env + +    def _get_expected_exposed(self, image): +        self.log('_get_expected_exposed') +        image_ports = [] +        if image: +            image_ports = [re.sub(r'/.+$', '', p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()] +        param_ports = [] +        if self.parameters.ports: +            param_ports = [str(p[0]) for p in self.parameters.ports] +        result = list(set(image_ports + param_ports)) +        self.log(result, pretty_print=True) +        return result + +    def _get_expected_ulimits(self, config_ulimits): +        self.log('_get_expected_ulimits') +        if config_ulimits is None: +            return None +        results = [] +        for limit in config_ulimits: +            results.append(dict( +                Name=limit.name, +                Soft=limit.soft, +                Hard=limit.hard +            )) +        return results + +    def _get_expected_cmd(self): +        self.log('_get_expected_cmd') +        if not self.parameters.command: +            return None +        return shlex.split(self.parameters.command) + +    def _convert_simple_dict_to_list(self, param_name, join_with=':'): +        if getattr(self.parameters, param_name, None) is None: +            return None +        results = [] +        for key, value in getattr(self.parameters, param_name).items(): +            results.append("%s%s%s" % (key, join_with, value)) +        return results + + +class ContainerManager(DockerBaseClass): +    ''' +    Perform container management tasks +    ''' + +    def __init__(self, client): + +        super(ContainerManager, self).__init__() + +        self.client = client +        self.parameters = TaskParameters(client) +        self.check_mode = self.client.check_mode +        self.results = {'changed': False, 'actions': []} +        self.diff = {} +        self.facts = {} + +        state = self.parameters.state +        if state in ('stopped', 'started', 'present'): +            self.present(state) +        elif state == 'absent': +            self.absent() + +        if not self.check_mode and not self.parameters.debug: +            self.results.pop('actions') + +        if self.client.module._diff or self.parameters.debug: +            self.results['diff'] = self.diff + +        if self.facts: +            self.results['ansible_facts'] = {'docker_container': self.facts} + +    def present(self, state): +        container = self._get_container(self.parameters.name) +        image = self._get_image() + +        if not container.exists: +            # New container +            self.log('No container found') +            new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) +            if new_container: +                container = new_container +        else: +            # Existing container +            different, differences = container.has_different_configuration(image) +            image_different = False +            if not self.parameters.ignore_image: +                image_different = self._image_is_different(image, container) +            if image_different or different or self.parameters.recreate: +                self.diff['differences'] = differences +                if image_different: +                    self.diff['image_different'] = True +                self.log("differences") +                self.log(differences, pretty_print=True) +                if container.running: +                   
 self.container_stop(container.Id) +                self.container_remove(container.Id) +                new_container = self.container_create(self.parameters.image, self.parameters.create_parameters) +                if new_container: +                    container = new_container + +        if container and container.exists: +            container = self.update_limits(container) +            container = self.update_networks(container) + +            if state == 'started' and not container.running: +                container = self.container_start(container.Id) +            elif state == 'started' and self.parameters.restart: +                self.container_stop(container.Id) +                container = self.container_start(container.Id) +            elif state == 'stopped' and container.running: +                self.container_stop(container.Id) +                container = self._get_container(container.Id) + +        self.facts = container.raw + +    def absent(self): +        container = self._get_container(self.parameters.name) +        if container.exists: +            if container.running: +                self.container_stop(container.Id) +            self.container_remove(container.Id) + +    def fail(self, msg, **kwargs): +        self.client.module.fail_json(msg=msg, **kwargs) + +    def _get_container(self, container): +        ''' +        Expects container ID or Name. Returns a container object +        ''' +        return Container(self.client.get_container(container), self.parameters) + +    def _get_image(self): +        if not self.parameters.image: +            self.log('No image specified') +            return None +        repository, tag = utils.parse_repository_tag(self.parameters.image) +        if not tag: +            tag = "latest" +        image = self.client.find_image(repository, tag) +        if not self.check_mode: +            if not image or self.parameters.pull: +                self.log("Pull the image.") +                image, alreadyToLatest = self.client.pull_image(repository, tag) +                if alreadyToLatest: +                    self.results['changed'] = False +                else: +                    self.results['changed'] = True +                    self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) +        self.log("image") +        self.log(image, pretty_print=True) +        return image + +    def _image_is_different(self, image, container): +        if image and image.get('Id'): +            if container and container.Image: +                if image.get('Id') != container.Image: +                    return True +        return False + +    def update_limits(self, container): +        limits_differ, different_limits = container.has_different_resource_limits() +        if limits_differ: +            self.log("limit differences:") +            self.log(different_limits, pretty_print=True) +        if limits_differ and not self.check_mode: +            self.container_update(container.Id, self.parameters.update_parameters) +            return self._get_container(container.Id) +        return container + +    def update_networks(self, container): +        has_network_differences, network_differences = container.has_network_differences() +        updated_container = container +        if has_network_differences: +            if self.diff.get('differences'): +                self.diff['differences'].append(dict(network_differences=network_differences)) +            else: +                
self.diff['differences'] = [dict(network_differences=network_differences)] +            self.results['changed'] = True +            updated_container = self._add_networks(container, network_differences) + +        if self.parameters.purge_networks: +            has_extra_networks, extra_networks = container.has_extra_networks() +            if has_extra_networks: +                if self.diff.get('differences'): +                    self.diff['differences'].append(dict(purge_networks=extra_networks)) +                else: +                    self.diff['differences'] = [dict(purge_networks=extra_networks)] +                self.results['changed'] = True +                updated_container = self._purge_networks(container, extra_networks) +        return updated_container + +    def _add_networks(self, container, differences): +        for diff in differences: +            # remove the container from the network, if connected +            if diff.get('container'): +                self.results['actions'].append(dict(removed_from_network=diff['parameter']['name'])) +                if not self.check_mode: +                    try: +                        self.client.disconnect_container_from_network(container.Id, diff['parameter']['id']) +                    except Exception as exc: +                        self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'], +                                                                                          str(exc))) +            # connect to the network +            params = dict( +                ipv4_address=diff['parameter'].get('ipv4_address', None), +                ipv6_address=diff['parameter'].get('ipv6_address', None), +                links=diff['parameter'].get('links', None), +                aliases=diff['parameter'].get('aliases', None) +            ) +            self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params)) +            if not self.check_mode: +                try: +                    self.log("Connecting container to network %s" % diff['parameter']['id']) +                    self.log(params, pretty_print=True) +                    self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params) +                except Exception as exc: +                    self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc))) +        return self._get_container(container.Id) + +    def _purge_networks(self, container, networks): +        for network in networks: +            self.results['actions'].append(dict(removed_from_network=network['name'])) +            if not self.check_mode: +                try: +                    self.client.disconnect_container_from_network(container.Id, network['name']) +                except Exception as exc: +                    self.fail("Error disconnecting container from network %s - %s" % (network['name'], +                                                                                      str(exc))) +        return self._get_container(container.Id) + +    def container_create(self, image, create_parameters): +        self.log("create container") +        self.log("image: %s parameters:" % image) +        self.log(create_parameters, pretty_print=True) +        self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters)) +        self.results['changed'] = True +        new_container = None +     
   if not self.check_mode:
+            try:
+                new_container = self.client.create_container(image, **create_parameters)
+            except Exception as exc:
+                self.fail("Error creating container: %s" % str(exc))
+            return self._get_container(new_container['Id'])
+        return new_container
+
+    def container_start(self, container_id):
+        self.log("start container %s" % (container_id))
+        self.results['actions'].append(dict(started=container_id))
+        self.results['changed'] = True
+        if not self.check_mode:
+            try:
+                self.client.start(container=container_id)
+            except Exception as exc:
+                self.fail("Error starting container %s: %s" % (container_id, str(exc)))
+
+            if not self.parameters.detach:
+                status = self.client.wait(container_id)
+                output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
+                if status != 0:
+                    self.fail(output, status=status)
+                if self.parameters.cleanup:
+                    self.container_remove(container_id, force=True)
+                insp = self._get_container(container_id)
+                if insp.raw:
+                    insp.raw['Output'] = output
+                else:
+                    insp.raw = dict(Output=output)
+                return insp
+        return self._get_container(container_id)
+
+    def container_remove(self, container_id, link=False, force=False):
+        volume_state = (not self.parameters.keep_volumes)
+        self.log("remove container container:%s v:%s link:%s force:%s" % (container_id, volume_state, link, force))
+        self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+        self.results['changed'] = True
+        response = None
+        if not self.check_mode:
+            try:
+                response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
+            except Exception as exc:
+                self.fail("Error removing container %s: %s" % (container_id, str(exc)))
+        return response
+
+    def container_update(self, container_id, update_parameters):
+        if update_parameters:
+            self.log("update container %s" % (container_id))
+            self.log(update_parameters, pretty_print=True)
+            self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+            self.results['changed'] = True
+            if not self.check_mode and callable(getattr(self.client, 'update_container')):
+                try:
+                    self.client.update_container(container_id, **update_parameters)
+                except Exception as exc:
+                    self.fail("Error updating container %s: %s" % (container_id, str(exc)))
+        return self._get_container(container_id)
+
+    def container_kill(self, container_id):
+        self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
+        self.results['changed'] = True
+        response = None
+        if not self.check_mode:
+            try:
+                if self.parameters.kill_signal:
+                    response = self.client.kill(container_id, signal=self.parameters.kill_signal)
+                else:
+                    response = self.client.kill(container_id)
+            except Exception as exc: +                
self.fail("Error killing container %s: %s" % (container_id, exc)) +        return response + +    def container_stop(self, container_id): +        if self.parameters.force_kill: +            self.container_kill(container_id) +            return +        self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout)) +        self.results['changed'] = True +        response = None +        if not self.check_mode: +            try: +                if self.parameters.stop_timeout: +                    response = self.client.stop(container_id, timeout=self.parameters.stop_timeout) +                else: +                    response = self.client.stop(container_id) +            except Exception as exc: +                self.fail("Error stopping container %s: %s" % (container_id, str(exc))) +        return response + + +def main(): +    argument_spec = dict( +        blkio_weight=dict(type='int'), +        capabilities=dict(type='list'), +        cleanup=dict(type='bool', default=False), +        command=dict(type='str'), +        cpu_period=dict(type='int'), +        cpu_quota=dict(type='int'), +        cpuset_cpus=dict(type='str'), +        cpuset_mems=dict(type='str'), +        cpu_shares=dict(type='int'), +        detach=dict(type='bool', default=True), +        devices=dict(type='list'), +        dns_servers=dict(type='list'), +        dns_opts=dict(type='list'), +        dns_search_domains=dict(type='list'), +        env=dict(type='dict'), +        env_file=dict(type='path'), +        entrypoint=dict(type='str'), +        etc_hosts=dict(type='dict'), +        exposed_ports=dict(type='list', aliases=['exposed', 'expose']), +        force_kill=dict(type='bool', default=False, aliases=['forcekill']), +        groups=dict(type='list'), +        hostname=dict(type='str'), +        ignore_image=dict(type='bool', default=False), +        image=dict(type='str'), +        interactive=dict(type='bool', default=False), +        ipc_mode=dict(type='str'), +        keep_volumes=dict(type='bool', default=True), +        kernel_memory=dict(type='str'), +        kill_signal=dict(type='str'), +        labels=dict(type='dict'), +        links=dict(type='list'), +        log_driver=dict(type='str', +                        choices=['none', 'json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'], +                        default=None), +        log_options=dict(type='dict', aliases=['log_opt']), +        mac_address=dict(type='str'), +        memory=dict(type='str', default='0'), +        memory_reservation=dict(type='str'), +        memory_swap=dict(type='str'), +        memory_swappiness=dict(type='int'), +        name=dict(type='str', required=True), +        network_mode=dict(type='str'), +        networks=dict(type='list'), +        oom_killer=dict(type='bool'), +        oom_score_adj=dict(type='int'), +        paused=dict(type='bool', default=False), +        pid_mode=dict(type='str'), +        privileged=dict(type='bool', default=False), +        published_ports=dict(type='list', aliases=['ports']), +        pull=dict(type='bool', default=False), +        purge_networks=dict(type='bool', default=False), +        read_only=dict(type='bool', default=False), +        recreate=dict(type='bool', default=False), +        restart=dict(type='bool', default=False), +        restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']), +        restart_retries=dict(type='int', default=None), +        shm_size=dict(type='str'), +   
     security_opts=dict(type='list'), +        state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'), +        stop_signal=dict(type='str'), +        stop_timeout=dict(type='int'), +        trust_image_content=dict(type='bool', default=False), +        tty=dict(type='bool', default=False), +        ulimits=dict(type='list'), +        user=dict(type='str'), +        uts=dict(type='str'), +        volumes=dict(type='list'), +        volumes_from=dict(type='list'), +        volume_driver=dict(type='str'), +    ) + +    required_if = [ +        ('state', 'present', ['image']) +    ] + +    client = AnsibleDockerClient( +        argument_spec=argument_spec, +        required_if=required_if, +        supports_check_mode=True +    ) + +    cm = ContainerManager(client) +    client.module.exit_json(**cm.results) + +# import module snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': +    main()
\ No newline at end of file
diff --git a/roles/openshift_health_checker/library/docker_info.py b/roles/openshift_health_checker/library/docker_info.py
new file mode 100644
index 000000000..7f712bcff
--- /dev/null
+++ b/roles/openshift_health_checker/library/docker_info.py
@@ -0,0 +1,24 @@
+# pylint: disable=missing-docstring
+"""
+Ansible module for determining information about the docker host.
+
+While there are several ansible modules that make use of the docker
+api to expose container and image facts on a remote host, they
+are unable to return specific information about the host machine
+itself. This module exposes the same information obtained through
+executing the `docker info` command on a docker host, in JSON format.
+"""
+
+from ansible.module_utils.docker_common import AnsibleDockerClient
+
+
+def main():
+    client = AnsibleDockerClient()
+
+    client.module.exit_json(
+        info=client.info(),
+    )
+
+
+if __name__ == '__main__':
+    main()
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
new file mode 100644
index 000000000..7a7498cb7
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -0,0 +1,168 @@
+# pylint: disable=missing-docstring
+from openshift_checks import OpenShiftCheck, get_var
+
+
+class DockerImageAvailability(OpenShiftCheck):
+    """Check that required Docker images are available.
+
+    This check attempts to ensure that required docker images are
+    either present locally or able to be pulled down from the
+    registries configured on the host machine.
+    """
+
+    name = "docker_image_availability"
+    tags = ["preflight"]
+
+    skopeo_image = "openshift/openshift-ansible"
+
+    docker_image_base = {
+        "origin": {
+            "repo": "openshift",
+            "image": "origin",
+        },
+        "openshift-enterprise": {
+            "repo": "openshift3",
+            "image": "ose",
+        },
+    }
+
+    def run(self, tmp, task_vars):
+        required_images = self.required_images(task_vars)
+        missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
+
+        # exit early if all images were found locally
+        if not missing_images:
+            return {"changed": False}
+
+        msg, failed, changed = self.update_skopeo_image(task_vars)
+
+        # exit early if Skopeo update fails
+        if failed:
+            return {
+                "failed": True,
+                "changed": changed,
+                "msg": "Failed to update Skopeo image ({img_name}).
{msg}".format(img_name=self.skopeo_image, msg=msg), +            } + +        registries = self.known_docker_registries(task_vars) +        available_images = self.available_images(missing_images, registries, task_vars) +        unavailable_images = set(missing_images) - set(available_images) + +        if unavailable_images: +            return { +                "failed": True, +                "msg": ( +                    "One or more required images are not available: {}.\n" +                    "Configured registries: {}" +                ).format(", ".join(sorted(unavailable_images)), ", ".join(registries)), +                "changed": changed, +            } + +        return {"changed": changed} + +    def required_images(self, task_vars): +        deployment_type = get_var(task_vars, "deployment_type") +        image_base_name = self.docker_image_base[deployment_type] + +        openshift_release = get_var(task_vars, "openshift_release") +        openshift_image_tag = get_var(task_vars, "openshift_image_tag") + +        is_containerized = get_var(task_vars, "openshift", "common", "is_containerized") + +        if is_containerized: +            images = set(self.containerized_docker_images(image_base_name, openshift_release)) +        else: +            images = set(self.rpm_docker_images(image_base_name, openshift_release)) + +        # append images with qualified image tags to our list of required images. +        # these are images with a (v0.0.0.0) tag, rather than a standard release +        # format tag (v0.0). We want to check this set in both containerized and +        # non-containerized installations. +        images.update( +            self.qualified_docker_images(self.image_from_base_name(image_base_name), "v" + openshift_image_tag) +        ) + +        return images + +    def local_images(self, images, task_vars): +        """Filter a list of images and return those available locally.""" +        return [ +            image for image in images +            if self.is_image_local(image, task_vars) +        ] + +    def is_image_local(self, image, task_vars): +        result = self.module_executor("docker_image_facts", {"name": image}, task_vars) +        if result.get("failed", False): +            return False + +        return bool(result.get("images", [])) + +    def known_docker_registries(self, task_vars): +        result = self.module_executor("docker_info", {}, task_vars) + +        if result.get("failed", False): +            return [] + +        docker_info = result.get("info", "") +        return [registry.get("Name", "") for registry in docker_info.get("Registries", {})] + +    def available_images(self, images, registries, task_vars): +        """Inspect existing images using Skopeo and return all images successfully inspected.""" +        return [ +            image for image in images +            if self.is_image_available(image, registries, task_vars) +        ] + +    def is_image_available(self, image, registries, task_vars): +        for registry in registries: +            if self.is_available_skopeo_image(image, registry, task_vars): +                return True + +        return False + +    def is_available_skopeo_image(self, image, registry, task_vars): +        """Uses Skopeo to determine if required image exists in a given registry.""" + +        cmd_str = "skopeo inspect docker://{registry}/{image}".format( +            registry=registry, +            image=image, +        ) + +        args = { +            "name": "skopeo_inspect", +     
       "image": self.skopeo_image, +            "command": cmd_str, +            "detach": False, +            "cleanup": True, +        } +        result = self.module_executor("docker_container", args, task_vars) +        return result.get("failed", False) + +    def containerized_docker_images(self, base_name, version): +        return [ +            "{image}:{version}".format(image=self.image_from_base_name(base_name), version=version) +        ] + +    @staticmethod +    def rpm_docker_images(base, version): +        return [ +            "{image_repo}/registry-console:{version}".format(image_repo=base["repo"], version=version) +        ] + +    @staticmethod +    def qualified_docker_images(image_name, version): +        return [ +            "{}-{}:{}".format(image_name, component, version) +            for component in "haproxy-router docker-registry deployer pod".split() +        ] + +    @staticmethod +    def image_from_base_name(base): +        return "".join([base["repo"], "/", base["image"]]) + +    # ensures that the skopeo docker image exists, and updates it +    # with latest if image was already present locally. +    def update_skopeo_image(self, task_vars): +        result = self.module_executor("docker_image", {"name": self.skopeo_image}, task_vars) +        return result.get("msg", ""), result.get("failed", False), result.get("changed", False) diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 14b80304d..570c41ecc 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -72,6 +72,8 @@ When both `openshift_logging_install_logging` and `openshift_logging_upgrade_log  - `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'.  - `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'.  - `openshift_logging_es_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land. +- `openshift_logging_es_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'. +- `openshift_logging_es_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.  When `openshift_logging_use_ops` is `True`, there are some additional vars. These work the  same as above for their non-ops counterparts, but apply to the OPS cluster instance: @@ -88,6 +90,8 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta  - `openshift_logging_es_ops_pvc_prefix`: logging-es-ops  - `openshift_logging_es_ops_recover_after_time`: 5m  - `openshift_logging_es_ops_storage_group`: 65534 +- `openshift_logging_es_ops_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'. +- `openshift_logging_es_ops_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'.   - `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.  - `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.  - `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified. 
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 75a6e4d69..1ea0fbe12 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -54,6 +54,18 @@ openshift_logging_kibana_ops_proxy_cpu_limit: null
 openshift_logging_kibana_ops_proxy_memory_limit: null
 openshift_logging_kibana_ops_replica_count: 1
 
+#The absolute path on the control node to the cert file to use
+#for the public facing ops kibana certs
+openshift_logging_kibana_ops_cert: ""
+
+#The absolute path on the control node to the key file to use
+#for the public facing ops kibana certs
+openshift_logging_kibana_ops_key: ""
+
+#The absolute path on the control node to the CA file to use
+#for the public facing ops kibana certs
+openshift_logging_kibana_ops_ca: ""
+
 openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
 openshift_logging_fluentd_cpu_limit: 100m
 openshift_logging_fluentd_memory_limit: 512Mi
@@ -80,6 +92,8 @@ openshift_logging_es_storage_group: "{{ openshift_hosted_logging_elasticsearch_s
 openshift_logging_es_nodeselector: "{{ openshift_hosted_logging_elasticsearch_nodeselector | default('') | map_from_pairs }}"
 # openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml
 openshift_logging_es_config: {}
+openshift_logging_es_number_of_shards: 1
+openshift_logging_es_number_of_replicas: 0
 
 # allow cluster-admin or cluster-reader to view operations index
 openshift_logging_es_ops_allow_cluster_reader: False
@@ -99,6 +113,8 @@ openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_
 openshift_logging_es_ops_recover_after_time: 5m
 openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}"
 openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
+openshift_logging_es_ops_number_of_shards: 1
+openshift_logging_es_ops_number_of_replicas: 0
 
 # storage related defaults
 openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}"
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
index 7af17a708..e77da7a24 100644
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -16,12 +16,12 @@
   changed_when: false
 
 - name: Generating logging routes
-  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml
+  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-route.yaml
   tags: routes
   vars:
-    obj_name: "{{route_info.name}}"
-    route_host: "{{route_info.host}}"
-    service_name: "{{route_info.name}}"
+    obj_name: "logging-kibana"
+    route_host: "{{openshift_logging_kibana_hostname}}"
+    service_name: "logging-kibana"
     tls_key: "{{kibana_key | default('') | b64decode}}"
     tls_cert: "{{kibana_cert | default('') | b64decode}}"
     tls_ca_cert: "{{kibana_ca | b64decode}}"
@@ -31,10 +31,47 @@
       component: support
       logging-infra: support
       provider: openshift
-  with_items:
-    - {name: logging-kibana, host: "{{openshift_logging_kibana_hostname}}"}
-    - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"}
-  loop_control:
-    loop_var: route_info
-  when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops | bool) or route_info.name == 'logging-kibana'
+  changed_when: no
+
+- set_fact: kibana_ops_key={{ lookup('file', openshift_logging_kibana_ops_key) | b64encode }}
+  when:
+  - openshift_logging_use_ops | bool
+  - "{{ openshift_logging_kibana_ops_key | trim | length > 0 }}"
+  changed_when: false
+
+- set_fact: kibana_ops_cert={{ lookup('file', openshift_logging_kibana_ops_cert)| b64encode  }}
+  when:
+  - openshift_logging_use_ops | bool
+  - "{{openshift_logging_kibana_ops_cert | trim | length > 0}}"
+  changed_when: false
+
+- set_fact: kibana_ops_ca={{ lookup('file', openshift_logging_kibana_ops_ca)| b64encode  }}
+  when:
+  - openshift_logging_use_ops | bool
+  - "{{openshift_logging_kibana_ops_ca | trim | length > 0}}"
+  changed_when: false
+
+- set_fact: kibana_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }}
+  when:
+  - openshift_logging_use_ops | bool
+  - kibana_ops_ca is not defined
+  changed_when: false
+
+- name: Generating logging ops routes
+  template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-ops-route.yaml
+  tags: routes
+  vars:
+    obj_name: "logging-kibana-ops"
+    route_host: "{{openshift_logging_kibana_ops_hostname}}"
+    service_name: "logging-kibana-ops"
+    tls_key: "{{kibana_ops_key | default('') | b64decode}}"
+    tls_cert: "{{kibana_ops_cert | default('') | b64decode}}"
+    tls_ca_cert: "{{kibana_ops_ca | b64decode}}"
+    tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+    edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}"
+    labels:
+      component: support
+      logging-infra: support
+      provider: openshift
+  when: openshift_logging_use_ops | bool
   changed_when: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
index 1b750bcbe..28fad420b 100644
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -53,6 +53,8 @@
     deploy_name: "{{item.1}}"
     es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}"
     es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}"
+    es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}"
+    es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}"
   with_indexed_items:
     - "{{ es_dc_pool }}"
   check_mode: no
@@ -134,6 +136,8 @@
     openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
     es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}"
     es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}"
+    es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}"
+    es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}"
   with_indexed_items:
     - "{{ es_ops_dc_pool | default([]) }}"
   when:
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2
index 9ed20e40e..07e8c0c98 100644
--- a/roles/openshift_logging/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging/templates/elasticsearch.yml.j2
@@ -6,9 +6,8 @@ script:
   indexed: on
 
 index:
-  number_of_shards: 1
-  number_of_replicas: 0
-  auto_expand_replicas: 0-2
+  number_of_shards: {{ es_number_of_shards | default ('1') }}
+  number_of_replicas: {{ es_number_of_replicas | default ('0') }}
   unassigned.node_left.delayed_timeout: 2m
   translog:
     flush_threshold_size: 256mb
@@ -29,6 +28,7 @@ cloud:
 discovery:
   type: kubernetes
   zen.ping.multicast.enabled: false
+  zen.minimum_master_nodes: {{es_min_masters}}
 
 gateway:
   expected_master_nodes: ${NODE_QUORUM}
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
index 07cc05683..c3064cee9 100644
--- a/roles/openshift_logging/vars/main.yaml
+++ b/roles/openshift_logging/vars/main.yaml
@@ -1,6 +1,8 @@
 ---
 openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
 es_node_quorum: "{{openshift_logging_es_cluster_size|int/2 + 1}}"
+es_min_masters_default: "{{ (openshift_logging_es_cluster_size | int / 2 | round(0,'floor') + 1) | int }}"
+es_min_masters: "{{ (openshift_logging_es_cluster_size == 1) | ternary(1, es_min_masters_default)}}"
 es_recover_after_nodes: "{{openshift_logging_es_cluster_size|int - 1}}"
 es_recover_expected_nodes: "{{openshift_logging_es_cluster_size|int}}"
 es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size|int/2 + 1}}"
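The `es_min_masters` expression above is dense Jinja2, but the intent is the standard split-brain quorum, floor(n/2) + 1, with a special case for single-node clusters. A sketch of the arithmetic it should produce:

    def es_min_masters(cluster_size):
        # A majority of master-eligible nodes guards against split brain;
        # a one-node cluster is trivially its own master.
        if cluster_size == 1:
            return 1
        return cluster_size // 2 + 1

    assert [es_min_masters(n) for n in (1, 2, 3, 4, 5)] == [1, 2, 2, 3, 3]

So a three-node ES cluster keeps quorum with two masters, which is why the playbook can now fail early when the cluster size cannot satisfy that minimum.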
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index b4fd5aeb0..c42bdb7c3 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -17,7 +17,7 @@ After={{ openshift.common.service_type }}-node-dep.service
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
 ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 2f79931df..f052ed505 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -60,8 +60,12 @@
 
 - name: Restart openvswitch
   systemd:
-    name: openvswitch
+    name: "{{ item }}"
     state: restarted
+  with_items:
+  - ovs-vswitchd
+  - ovsdb-server
+  - openvswitch
   when:
   - not openshift.common.is_containerized | bool
   - ovs_pkg | changed
@@ -69,6 +73,13 @@
 
 # Mandatory Docker restart, ensure all containerized services are running:
 - include: docker/restart.yml
+- name: Update oreg value
+  yedit:
+    src: "{{ openshift.common.config_base }}/node/node-config.yaml"
+    key: 'imageConfig.format'
+    value: "{{ oreg_url }}"
+  when: oreg_url is defined
+
 - name: Restart rpm node service
   service:
     name: "{{ openshift.common.service_type }}-node"
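`yedit` is this repo's own YAML-editing module, so the `Update oreg value` task above amounts to rewriting a single dotted key in node-config.yaml. A rough PyYAML equivalent; the config path and the oreg_url-style value are placeholders:

    import yaml

    CONFIG = "/etc/origin/node/node-config.yaml"  # illustrative path
    with open(CONFIG) as f:
        config = yaml.safe_load(f)

    # Equivalent of key 'imageConfig.format': descend one level and set the leaf.
    config.setdefault("imageConfig", {})["format"] = "registry.example.com/openshift3/ose-${component}:${version}"

    with open(CONFIG, "w") as f:
        yaml.safe_dump(config, f, default_flow_style=False)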
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 6ec88f85e..0ff398152 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
 ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro,rslave -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 SyslogIdentifier={{ openshift.common.service_type }}-node
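Finally, the shard settings introduced in this patch travel from the role defaults through install_elasticsearch.yaml into the elasticsearch.yml.j2 fragment shown earlier. A quick check of how that fragment renders, with the template text inlined for the example:

    import jinja2

    fragment = jinja2.Template(
        "index:\n"
        "  number_of_shards: {{ es_number_of_shards | default('1') }}\n"
        "  number_of_replicas: {{ es_number_of_replicas | default('0') }}\n")

    print(fragment.render())  # falls back to the role defaults: 1 shard, 0 replicas
    print(fragment.render(es_number_of_shards=3, es_number_of_replicas=1))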
