46 files changed, 6284 insertions, 115 deletions
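Among the changes below, the inventory examples gain an optional etcd_version variable, and the etcd roles now template it into the package name, so "etcd-3.1.0" is installed when the variable is set and plain "etcd" otherwise. As a rough illustration (not part of the diff; it assumes only the jinja2 Python package), the conditional inside the package name resolves like this:

    # Sketch: render the package-name template used by roles/etcd/tasks/main.yml
    # with plain Jinja2. Ansible's templating layer does more, but the conditional
    # behaves the same way.
    from jinja2 import Environment

    TEMPLATE = "etcd{{ '-' + etcd_version if etcd_version is defined else '' }}"
    env = Environment()

    print(env.from_string(TEMPLATE).render(etcd_version="3.1.0"))  # -> etcd-3.1.0
    print(env.from_string(TEMPLATE).render())                      # -> etcd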
diff --git a/.coveragerc b/.coveragerc
index 00f46b61b..ad7893b91 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -14,7 +14,7 @@ omit =
     */test/*
 [report]
-fail_under = 29
+fail_under = 28
 [html]
 directory = cover
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 0ddca6576..6dec97fda 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -89,6 +89,8 @@ openshift_release=v1.4
 # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
 # docker_upgrade=False
+# Specify exact version of etcd to configure or upgrade to.
+# etcd_version="3.1.0"
 # Upgrade Hooks
 #
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 7f80a9639..2b61e7d8d 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -89,6 +89,8 @@ openshift_release=v3.4
 # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
 # docker_upgrade=False
+# Specify exact version of etcd to configure or upgrade to.
+# etcd_version="3.1.0"
 # Upgrade Hooks
 #
diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml
index c2b72339c..1ccae61f2 100644
--- a/roles/contiv/defaults/main.yml
+++ b/roles/contiv/defaults/main.yml
@@ -2,11 +2,18 @@
 # The version of Contiv binaries to use
 contiv_version: 1.0.0-beta.3-02-21-2017.20-52-42.UTC
+# The version of cni binaries
+cni_version: v0.4.0
+
 contiv_default_subnet: "20.1.1.1/24"
 contiv_default_gw: "20.1.1.254"
 # TCP port that Netmaster listens for network connections
 netmaster_port: 9999
+# Default for contiv_role
+contiv_role: netmaster
+
+
 # TCP port that Netplugin listens for network connections
 netplugin_port: 6640
 contiv_rpc_port1: 9001
@@ -33,6 +40,14 @@ bin_dir: /usr/bin
 # Path to the contivk8s cni binary
 cni_bin_dir: /opt/cni/bin
+# Path to cni archive download directory
+cni_download_dir: /tmp
+
+# URL for cni binaries
+cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tbz2"
+
+
 # Contiv config directory
 contiv_config_dir: /opt/contiv/config
diff --git a/roles/contiv/files/loopback b/roles/contiv/files/loopback
deleted file mode 100644
index f02b0b1fb..000000000
--- a/roles/contiv/files/loopback
+++ /dev/null
Binary files differ
diff --git a/roles/contiv/tasks/download_bins.yml b/roles/contiv/tasks/download_bins.yml
index 28ed50fae..319fce46c 100644
--- a/roles/contiv/tasks/download_bins.yml
+++ b/roles/contiv/tasks/download_bins.yml
@@ -25,3 +25,22 @@
     src: "{{ contiv_current_release_directory }}/netplugin-{{ contiv_version }}.tar.bz2"
     dest: "{{ contiv_current_release_directory }}"
     copy: no
+
+- name: Download Bins | Download cni tar file
+  get_url:
+    url: "{{ cni_bin_url }}"
+    dest: "{{ cni_download_dir }}"
+    mode: 0755
+    validate_certs: False
+  environment:
+    http_proxy: "{{ http_proxy|default('') }}"
+    https_proxy: "{{ https_proxy|default('') }}"
+    no_proxy: "{{ no_proxy|default('') }}"
+  register: download_file
+
+- name: Download Bins | Extract cni tar file
+  unarchive:
+    src: "{{ download_file.dest }}"
+    dest: "{{ cni_download_dir }}"
+    copy: no
+  when: download_file.changed
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index ec6c72fe9..97b9762df 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -43,8 +43,9 @@
 - name: Netplugin | Copy CNI loopback bin
   copy:
-    src: loopback
+    src: "{{ cni_download_dir }}/loopback"
     dest: "{{ cni_bin_dir }}/loopback"
+    remote_src: True
     mode: 0755
 - name: Netplugin | Ensure kube_plugin_dir and cni/net.d directories exist
diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd/tasks/etcdctl.yml
index bb6fabf64..649ad23c1 100644
--- a/roles/etcd/tasks/etcdctl.yml
+++ b/roles/etcd/tasks/etcdctl.yml
@@ -1,6 +1,6 @@
 ---
 - name: Install etcd for etcdctl
-  package: name=etcd state=present
+  package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
   when: not openshift.common.is_atomic | bool
 - name: Configure etcd profile.d alises
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index b4ffc99e3..c09da3b61 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -7,7 +7,7 @@
     etcd_ip: "{{ etcd_ip }}"
 - name: Install etcd
-  package: name=etcd state=present
+  package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
   when: not etcd_is_containerized | bool
 - name: Pull etcd container
diff --git a/roles/etcd_server_certificates/tasks/main.yml b/roles/etcd_server_certificates/tasks/main.yml
index 242c1e997..4ae9b79c4 100644
--- a/roles/etcd_server_certificates/tasks/main.yml
+++ b/roles/etcd_server_certificates/tasks/main.yml
@@ -1,6 +1,6 @@
 ---
 - name: Install etcd
-  package: name=etcd state=present
+  package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
   when: not etcd_is_containerized | bool
 - name: Check status of etcd certificates
diff --git a/roles/lib_openshift/library/oadm_manage_node.py b/roles/lib_openshift/library/oadm_manage_node.py
index 8c0a29ac7..8bb0538c0 100644
--- a/roles/lib_openshift/library/oadm_manage_node.py
+++ b/roles/lib_openshift/library/oadm_manage_node.py
@@ -1358,10 +1358,11 @@ class OpenShiftCLIConfig(object):
     def stringify(self):
         ''' return the options hash as cli params in a string '''
         rval = []
-        for key, data in self.config_options.items():
+        for key in sorted(self.config_options.keys()):
+            data = self.config_options[key]
             if data['include'] \
                and (data['value'] or isinstance(data['value'], int)):
-                rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
         return rval
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index bbcd9d0c5..4ecfd2bff 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1366,10 +1366,11 @@ class OpenShiftCLIConfig(object):
     def stringify(self):
         ''' return the options hash as cli params in a string '''
         rval = []
-        for key, data in self.config_options.items():
+        for key in sorted(self.config_options.keys()):
+            data = self.config_options[key]
             if data['include'] \
                and (data['value'] or isinstance(data['value'], int)):
-                rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
         return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index
0ab1c8d49..49ff22584 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -1344,10 +1344,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval @@ -1907,6 +1908,28 @@ class PolicyGroup(OpenShiftCLI):          self.verbose = verbose          self._rolebinding = None          self._scc = None +        self._cluster_policy_bindings = None +        self._policy_bindings = None + +    @property +    def policybindings(self): +        if self._policy_bindings is None: +            results = self._get('clusterpolicybindings', None) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Could not retrieve policybindings') +            self._policy_bindings = results['results'][0]['items'][0] + +        return self._policy_bindings + +    @property +    def clusterpolicybindings(self): +        if self._cluster_policy_bindings is None: +            results = self._get('clusterpolicybindings', None) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') +            self._cluster_policy_bindings = results['results'][0]['items'][0] + +        return self._cluster_policy_bindings      @property      def role_binding(self): @@ -1947,18 +1970,24 @@ class PolicyGroup(OpenShiftCLI):      def exists_role_binding(self):          ''' return whether role_binding exists ''' -        results = self.get() -        if results['returncode'] == 0: -            self.role_binding = RoleBinding(results['results'][0]) -            if self.role_binding.find_group_name(self.config.config_options['group']['value']) != None: -                return True +        bindings = None +        if self.config.config_options['resource_kind']['value'] == 'cluster-role': +            bindings = self.clusterpolicybindings +        else: +            bindings = self.policybindings +        if bindings is None:              return False -        elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']: -            return False +        for binding in bindings['roleBindings']: +            _rb = binding['roleBinding'] +            if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ +                    _rb['groupNames'] is not None and \ +                    self.config.config_options['group']['value'] in _rb['groupNames']: +                self.role_binding = binding +                return True -        return results +        return False      def exists_scc(self):          ''' return whether scc exists ''' diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 91bd85122..bed05044c 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -1344,10 +1344,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):     
     ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval @@ -1906,6 +1907,28 @@ class PolicyUser(OpenShiftCLI):          self.verbose = verbose          self._rolebinding = None          self._scc = None +        self._cluster_policy_bindings = None +        self._policy_bindings = None + +    @property +    def policybindings(self): +        if self._policy_bindings is None: +            results = self._get('clusterpolicybindings', None) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Could not retrieve policybindings') +            self._policy_bindings = results['results'][0]['items'][0] + +        return self._policy_bindings + +    @property +    def clusterpolicybindings(self): +        if self._cluster_policy_bindings is None: +            results = self._get('clusterpolicybindings', None) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') +            self._cluster_policy_bindings = results['results'][0]['items'][0] + +        return self._cluster_policy_bindings      @property      def role_binding(self): @@ -1928,36 +1951,37 @@ class PolicyUser(OpenShiftCLI):          self._scc = scc      def get(self): -        '''fetch the desired kind''' +        '''fetch the desired kind + +           This is only used for scc objects. +           The {cluster}rolebindings happen in exists. +        '''          resource_name = self.config.config_options['name']['value']          if resource_name == 'cluster-reader':              resource_name += 's' -        # oc adm policy add-... creates policy bindings with the name -        # "[resource_name]-binding", however some bindings in the system -        # simply use "[resource_name]". So try both. 
- -        results = self._get(self.config.kind, resource_name) -        if results['returncode'] == 0: -            return results - -        # Now try -binding naming convention -        return self._get(self.config.kind, resource_name + "-binding") +        return self._get(self.config.kind, resource_name)      def exists_role_binding(self):          ''' return whether role_binding exists ''' -        results = self.get() -        if results['returncode'] == 0: -            self.role_binding = RoleBinding(results['results'][0]) -            if self.role_binding.find_user_name(self.config.config_options['user']['value']) != None: -                return True +        bindings = None +        if self.config.config_options['resource_kind']['value'] == 'cluster-role': +            bindings = self.clusterpolicybindings +        else: +            bindings = self.policybindings +        if bindings is None:              return False -        elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']: -            return False +        for binding in bindings['roleBindings']: +            _rb = binding['roleBinding'] +            if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ +                    _rb['userNames'] is not None and \ +                    self.config.config_options['user']['value'] in _rb['userNames']: +                self.role_binding = binding +                return True -        return results +        return False      def exists_scc(self):          ''' return whether scc exists ''' diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index dcfc326a9..c398c5551 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -1462,10 +1462,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval @@ -2266,7 +2267,6 @@ class Registry(OpenShiftCLI):      def exists(self):          '''does the object exist?''' -        self.get()          if self.deploymentconfig and self.service:              return True @@ -2293,7 +2293,7 @@ class Registry(OpenShiftCLI):          ''' prepare a registry for instantiation '''          options = self.config.to_option_list() -        cmd = ['registry', '-n', self.config.namespace] +        cmd = ['registry']          cmd.extend(options)          cmd.extend(['--dry-run=True', '-o', 'json']) @@ -2327,7 +2327,8 @@ class Registry(OpenShiftCLI):              service.put('spec.portalIP', self.portal_ip)          # the dry-run doesn't apply the selector correctly -        service.put('spec.selector', self.service.get_selector()) +        if self.service: +            service.put('spec.selector', self.service.get_selector())          # need to create the service and the deploymentconfig          service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict) diff --git a/roles/lib_openshift/library/oc_adm_registry.py.bak 
b/roles/lib_openshift/library/oc_adm_registry.py.bak new file mode 100644 index 000000000..d2532a784 --- /dev/null +++ b/roles/lib_openshift/library/oc_adm_registry.py.bak @@ -0,0 +1,2562 @@ +#!/usr/bin/env python +# pylint: disable=missing-docstring +# flake8: noqa: T001 +#     ___ ___ _  _ ___ ___    _ _____ ___ ___ +#    / __| __| \| | __| _ \  /_\_   _| __|   \ +#   | (_ | _|| .` | _||   / / _ \| | | _|| |) | +#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ +#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _| +#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | | +#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_| +# +# Copyright 2016 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- +''' +   OpenShiftCLI class that wraps the oc commands in a subprocess +''' +# pylint: disable=too-many-lines + +from __future__ import print_function +import atexit +import copy +import json +import os +import re +import shutil +import subprocess +import tempfile +# pylint: disable=import-error +try: +    import ruamel.yaml as yaml +except ImportError: +    import yaml + +from ansible.module_utils.basic import AnsibleModule + +# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*- + +DOCUMENTATION = ''' +--- +module: oc_adm_registry +short_description: Module to manage openshift registry +description: +  - Manage openshift registry programmatically. +options: +  state: +    description: +    - The desired action when managing openshift registry +    - present - update or create the registry +    - absent - tear down the registry service and deploymentconfig +    - list - returns the current representiation of a registry +    required: false +    default: False +    aliases: [] +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +  debug: +    description: +    - Turn on debug output. +    required: false +    default: False +    aliases: [] +  name: +    description: +    - The name of the registry +    required: false +    default: None +    aliases: [] +  namespace: +    description: +    - The selector when filtering on node labels +    required: false +    default: None +    aliases: [] +  images: +    description: +    - The image to base this registry on - ${component} will be replaced with --type +    required: 'openshift3/ose-${component}:${version}' +    default: None +    aliases: [] +  latest_images: +    description: +    - If true, attempt to use the latest image for the registry instead of the latest release. +    required: false +    default: False +    aliases: [] +  labels: +    description: +    - A set of labels to uniquely identify the registry and its components. 
+    required: false +    default: None +    aliases: [] +  enforce_quota: +    description: +    - If set, the registry will refuse to write blobs if they exceed quota limits +    required: False +    default: False +    aliases: [] +  mount_host: +    description: +    - If set, the registry volume will be created as a host-mount at this path. +    required: False +    default: False +    aliases: [] +  ports: +    description: +    - A comma delimited list of ports or port pairs to expose on the registry pod.  The default is set for 5000. +    required: False +    default: [5000] +    aliases: [] +  replicas: +    description: +    - The replication factor of the registry; commonly 2 when high availability is desired. +    required: False +    default: 1 +    aliases: [] +  selector: +    description: +    - Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes. +    required: False +    default: None +    aliases: [] +  service_account: +    description: +    - Name of the service account to use to run the registry pod. +    required: False +    default: 'registry' +    aliases: [] +  tls_certificate: +    description: +    - An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS +    required: false +    default: None +    aliases: [] +  tls_key: +    description: +    - An optional path to a PEM encoded private key for serving over TLS +    required: false +    default: None +    aliases: [] +  volume_mounts: +    description: +    - The volume mounts for the registry. +    required: false +    default: None +    aliases: [] +  daemonset: +    description: +    - Use a daemonset instead of a deployment config. +    required: false +    default: False +    aliases: [] +  edits: +    description: +    - A list of modifications to make on the deploymentconfig +    required: false +    default: None +    aliases: [] +  env_vars: +    description: +    - A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR +    required: false +    default: None +    aliases: [] +  force: +    description: +    - Force a registry update. 
+    required: false +    default: False +    aliases: [] +author: +- "Kenny Woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: create a secure registry +  oc_adm_registry: +    name: docker-registry +    service_account: registry +    replicas: 2 +    namespace: default +    selector: type=infra +    images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}" +    env_vars: +      REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml +      REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt +      REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key +      REGISTRY_HTTP_SECRET: supersecret +    volume_mounts: +    - path: /etc/secrets +      name: dockercerts +      type: secret +      secret_name: registry-secret +    - path: /etc/registryconfig +      name: dockersecrets +      type: secret +      secret_name: docker-registry-config +    edits: +    - key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme +      value: HTTPS +      action: put +    - key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme +      value: HTTPS +      action: put +    - key: spec.strategy.rollingParams +      value: +        intervalSeconds: 1 +        maxSurge: 50% +        maxUnavailable: 50% +        timeoutSeconds: 600 +        updatePeriodSeconds: 1 +      action: put +    - key: spec.template.spec.containers[0].resources.limits.memory +      value: 2G +      action: update +    - key: spec.template.spec.containers[0].resources.requests.memory +      value: 1G +      action: update + +  register: registryout + +''' + +# -*- -*- -*- End included fragment: doc/registry -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- +# pylint: disable=undefined-variable,missing-docstring +# noqa: E301,E302 + + +class YeditException(Exception): +    ''' Exception class for Yedit ''' +    pass + + +# pylint: disable=too-many-public-methods +class Yedit(object): +    ''' Class to modify yaml files ''' +    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" +    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" +    com_sep = set(['.', '#', '|', ':']) + +    # pylint: disable=too-many-arguments +    def __init__(self, +                 filename=None, +                 content=None, +                 content_type='yaml', +                 separator='.', +                 backup=False): +        self.content = content +        self._separator = separator +        self.filename = filename +        self.__yaml_dict = content +        self.content_type = content_type +        self.backup = backup +        self.load(content_type=self.content_type) +        if self.__yaml_dict is None: +            self.__yaml_dict = {} + +    @property +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @separator.setter +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @property +    def yaml_dict(self): +        ''' getter method for yaml_dict ''' +        return self.__yaml_dict + +    @yaml_dict.setter +    def yaml_dict(self, value): +        ''' setter method for yaml_dict ''' +        self.__yaml_dict = value + +    @staticmethod +    def parse_key(key, sep='.'): +        '''parse the key allowing the appropriate separator''' +        common_separators = list(Yedit.com_sep - set([sep])) +        return re.findall(Yedit.re_key % ''.join(common_separators), key) + +    
@staticmethod +    def valid_key(key, sep='.'): +        '''validate the incoming key''' +        common_separators = list(Yedit.com_sep - set([sep])) +        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key): +            return False + +        return True + +    @staticmethod +    def remove_entry(data, key, sep='.'): +        ''' remove data at location key ''' +        if key == '' and isinstance(data, dict): +            data.clear() +            return True +        elif key == '' and isinstance(data, list): +            del data[:] +            return True + +        if not (key and Yedit.valid_key(key, sep)) and \ +           isinstance(data, (list, dict)): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        # process last index for remove +        # expected list entry +        if key_indexes[-1][0]: +            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +                del data[int(key_indexes[-1][0])] +                return True + +        # expected dict entry +        elif key_indexes[-1][1]: +            if isinstance(data, dict): +                del data[key_indexes[-1][1]] +                return True + +    @staticmethod +    def add_entry(data, key, item=None, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a#b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key: +                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501 +                    data = data[dict_key] +                    continue + +                elif data and not isinstance(data, dict): +                    return None + +                data[dict_key] = {} +                data = data[dict_key] + +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        if key == '': +            data = item + +        # process last index for add +        # expected list entry +        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +            data[int(key_indexes[-1][0])] = item + +        # expected dict entry +        elif key_indexes[-1][1] and isinstance(data, dict): +            data[key_indexes[-1][1]] = item + +        return data + +    @staticmethod +    def get_entry(data, key, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a.b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = 
Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        return data + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. ''' + +        tmp_filename = filename + '.yedit' + +        with open(tmp_filename, 'w') as yfd: +            yfd.write(contents) + +        os.rename(tmp_filename, filename) + +    def write(self): +        ''' write to file ''' +        if not self.filename: +            raise YeditException('Please specify a filename.') + +        if self.backup and self.file_exists(): +            shutil.copy(self.filename, self.filename + '.orig') + +        # Try to set format attributes if supported +        try: +            self.yaml_dict.fa.set_block_style() +        except AttributeError: +            pass + +        # Try to use RoundTripDumper if supported. +        try: +            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) +        except AttributeError: +            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) + +        return (True, self.yaml_dict) + +    def read(self): +        ''' read from file ''' +        # check if it exists +        if self.filename is None or not self.file_exists(): +            return None + +        contents = None +        with open(self.filename) as yfd: +            contents = yfd.read() + +        return contents + +    def file_exists(self): +        ''' return whether file exists ''' +        if os.path.exists(self.filename): +            return True + +        return False + +    def load(self, content_type='yaml'): +        ''' return yaml file ''' +        contents = self.read() + +        if not contents and not self.content: +            return None + +        if self.content: +            if isinstance(self.content, dict): +                self.yaml_dict = self.content +                return self.yaml_dict +            elif isinstance(self.content, str): +                contents = self.content + +        # check if it is yaml +        try: +            if content_type == 'yaml' and contents: +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +                # Try to use RoundTripLoader if supported. +                try: +                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader) +                except AttributeError: +                    self.yaml_dict = yaml.safe_load(contents) + +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +            elif content_type == 'json' and contents: +                self.yaml_dict = json.loads(contents) +        except yaml.YAMLError as err: +            # Error loading yaml or json +            raise YeditException('Problem with loading yaml file. 
%s' % err) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # AUDIT:maybe-no-member makes sense due to loading data from +        # a serialized format. 
+        # pylint: disable=maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in ' + +                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501 + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        # Try to use ruamel.yaml and fallback to pyyaml +        try: +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                      default_flow_style=False), +                                 yaml.RoundTripLoader) +        except AttributeError: +            tmp_copy = copy.deepcopy(self.yaml_dict) + +        # set the format attributes if available +        try: +            tmp_copy.fa.set_block_style() +        except AttributeError: +            pass + +        result = Yedit.add_entry(tmp_copy, path, value, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            # Try to use ruamel.yaml and fallback to pyyaml +            try: +                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                          default_flow_style=False), +                                     yaml.RoundTripLoader) +            except AttributeError: +                tmp_copy = copy.deepcopy(self.yaml_dict) + +            # set the format attributes if 
available +            try: +                tmp_copy.fa.set_block_style() +            except AttributeError: +                pass + +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod +    def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]' +                                     % (inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # If vtype is not str then go ahead and attempt to yaml load it. +        if isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming ' + +                                     'value. value=[%s] vtype=[%s]' +                                     % (type(inc_value), vtype)) + +        return inc_value + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(module): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=module.params['src'], +                         backup=module.params['backup'], +                         separator=module.params['separator']) + +        if module.params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and \ +               module.params['state'] != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [%s].  
Verify that the ' + +                               'file exists, that it is has correct' + +                               ' permissions, and is valid yaml.'} + +        if module.params['state'] == 'list': +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) +                yamlfile.yaml_dict = content + +            if module.params['key']: +                rval = yamlfile.get(module.params['key']) or {} + +            return {'changed': False, 'result': rval, 'state': "list"} + +        elif module.params['state'] == 'absent': +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) +                yamlfile.yaml_dict = content + +            if module.params['update']: +                rval = yamlfile.pop(module.params['key'], +                                    module.params['value']) +            else: +                rval = yamlfile.delete(module.params['key']) + +            if rval[0] and module.params['src']: +                yamlfile.write() + +            return {'changed': rval[0], 'result': rval[1], 'state': "absent"} + +        elif module.params['state'] == 'present': +            # check if content is different than what is in the file +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) + +                # We had no edits to make and the contents are the same +                if yamlfile.yaml_dict == content and \ +                   module.params['value'] is None: +                    return {'changed': False, +                            'result': yamlfile.yaml_dict, +                            'state': "present"} + +                yamlfile.yaml_dict = content + +            # we were passed a value; parse it +            if module.params['value']: +                value = Yedit.parse_value(module.params['value'], +                                          module.params['value_type']) +                key = module.params['key'] +                if module.params['update']: +                    # pylint: disable=line-too-long +                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501 +                                                      module.params['curr_value_format'])  # noqa: E501 + +                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501 + +                elif module.params['append']: +                    rval = yamlfile.append(key, value) +                else: +                    rval = yamlfile.put(key, value) + +                if rval[0] and module.params['src']: +                    yamlfile.write() + +                return {'changed': rval[0], +                        'result': rval[1], 'state': "present"} + +            # no edits to make +            if module.params['src']: +                # pylint: disable=redefined-variable-type +                rval = yamlfile.write() +                return {'changed': rval[0], +                        'result': rval[1], +                        'state': "present"} + +        return {'failed': True, 'msg': 'Unkown state passed'} + +# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- 
-*- -*- + +# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- +# pylint: disable=too-many-lines +# noqa: E301,E302,E303,T001 + + +class OpenShiftCLIError(Exception): +    '''Exception class for openshiftcli''' +    pass + + +# pylint: disable=too-few-public-methods +class OpenShiftCLI(object): +    ''' Class to wrap the command line tools ''' +    def __init__(self, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False, +                 all_namespaces=False): +        ''' Constructor for OpenshiftCLI ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) +        self.all_namespaces = all_namespaces + +    # Pylint allows only 5 arguments to be passed. +    # pylint: disable=too-many-arguments +    def _replace_content(self, resource, rname, content, force=False, sep='.'): +        ''' replace the current object with the content ''' +        res = self._get(resource, rname) +        if not res['results']: +            return res + +        fname = Utils.create_tmpfile(rname + '-') + +        yed = Yedit(fname, res['results'][0], separator=sep) +        changes = [] +        for key, value in content.items(): +            changes.append(yed.put(key, value)) + +        if any([change[0] for change in changes]): +            yed.write() + +            atexit.register(Utils.cleanup, [fname]) + +            return self._replace(fname, force) + +        return {'returncode': 0, 'updated': False} + +    def _replace(self, fname, force=False): +        '''replace the current object with oc replace''' +        cmd = ['replace', '-f', fname] +        if force: +            cmd.append('--force') +        return self.openshift_cmd(cmd) + +    def _create_from_content(self, rname, content): +        '''create a temporary file and then call oc create on it''' +        fname = Utils.create_tmpfile(rname + '-') +        yed = Yedit(fname, content=content) +        yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self._create(fname) + +    def _create(self, fname): +        '''call oc create on a filename''' +        return self.openshift_cmd(['create', '-f', fname]) + +    def _delete(self, resource, rname, selector=None): +        '''call oc delete on a resource''' +        cmd = ['delete', resource, rname] +        if selector: +            cmd.append('--selector=%s' % selector) + +        return self.openshift_cmd(cmd) + +    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501 +        '''process a template + +           template_name: the name of the template to process +           create: whether to send to oc create after processing +           params: the parameters for the template +           template_data: the incoming template's data; instead of a file +        ''' +        cmd = ['process'] +        if template_data: +            cmd.extend(['-f', '-']) +        else: +            cmd.append(template_name) +        if params: +            param_str = ["%s=%s" % (key, value) for key, value in params.items()] +            cmd.append('-v') +            cmd.extend(param_str) + +        results = self.openshift_cmd(cmd, output=True, input_data=template_data) + +        if results['returncode'] != 0 or not create: +            return results + +        fname = Utils.create_tmpfile(template_name + '-') +        yed = Yedit(fname, results['results']) +        
yed.write() + +        atexit.register(Utils.cleanup, [fname]) + +        return self.openshift_cmd(['create', '-f', fname]) + +    def _get(self, resource, rname=None, selector=None): +        '''return a resource by name ''' +        cmd = ['get', resource] +        if selector: +            cmd.append('--selector=%s' % selector) +        elif rname: +            cmd.append(rname) + +        cmd.extend(['-o', 'json']) + +        rval = self.openshift_cmd(cmd, output=True) + +        # Ensure results are retuned in an array +        if 'items' in rval: +            rval['results'] = rval['items'] +        elif not isinstance(rval['results'], list): +            rval['results'] = [rval['results']] + +        return rval + +    def _schedulable(self, node=None, selector=None, schedulable=True): +        ''' perform oadm manage-node scheduable ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        cmd.append('--schedulable=%s' % schedulable) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501 + +    def _list_pods(self, node=None, selector=None, pod_selector=None): +        ''' perform oadm list pods + +            node: the node in which to list pods +            selector: the label selector filter if provided +            pod_selector: the pod selector filter if provided +        ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        cmd.extend(['--list-pods', '-o', 'json']) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    # pylint: disable=too-many-arguments +    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): +        ''' perform oadm manage-node evacuate ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if dry_run: +            cmd.append('--dry-run') + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        if grace_period: +            cmd.append('--grace-period=%s' % int(grace_period)) + +        if force: +            cmd.append('--force') + +        cmd.append('--evacuate') + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    def _version(self): +        ''' return the openshift version''' +        return self.openshift_cmd(['version'], output=True, output_type='raw') + +    def _import_image(self, url=None, name=None, tag=None): +        ''' perform image import ''' +        cmd = ['import-image'] + +        image = '{0}'.format(name) +        if tag: +            image += ':{0}'.format(tag) + +        cmd.append(image) + +        if url: +            cmd.append('--from={0}/{1}'.format(url, image)) + +        cmd.append('-n{0}'.format(self.namespace)) + +        cmd.append('--confirm') +        return self.openshift_cmd(cmd) + +    def _run(self, cmds, input_data): +        ''' Actually executes the command. This makes mocking easier. 
''' +        curr_env = os.environ.copy() +        curr_env.update({'KUBECONFIG': self.kubeconfig}) +        proc = subprocess.Popen(cmds, +                                stdin=subprocess.PIPE, +                                stdout=subprocess.PIPE, +                                stderr=subprocess.PIPE, +                                env=curr_env) + +        stdout, stderr = proc.communicate(input_data) + +        return proc.returncode, stdout.decode(), stderr.decode() + +    # pylint: disable=too-many-arguments,too-many-branches +    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): +        '''Base command for oc ''' +        cmds = [] +        if oadm: +            cmds = ['oadm'] +        else: +            cmds = ['oc'] + +        if self.all_namespaces: +            cmds.extend(['--all-namespaces']) +        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501 +            cmds.extend(['-n', self.namespace]) + +        cmds.extend(cmd) + +        rval = {} +        results = '' +        err = None + +        if self.verbose: +            print(' '.join(cmds)) + +        returncode, stdout, stderr = self._run(cmds, input_data) + +        rval = {"returncode": returncode, +                "results": results, +                "cmd": ' '.join(cmds)} + +        if returncode == 0: +            if output: +                if output_type == 'json': +                    try: +                        rval['results'] = json.loads(stdout) +                    except ValueError as err: +                        if "No JSON object could be decoded" in err.args: +                            err = err.args +                elif output_type == 'raw': +                    rval['results'] = stdout + +            if self.verbose: +                print("STDOUT: {0}".format(stdout)) +                print("STDERR: {0}".format(stderr)) + +            if err: +                rval.update({"err": err, +                             "stderr": stderr, +                             "stdout": stdout, +                             "cmd": cmds}) + +        else: +            rval.update({"stderr": stderr, +                         "stdout": stdout, +                         "results": {}}) + +        return rval + + +class Utils(object): +    ''' utilities for openshiftcli modules ''' + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. 
''' + +        with open(filename, 'w') as sfd: +            sfd.write(contents) + +    @staticmethod +    def create_tmp_file_from_contents(rname, data, ftype='yaml'): +        ''' create a file in tmp with name and contents''' + +        tmp = Utils.create_tmpfile(prefix=rname) + +        if ftype == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripDumper'): +                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) +            else: +                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) + +        elif ftype == 'json': +            Utils._write(tmp, json.dumps(data)) +        else: +            Utils._write(tmp, data) + +        # Register cleanup when module is done +        atexit.register(Utils.cleanup, [tmp]) +        return tmp + +    @staticmethod +    def create_tmpfile_copy(inc_file): +        '''create a temporary copy of a file''' +        tmpfile = Utils.create_tmpfile('lib_openshift-') +        Utils._write(tmpfile, open(inc_file).read()) + +        # Cleanup the tmpfile +        atexit.register(Utils.cleanup, [tmpfile]) + +        return tmpfile + +    @staticmethod +    def create_tmpfile(prefix='tmp'): +        ''' Generates and returns a temporary file name ''' + +        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: +            return tmp.name + +    @staticmethod +    def create_tmp_files_from_contents(content, content_type=None): +        '''Turn an array of dict: filename, content into a files array''' +        if not isinstance(content, list): +            content = [content] +        files = [] +        for item in content: +            path = Utils.create_tmp_file_from_contents(item['path'] + '-', +                                                       item['data'], +                                                       ftype=content_type) +            files.append({'name': os.path.basename(item['path']), +                          'path': path}) +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if 'metadata' in result and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripLoader'): +                contents = yaml.load(contents, yaml.RoundTripLoader) +            else: +                contents = 
yaml.safe_load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    @staticmethod +    def filter_versions(stdout): +        ''' filter the oc version output ''' + +        version_dict = {} +        version_search = ['oc', 'openshift', 'kubernetes'] + +        for line in stdout.strip().split('\n'): +            for term in version_search: +                if not line: +                    continue +                if line.startswith(term): +                    version_dict[term] = line.split()[-1] + +        # horrible hack to get openshift version in Openshift 3.2 +        #  By default "oc version in 3.2 does not return an "openshift" version +        if "openshift" not in version_dict: +            version_dict["openshift"] = version_dict["oc"] + +        return version_dict + +    @staticmethod +    def add_custom_versions(versions): +        ''' create custom versions strings ''' + +        versions_dict = {} + +        for tech, version in versions.items(): +            # clean up "-" from version +            if "-" in version: +                version = version.split("-")[0] + +            if version.startswith('v'): +                versions_dict[tech + '_numeric'] = version[1:].split('+')[0] +                # "v3.3.0.33" is what we have, we want "3.3" +                versions_dict[tech + '_short'] = version[1:4] + +        return versions_dict + +    @staticmethod +    def openshift_installed(): +        ''' check if openshift is installed ''' +        import yum + +        yum_base = yum.YumBase() +        if yum_base.rpmdb.searchNevra(name='atomic-openshift'): +            return True + +        return False + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements +    @staticmethod +    def check_def_equal(user_def, result_def, skip_keys=None, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  
''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['metadata', 'status'] +        if skip_keys: +            skip.extend(skip_keys) + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if key not in user_def: +                    if debug: +                        print('User data does not have key [%s]' % key) +                        print('User data: %s' % user_def) +                    return False + +                if not isinstance(user_def[key], list): +                    if debug: +                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) +                    return False + +                if len(user_def[key]) != len(value): +                    if debug: +                        print("List lengths are not equal.") +                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) +                        print("user_def: %s" % user_def[key]) +                        print("value: %s" % value) +                    return False + +                for values in zip(user_def[key], value): +                    if isinstance(values[0], dict) and isinstance(values[1], dict): +                        if debug: +                            print('sending list - list') +                            print(type(values[0])) +                            print(type(values[1])) +                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) +                        if not result: +                            print('list compare returned false') +                            return False + +                    elif value != user_def[key]: +                        if debug: +                            print('value should be identical') +                            print(value) +                            print(user_def[key]) +                        return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if key not in user_def: +                    if debug: +                        print("user_def does not have key [%s]" % key) +                    return False +                if not isinstance(user_def[key], dict): +                    if debug: +                        print("dict returned false: not instance of dict") +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print("keys are not equal in dict") +                        print(api_values) +                        print(user_values) +                    return False + +                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) +                if not result: +                    if debug: +                        print("dict returned false") +                        print(result) +                    return False + +            # Verify each key, value pair is the same +            else: +                if key not in user_def or value != user_def[key]: +                    if debug: +                        print("value not equal; 
user_def does not have key") +                        print(key) +                        print(value) +                        if key in user_def: +                            print(user_def[key]) +                    return False + +        if debug: +            print('returning true') +        return True + + +class OpenShiftCLIConfig(object): +    '''Generic Config''' +    def __init__(self, rname, namespace, kubeconfig, options): +        self.kubeconfig = kubeconfig +        self.name = rname +        self.namespace = namespace +        self._options = options + +    @property +    def config_options(self): +        ''' return config options ''' +        return self._options + +    def to_option_list(self): +        '''return all options as a string''' +        return self.stringify() + +    def stringify(self): +        ''' return the options hash as cli params in a string ''' +        rval = [] +        for key, data in self.config_options.items(): +            if data['include'] \ +               and (data['value'] or isinstance(data['value'], int)): +                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + +        return rval + + +# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*- + + +# pylint: disable=too-many-public-methods +class DeploymentConfig(Yedit): +    ''' Class to model an openshift DeploymentConfig''' +    default_deployment_config = ''' +apiVersion: v1 +kind: DeploymentConfig +metadata: +  name: default_dc +  namespace: default +spec: +  replicas: 0 +  selector: +    default_dc: default_dc +  strategy: +    resources: {} +    rollingParams: +      intervalSeconds: 1 +      maxSurge: 0 +      maxUnavailable: 25% +      timeoutSeconds: 600 +      updatePercent: -25 +      updatePeriodSeconds: 1 +    type: Rolling +  template: +    metadata: +    spec: +      containers: +      - env: +        - name: default +          value: default +        image: default +        imagePullPolicy: IfNotPresent +        name: default_dc +        ports: +        - containerPort: 8000 +          hostPort: 8000 +          protocol: TCP +          name: default_port +        resources: {} +        terminationMessagePath: /dev/termination-log +      dnsPolicy: ClusterFirst +      hostNetwork: true +      nodeSelector: +        type: compute +      restartPolicy: Always +      securityContext: {} +      serviceAccount: default +      serviceAccountName: default +      terminationGracePeriodSeconds: 30 +  triggers: +  - type: ConfigChange +''' + +    replicas_path = "spec.replicas" +    env_path = "spec.template.spec.containers[0].env" +    volumes_path = "spec.template.spec.volumes" +    container_path = "spec.template.spec.containers" +    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts" + +    def __init__(self, content=None): +        ''' Constructor for deploymentconfig ''' +        if not content: +            content = DeploymentConfig.default_deployment_config + +        super(DeploymentConfig, self).__init__(content=content) + +    def add_env_value(self, key, value): +        ''' add key, value pair to env array ''' +        rval = False +        env = self.get_env_vars() +        if env: +            env.append({'name': key, 'value': value}) +            rval = True +        else: +            result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value}) +            rval = result[0] + +        return rval + +    def 
exists_env_value(self, key, value): +        ''' return whether a key, value pair exists ''' +        results = self.get_env_vars() +        if not results: +            return False + +        for result in results: +            if result['name'] == key and result['value'] == value: +                return True + +        return False + +    def exists_env_key(self, key): +        ''' return whether a key exists ''' +        results = self.get_env_vars() +        if not results: +            return False + +        for result in results: +            if result['name'] == key: +                return True + +        return False + +    def get_env_var(self, key): +        '''return an environment variable by name ''' +        results = self.get(DeploymentConfig.env_path) or [] +        if not results: +            return None + +        for env_var in results: +            if env_var['name'] == key: +                return env_var + +        return None + +    def get_env_vars(self): +        '''return the list of environment variables ''' +        return self.get(DeploymentConfig.env_path) or [] + +    def delete_env_var(self, keys): +        '''delete a list of keys ''' +        if not isinstance(keys, list): +            keys = [keys] + +        env_vars_array = self.get_env_vars() +        modified = False +        for key in keys: +            idx = None +            for env_idx, env_var in enumerate(env_vars_array): +                if env_var['name'] == key: +                    idx = env_idx +                    break + +            # idx may be 0, so compare against None explicitly +            if idx is not None: +                modified = True +                del env_vars_array[idx] + +        if modified: +            return True + +        return False + +    def update_env_var(self, key, value): +        '''place an env in the env var list''' + +        env_vars_array = self.get_env_vars() +        idx = None +        for env_idx, env_var in enumerate(env_vars_array): +            if env_var['name'] == key: +                idx = env_idx +                break + +        if idx is not None: +            env_vars_array[idx]['value'] = value +        else: +            self.add_env_value(key, value) + +        return True + +    def exists_volume_mount(self, volume_mount): +        ''' return whether a volume mount exists ''' +        exist_volume_mounts = self.get_volume_mounts() + +        if not exist_volume_mounts: +            return False + +        volume_mount_found = False +        for exist_volume_mount in exist_volume_mounts: +            if exist_volume_mount['name'] == volume_mount['name']: +                volume_mount_found = True +                break + +        return volume_mount_found + +    def exists_volume(self, volume): +        ''' return whether a volume exists ''' +        exist_volumes = self.get_volumes() + +        volume_found = False +        for exist_volume in exist_volumes: +            if exist_volume['name'] == volume['name']: +                volume_found = True +                break + +        return volume_found + +    def find_volume_by_name(self, volume, mounts=False): +        ''' return the volume or volume mount with the given name ''' +        volumes = [] +        if mounts: +            volumes = self.get_volume_mounts() +        else: +            volumes = self.get_volumes() +        for exist_volume in volumes: +            if exist_volume['name'] == volume['name']: +                return exist_volume + +        return None + +    def get_replicas(self): +        ''' return replicas setting ''' +        return 
self.get(DeploymentConfig.replicas_path) + +    def get_volume_mounts(self): +        '''return volume mount information ''' +        return self.get_volumes(mounts=True) + +    def get_volumes(self, mounts=False): +        '''return volume or volume mount information ''' +        if mounts: +            return self.get(DeploymentConfig.volume_mounts_path) or [] + +        return self.get(DeploymentConfig.volumes_path) or [] + +    def delete_volume_by_name(self, volume): +        '''delete a volume and its volume mount by name ''' +        modified = False +        exist_volume_mounts = self.get_volume_mounts() +        exist_volumes = self.get_volumes() +        del_idx = None +        for idx, exist_volume in enumerate(exist_volumes): +            if 'name' in exist_volume and exist_volume['name'] == volume['name']: +                del_idx = idx +                break + +        if del_idx is not None: +            del exist_volumes[del_idx] +            modified = True + +        del_idx = None +        for idx, exist_volume_mount in enumerate(exist_volume_mounts): +            if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']: +                del_idx = idx +                break + +        if del_idx is not None: +            del exist_volume_mounts[del_idx] +            modified = True + +        return modified + +    def add_volume_mount(self, volume_mount): +        ''' add a volume mount to the first container ''' +        exist_volume_mounts = self.get_volume_mounts() + +        if not exist_volume_mounts and volume_mount: +            self.put(DeploymentConfig.volume_mounts_path, [volume_mount]) +        else: +            exist_volume_mounts.append(volume_mount) + +    def add_volume(self, volume): +        ''' add a volume to the pod spec ''' +        exist_volumes = self.get_volumes() +        if not volume: +            return + +        if not exist_volumes: +            self.put(DeploymentConfig.volumes_path, [volume]) +        else: +            exist_volumes.append(volume) + +    def update_replicas(self, replicas): +        ''' update replicas value ''' +        self.put(DeploymentConfig.replicas_path, replicas) + +    def update_volume(self, volume): +        '''update (or add) a volume in the volumes list''' +        exist_volumes = self.get_volumes() + +        if not volume: +            return False + +        # update the volume +        update_idx = None +        for idx, exist_vol in enumerate(exist_volumes): +            if exist_vol['name'] == volume['name']: +                update_idx = idx +                break + +        if update_idx is not None: +            exist_volumes[update_idx] = volume +        else: +            self.add_volume(volume) + +        return True + +    def update_volume_mount(self, volume_mount): +        '''update (or add) a volume mount in the volume mounts list''' +        modified = False + +        exist_volume_mounts = self.get_volume_mounts() + +        if not volume_mount: +            return False + +        # update the volume mount +        for exist_vol_mount in exist_volume_mounts: +            if exist_vol_mount['name'] == volume_mount['name']: +                if 'mountPath' in exist_vol_mount and \ +                   str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']): +                    exist_vol_mount['mountPath'] = volume_mount['mountPath'] +                    modified = True +                break + +        if not modified: +            self.add_volume_mount(volume_mount) +            modified = True + +        
return modified + +    def needs_update_volume(self, volume, volume_mount): +        ''' verify a volume update is needed ''' +        exist_volume = self.find_volume_by_name(volume) +        exist_volume_mount = self.find_volume_by_name(volume, mounts=True) +        results = [] +        results.append(exist_volume['name'] == volume['name']) + +        if 'secret' in volume: +            results.append('secret' in exist_volume) +            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName']) +            results.append(exist_volume_mount['name'] == volume_mount['name']) +            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath']) + +        elif 'emptyDir' in volume: +            results.append(exist_volume_mount['name'] == volume['name']) +            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath']) + +        elif 'persistentVolumeClaim' in volume: +            pvc = 'persistentVolumeClaim' +            results.append(pvc in exist_volume) +            if results[-1]: +                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName']) + +                if 'claimSize' in volume[pvc]: +                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize']) + +        elif 'hostpath' in volume: +            results.append('hostPath' in exist_volume) +            results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath']) + +        return not all(results) + +    def needs_update_replicas(self, replicas): +        ''' verify whether a replica update is needed ''' +        current_reps = self.get(DeploymentConfig.replicas_path) +        return not current_reps == replicas + +# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*- + +# pylint: disable=too-many-instance-attributes +class SecretConfig(object): +    ''' Handle secret options ''' +    # pylint: disable=too-many-arguments +    def __init__(self, +                 sname, +                 namespace, +                 kubeconfig, +                 secrets=None): +        ''' constructor for handling secret options ''' +        self.kubeconfig = kubeconfig +        self.name = sname +        self.namespace = namespace +        self.secrets = secrets +        self.data = {} + +        self.create_dict() + +    def create_dict(self): +        ''' assign the correct properties for a secret dict ''' +        self.data['apiVersion'] = 'v1' +        self.data['kind'] = 'Secret' +        self.data['metadata'] = {} +        self.data['metadata']['name'] = self.name +        self.data['metadata']['namespace'] = self.namespace +        self.data['data'] = {} +        if self.secrets: +            for key, value in self.secrets.items(): +                self.data['data'][key] = value + +# pylint: disable=too-many-instance-attributes +class Secret(Yedit): +    ''' Class to wrap the oc command line tools ''' +    secret_path = "data" +    kind = 'secret' + +    def __init__(self, content): +        '''secret constructor''' +        super(Secret, self).__init__(content=content) +        self._secrets = None + +    @property +    def secrets(self): +        '''secret property getter''' +        if self._secrets is None: +            self._secrets = self.get_secrets() +        return self._secrets + +    @secrets.setter +    def secrets(self): +        '''secret property setter''' +        if self._secrets is 
None: +            self._secrets = self.get_secrets() +        return self._secrets + +    def get_secrets(self): +        ''' returns all of the defined secrets ''' +        return self.get(Secret.secret_path) or {} + +    def add_secret(self, key, value): +        ''' add a secret ''' +        if self.secrets: +            self.secrets[key] = value +        else: +            self.put(Secret.secret_path, {key: value}) + +        return True + +    def delete_secret(self, key): +        ''' delete secret''' +        try: +            del self.secrets[key] +        except KeyError as _: +            return False + +        return True + +    def find_secret(self, key): +        ''' find secret''' +        rval = None +        try: +            rval = self.secrets[key] +        except KeyError as _: +            return None + +        return {'key': key, 'value': rval} + +    def update_secret(self, key, value): +        ''' update a secret''' +        if key in self.secrets: +            self.secrets[key] = value +        else: +            self.add_secret(key, value) + +        return True + +# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*- + + +# pylint: disable=too-many-instance-attributes +class ServiceConfig(object): +    ''' Handle service options ''' +    # pylint: disable=too-many-arguments +    def __init__(self, +                 sname, +                 namespace, +                 ports, +                 selector=None, +                 labels=None, +                 cluster_ip=None, +                 portal_ip=None, +                 session_affinity=None, +                 service_type=None): +        ''' constructor for handling service options ''' +        self.name = sname +        self.namespace = namespace +        self.ports = ports +        self.selector = selector +        self.labels = labels +        self.cluster_ip = cluster_ip +        self.portal_ip = portal_ip +        self.session_affinity = session_affinity +        self.service_type = service_type +        self.data = {} + +        self.create_dict() + +    def create_dict(self): +        ''' instantiates a service dict ''' +        self.data['apiVersion'] = 'v1' +        self.data['kind'] = 'Service' +        self.data['metadata'] = {} +        self.data['metadata']['name'] = self.name +        self.data['metadata']['namespace'] = self.namespace +        if self.labels: +            for lab, lab_value  in self.labels.items(): +                self.data['metadata'][lab] = lab_value +        self.data['spec'] = {} + +        if self.ports: +            self.data['spec']['ports'] = self.ports +        else: +            self.data['spec']['ports'] = [] + +        if self.selector: +            self.data['spec']['selector'] = self.selector + +        self.data['spec']['sessionAffinity'] = self.session_affinity or 'None' + +        if self.cluster_ip: +            self.data['spec']['clusterIP'] = self.cluster_ip + +        if self.portal_ip: +            self.data['spec']['portalIP'] = self.portal_ip + +        if self.service_type: +            self.data['spec']['type'] = self.service_type + +# pylint: disable=too-many-instance-attributes,too-many-public-methods +class Service(Yedit): +    ''' Class to model the oc service object ''' +    port_path = "spec.ports" +    portal_ip = "spec.portalIP" +    cluster_ip = "spec.clusterIP" +    kind = 'Service' + +    def __init__(self, content): +        '''Service constructor''' +        
super(Service, self).__init__(content=content) + +    def get_ports(self): +        ''' get a list of ports ''' +        return self.get(Service.port_path) or [] + +    def add_ports(self, inc_ports): +        ''' add a port object to the ports list ''' +        if not isinstance(inc_ports, list): +            inc_ports = [inc_ports] + +        ports = self.get_ports() +        if not ports: +            self.put(Service.port_path, inc_ports) +        else: +            ports.extend(inc_ports) + +        return True + +    def find_ports(self, inc_port): +        ''' find a specific port ''' +        for port in self.get_ports(): +            if port['port'] == inc_port['port']: +                return port + +        return None + +    def delete_ports(self, inc_ports): +        ''' remove a port from a service ''' +        if not isinstance(inc_ports, list): +            inc_ports = [inc_ports] + +        ports = self.get(Service.port_path) or [] + +        if not ports: +            return True + +        removed = False +        for inc_port in inc_ports: +            port = self.find_ports(inc_port) +            if port: +                ports.remove(port) +                removed = True + +        return removed + +    def add_cluster_ip(self, sip): +        '''add cluster ip''' +        self.put(Service.cluster_ip, sip) + +    def add_portal_ip(self, pip): +        '''add cluster ip''' +        self.put(Service.portal_ip, pip) + +# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*- + +class Volume(object): +    ''' Class to model an openshift volume object''' +    volume_mounts_path = {"pod": "spec.containers[0].volumeMounts", +                          "dc":  "spec.template.spec.containers[0].volumeMounts", +                          "rc":  "spec.template.spec.containers[0].volumeMounts", +                         } +    volumes_path = {"pod": "spec.volumes", +                    "dc":  "spec.template.spec.volumes", +                    "rc":  "spec.template.spec.volumes", +                   } + +    @staticmethod +    def create_volume_structure(volume_info): +        ''' return a properly structured volume ''' +        volume_mount = None +        volume = {'name': volume_info['name']} +        if volume_info['type'] == 'secret': +            volume['secret'] = {} +            volume[volume_info['type']] = {'secretName': volume_info['secret_name']} +            volume_mount = {'mountPath': volume_info['path'], +                            'name': volume_info['name']} +        elif volume_info['type'] == 'emptydir': +            volume['emptyDir'] = {} +            volume_mount = {'mountPath': volume_info['path'], +                            'name': volume_info['name']} +        elif volume_info['type'] == 'pvc': +            volume['persistentVolumeClaim'] = {} +            volume['persistentVolumeClaim']['claimName'] = volume_info['claimName'] +            volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize'] +        elif volume_info['type'] == 'hostpath': +            volume['hostPath'] = {} +            volume['hostPath']['path'] = volume_info['path'] + +        return (volume, volume_mount) + +# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*- + + +# pylint: disable=too-many-instance-attributes +class OCVersion(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' +    # pylint 
allows 5 +    # pylint: disable=too-many-arguments +    def __init__(self, +                 config, +                 debug): +        ''' Constructor for OCVersion ''' +        super(OCVersion, self).__init__(None, config) +        self.debug = debug + +    def get(self): +        '''get and return version information ''' + +        results = {} + +        version_results = self._version() + +        if version_results['returncode'] == 0: +            filtered_vers = Utils.filter_versions(version_results['results']) +            custom_vers = Utils.add_custom_versions(filtered_vers) + +            results['returncode'] = version_results['returncode'] +            results.update(filtered_vers) +            results.update(custom_vers) + +            return results + +        raise OpenShiftCLIError('Problem detecting openshift version.') + +    @staticmethod +    def run_ansible(params): +        '''run the idempotent ansible code''' +        oc_version = OCVersion(params['kubeconfig'], params['debug']) + +        if params['state'] == 'list': + +            #pylint: disable=protected-access +            result = oc_version.get() +            return {'state': params['state'], +                    'results': result, +                    'changed': False} + +# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*- + +class RegistryException(Exception): +    ''' Registry Exception Class ''' +    pass + + +class RegistryConfig(OpenShiftCLIConfig): +    ''' RegistryConfig is a DTO for the registry.  ''' +    def __init__(self, rname, namespace, kubeconfig, registry_options): +        super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options) + + +class Registry(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' + +    volume_mount_path = 'spec.template.spec.containers[0].volumeMounts' +    volume_path = 'spec.template.spec.volumes' +    env_path = 'spec.template.spec.containers[0].env' + +    def __init__(self, +                 registry_config, +                 verbose=False): +        ''' Constructor for Registry + +           a registry consists of 3 or more parts +           - dc/docker-registry +           - svc/docker-registry + +           Parameters: +           :registry_config: +           :verbose: +        ''' +        super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose) +        self.version = OCVersion(registry_config.kubeconfig, verbose) +        self.svc_ip = None +        self.portal_ip = None +        self.config = registry_config +        self.verbose = verbose +        self.registry_parts = [{'kind': 'dc', 'name': self.config.name}, +                               {'kind': 'svc', 'name': self.config.name}, +                              ] + +        self.__prepared_registry = None +        self.volume_mounts = [] +        self.volumes = [] +        if self.config.config_options['volume_mounts']['value']: +            for volume in self.config.config_options['volume_mounts']['value']: +                volume_info = {'secret_name': volume.get('secret_name', None), +                               'name':        volume.get('name', None), +                               'type':        volume.get('type', None), +                               'path':        volume.get('path', None), +                               'claimName':   volume.get('claim_name', None), +                               'claimSize':   
volume.get('claim_size', None), +                              } + +                vol, vol_mount = Volume.create_volume_structure(volume_info) +                self.volumes.append(vol) +                self.volume_mounts.append(vol_mount) + +        self.dconfig = None +        self.svc = None + +    @property +    def deploymentconfig(self): +        ''' deploymentconfig property ''' +        return self.dconfig + +    @deploymentconfig.setter +    def deploymentconfig(self, config): +        ''' setter for deploymentconfig property ''' +        self.dconfig = config + +    @property +    def service(self): +        ''' service property ''' +        return self.svc + +    @service.setter +    def service(self, config): +        ''' setter for service property ''' +        self.svc = config + +    @property +    def prepared_registry(self): +        ''' prepared_registry property ''' +        if not self.__prepared_registry: +            results = self.prepare_registry() +            if not results: +                raise RegistryException('Could not perform registry preparation.') +            self.__prepared_registry = results + +        return self.__prepared_registry + +    @prepared_registry.setter +    def prepared_registry(self, data): +        ''' setter method for prepared_registry attribute ''' +        self.__prepared_registry = data + +    def get(self): +        ''' return the self.registry_parts ''' +        self.deploymentconfig = None +        self.service = None + +        rval = 0 +        for part in self.registry_parts: +            result = self._get(part['kind'], rname=part['name']) +            if result['returncode'] == 0 and part['kind'] == 'dc': +                self.deploymentconfig = DeploymentConfig(result['results'][0]) +            elif result['returncode'] == 0 and part['kind'] == 'svc': +                self.service = Yedit(content=result['results'][0]) + +            if result['returncode'] != 0: +                rval = result['returncode'] + + +        return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service} + +    def exists(self): +        '''does the object exist?''' +        self.get() +        if self.deploymentconfig or self.service: +            return True + +        return False + +    def delete(self, complete=True): +        '''return all pods ''' +        parts = [] +        for part in self.registry_parts: +            if not complete and part['kind'] == 'svc': +                continue +            parts.append(self._delete(part['kind'], part['name'])) + +        # Clean up returned results +        rval = 0 +        for part in parts: +            # pylint: disable=invalid-sequence-index +            if 'returncode' in part and part['returncode'] != 0: +                rval = part['returncode'] + +        return {'returncode': rval, 'results': parts} + +    def prepare_registry(self): +        ''' prepare a registry for instantiation ''' +        options = self.config.to_option_list() + +        cmd = ['registry', '-n', self.config.namespace] +        cmd.extend(options) +        cmd.extend(['--dry-run=True', '-o', 'json']) + +        results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json') +        # probably need to parse this +        # pylint thinks results is a string +        # pylint: disable=no-member +        if results['returncode'] != 0 and 'items' in results['results']: +            return results + +        service = None +        deploymentconfig = None +        # 
pylint: disable=invalid-sequence-index +        for res in results['results']['items']: +            if res['kind'] == 'DeploymentConfig': +                deploymentconfig = DeploymentConfig(res) +            elif res['kind'] == 'Service': +                service = Service(res) + +        # Verify we got a service and a deploymentconfig +        if not service or not deploymentconfig: +            return results + +        # results will need to get parsed here and modifications added +        deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig)) + +        # modify service ip +        if self.svc_ip: +            service.put('spec.clusterIP', self.svc_ip) +        if self.portal_ip: +            service.put('spec.portalIP', self.portal_ip) + +        # need to create the service and the deploymentconfig +        service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict) +        deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict) + +        return {"service": service, +                "service_file": service_file, +                "service_update": False, +                "deployment": deploymentconfig, +                "deployment_file": deployment_file, +                "deployment_update": False} + +    def create(self): +        '''Create a registry''' +        results = [] +        for config_file in ['deployment_file', 'service_file']: +            results.append(self._create(self.prepared_registry[config_file])) + +        # Clean up returned results +        rval = 0 +        for result in results: +            # pylint: disable=invalid-sequence-index +            if 'returncode' in result and result['returncode'] != 0: +                rval = result['returncode'] + +        return {'returncode': rval, 'results': results} + +    def update(self): +        '''run update for the registry.  
This performs a delete and then create ''' +        # Store the current service IP +        if self.service: +            svcip = self.service.get('spec.clusterIP') +            if svcip: +                self.svc_ip = svcip +            portip = self.service.get('spec.portalIP') +            if portip: +                self.portal_ip = portip + +        results = [] +        if self.prepared_registry['deployment_update']: +            results.append(self._replace(self.prepared_registry['deployment_file'])) +        if self.prepared_registry['service_update']: +            results.append(self._replace(self.prepared_registry['service_file'])) + +        # Clean up returned results +        rval = 0 +        for result in results: +            if result['returncode'] != 0: +                rval = result['returncode'] + +        return {'returncode': rval, 'results': results} + +    def add_modifications(self, deploymentconfig): +        ''' update a deployment config with changes ''' +        # The environment variable for REGISTRY_HTTP_SECRET is autogenerated +        # We should set the generated deploymentconfig to the in memory version +        # the following modifications will overwrite if needed +        result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET') +        if result: +            deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value']) + +        # Currently we know that our deployment of a registry requires a few extra modifications +        # Modification 1 +        # we need specific environment variables to be set +        for key, value in self.config.config_options['env_vars'].get('value', {}).items(): +            if not deploymentconfig.exists_env_key(key): +                deploymentconfig.add_env_value(key, value) +            else: +                deploymentconfig.update_env_var(key, value) + +        # Modification 2 +        # we need specific volume variables to be set +        for volume in self.volumes: +            deploymentconfig.update_volume(volume) + +        for vol_mount in self.volume_mounts: +            deploymentconfig.update_volume_mount(vol_mount) + +        # Modification 3 +        # Edits +        edit_results = [] +        for edit in self.config.config_options['edits'].get('value', []): +            if edit['action'] == 'put': +                edit_results.append(deploymentconfig.put(edit['key'], +                                                         edit['value'])) +            if edit['action'] == 'update': +                edit_results.append(deploymentconfig.update(edit['key'], +                                                            edit['value'], +                                                            edit.get('index', None), +                                                            edit.get('curr_value', None))) +            if edit['action'] == 'append': +                edit_results.append(deploymentconfig.append(edit['key'], +                                                            edit['value'])) + +        if edit_results and not any([res[0] for res in edit_results]): +            return None + +        return deploymentconfig.yaml_dict + +    def needs_update(self): +        ''' check to see if we need to update ''' +        if not self.service or not self.deploymentconfig: +            return True + +        exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol'] +        if not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict, +                              
       self.service.yaml_dict, +                                     exclude_list, +                                     debug=self.verbose): +            self.prepared_registry['service_update'] = True + +        exclude_list = ['dnsPolicy', +                        'terminationGracePeriodSeconds', +                        'restartPolicy', 'timeoutSeconds', +                        'livenessProbe', 'readinessProbe', +                        'terminationMessagePath', +                        'securityContext', +                        'imagePullPolicy', +                        'protocol', # ports.portocol: TCP +                        'type', # strategy: {'type': 'rolling'} +                        'defaultMode', # added on secrets +                        'activeDeadlineSeconds', # added in 1.5 for timeouts +                       ] + +        if not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict, +                                     self.deploymentconfig.yaml_dict, +                                     exclude_list, +                                     debug=self.verbose): +            self.prepared_registry['deployment_update'] = True + +        return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False + +    # In the future, we would like to break out each ansible state into a function. +    # pylint: disable=too-many-branches,too-many-return-statements +    @staticmethod +    def run_ansible(params, check_mode): +        '''run idempotent ansible code''' + +        rconfig = RegistryConfig(params['name'], +                                 params['namespace'], +                                 params['kubeconfig'], +                                 {'images': {'value': params['images'], 'include': True}, +                                  'latest_images': {'value': params['latest_images'], 'include': True}, +                                  'labels': {'value': params['labels'], 'include': True}, +                                  'ports': {'value': ','.join(params['ports']), 'include': True}, +                                  'replicas': {'value': params['replicas'], 'include': True}, +                                  'selector': {'value': params['selector'], 'include': True}, +                                  'service_account': {'value': params['service_account'], 'include': True}, +                                  'mount_host': {'value': params['mount_host'], 'include': True}, +                                  'env_vars': {'value': params['env_vars'], 'include': False}, +                                  'volume_mounts': {'value': params['volume_mounts'], 'include': False}, +                                  'edits': {'value': params['edits'], 'include': False}, +                                  'enforce_quota': {'value': params['enforce_quota'], 'include': True}, +                                  'daemonset': {'value': params['daemonset'], 'include': True}, +                                  'tls_key': {'value': params['tls_key'], 'include': True}, +                                  'tls_certificate': {'value': params['tls_certificate'], 'include': True}, +                                 }) + + +        ocregistry = Registry(rconfig, params['debug']) + +        api_rval = ocregistry.get() + +        state = params['state'] +        ######## +        # get +        ######## +        if state == 'list': + +            if api_rval['returncode'] != 0: +                return {'failed': True, 'msg': api_rval} 
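+            # descriptive note: state 'list' is read-only, so the results are returned without reporting a change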
+ +            return {'changed': False, 'results': api_rval, 'state': state} + +        ######## +        # Delete +        ######## +        if state == 'absent': +            if not ocregistry.exists(): +                return {'changed': False, 'state': state} + +            if check_mode: +                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'} + +            # Unsure as to why this is angry with the return type. +            # pylint: disable=redefined-variable-type +            api_rval = ocregistry.delete() + +            if api_rval['returncode'] != 0: +                return {'failed': True, 'msg': api_rval} + +            return {'changed': True, 'results': api_rval, 'state': state} + +        if state == 'present': +            ######## +            # Create +            ######## +            if not ocregistry.exists(): + +                if check_mode: +                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} + +                api_rval = ocregistry.create() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': state} + +            ######## +            # Update +            ######## +            if not params['force'] and not ocregistry.needs_update(): +                return {'changed': False, 'state': state} + +            if check_mode: +                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} + +            api_rval = ocregistry.update() + +            if api_rval['returncode'] != 0: +                return {'failed': True, 'msg': api_rval} + +            return {'changed': True, 'results': api_rval, 'state': state} + +        return {'failed': True, 'msg': 'Unknown state passed. 
%s' % state} + +# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*- + +def main(): +    ''' +    ansible oc module for registry +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            state=dict(default='present', type='str', +                       choices=['present', 'absent']), +            debug=dict(default=False, type='bool'), +            namespace=dict(default='default', type='str'), +            name=dict(default=None, required=True, type='str'), + +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            images=dict(default=None, type='str'), +            latest_images=dict(default=False, type='bool'), +            labels=dict(default=None, type='list'), +            ports=dict(default=['5000'], type='list'), +            replicas=dict(default=1, type='int'), +            selector=dict(default=None, type='str'), +            service_account=dict(default='registry', type='str'), +            mount_host=dict(default=None, type='str'), +            volume_mounts=dict(default=None, type='list'), +            env_vars=dict(default={}, type='dict'), +            edits=dict(default=[], type='list'), +            enforce_quota=dict(default=False, type='bool'), +            force=dict(default=False, type='bool'), +            daemonset=dict(default=False, type='bool'), +            tls_key=dict(default=None, type='str'), +            tls_certificate=dict(default=None, type='str'), +        ), + +        supports_check_mode=True, +    ) + +    results = Registry.run_ansible(module.params, module.check_mode) +    if 'failed' in results: +        module.fail_json(**results) + +    module.exit_json(**results) + + +if __name__ == '__main__': +    main() + +# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*- diff --git a/roles/lib_openshift/library/oc_adm_registry.py.good b/roles/lib_openshift/library/oc_adm_registry.py.good new file mode 100644 index 000000000..6fc85073f --- /dev/null +++ b/roles/lib_openshift/library/oc_adm_registry.py.good @@ -0,0 +1,2601 @@ +#!/usr/bin/env python +# pylint: disable=missing-docstring +# flake8: noqa: T001 +#     ___ ___ _  _ ___ ___    _ _____ ___ ___ +#    / __| __| \| | __| _ \  /_\_   _| __|   \ +#   | (_ | _|| .` | _||   / / _ \| | | _|| |) | +#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____ +#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _| +#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | | +#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_| +# +# Copyright 2016 Red Hat, Inc. and/or its affiliates +# and other contributors as indicated by the @author tags. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*- +''' +   OpenShiftCLI class that wraps the oc commands in a subprocess +''' +# pylint: disable=too-many-lines + +from __future__ import print_function +#import atexit +import copy +import json +import os +import re +import shutil +import subprocess +import tempfile +# pylint: disable=import-error +try: +    import ruamel.yaml as yaml +except ImportError: +    import yaml + +from ansible.module_utils.basic import AnsibleModule + +# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*- + +DOCUMENTATION = ''' +--- +module: oc_adm_registry +short_description: Module to manage openshift registry +description: +  - Manage openshift registry programmatically. +options: +  state: +    description: +    - The desired action when managing openshift registry +    - present - update or create the registry +    - absent - tear down the registry service and deploymentconfig +    - list - returns the current representiation of a registry +    required: false +    default: False +    aliases: [] +  kubeconfig: +    description: +    - The path for the kubeconfig file to use for authentication +    required: false +    default: /etc/origin/master/admin.kubeconfig +    aliases: [] +  debug: +    description: +    - Turn on debug output. +    required: false +    default: False +    aliases: [] +  name: +    description: +    - The name of the registry +    required: false +    default: None +    aliases: [] +  namespace: +    description: +    - The selector when filtering on node labels +    required: false +    default: None +    aliases: [] +  images: +    description: +    - The image to base this registry on - ${component} will be replaced with --type +    required: 'openshift3/ose-${component}:${version}' +    default: None +    aliases: [] +  latest_images: +    description: +    - If true, attempt to use the latest image for the registry instead of the latest release. +    required: false +    default: False +    aliases: [] +  labels: +    description: +    - A set of labels to uniquely identify the registry and its components. +    required: false +    default: None +    aliases: [] +  enforce_quota: +    description: +    - If set, the registry will refuse to write blobs if they exceed quota limits +    required: False +    default: False +    aliases: [] +  mount_host: +    description: +    - If set, the registry volume will be created as a host-mount at this path. +    required: False +    default: False +    aliases: [] +  ports: +    description: +    - A comma delimited list of ports or port pairs to expose on the registry pod.  The default is set for 5000. +    required: False +    default: [5000] +    aliases: [] +  replicas: +    description: +    - The replication factor of the registry; commonly 2 when high availability is desired. +    required: False +    default: 1 +    aliases: [] +  selector: +    description: +    - Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes. +    required: False +    default: None +    aliases: [] +  service_account: +    description: +    - Name of the service account to use to run the registry pod. 
+    required: False +    default: 'registry' +    aliases: [] +  tls_certificate: +    description: +    - An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS +    required: false +    default: None +    aliases: [] +  tls_key: +    description: +    - An optional path to a PEM encoded private key for serving over TLS +    required: false +    default: None +    aliases: [] +  volume_mounts: +    description: +    - The volume mounts for the registry. +    required: false +    default: None +    aliases: [] +  daemonset: +    description: +    - Use a daemonset instead of a deployment config. +    required: false +    default: False +    aliases: [] +  edits: +    description: +    - A list of modifications to make on the deploymentconfig +    required: false +    default: None +    aliases: [] +  env_vars: +    description: +    - A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR +    required: false +    default: None +    aliases: [] +  force: +    description: +    - Force a registry update. +    required: false +    default: False +    aliases: [] +author: +- "Kenny Woodson <kwoodson@redhat.com>" +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +- name: create a secure registry +  oc_adm_registry: +    name: docker-registry +    service_account: registry +    replicas: 2 +    namespace: default +    selector: type=infra +    images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}" +    env_vars: +      REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml +      REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt +      REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key +      REGISTRY_HTTP_SECRET: supersecret +    volume_mounts: +    - path: /etc/secrets +      name: dockercerts +      type: secret +      secret_name: registry-secret +    - path: /etc/registryconfig +      name: dockersecrets +      type: secret +      secret_name: docker-registry-config +    edits: +    - key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme +      value: HTTPS +      action: put +    - key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme +      value: HTTPS +      action: put +    - key: spec.strategy.rollingParams +      value: +        intervalSeconds: 1 +        maxSurge: 50% +        maxUnavailable: 50% +        timeoutSeconds: 600 +        updatePeriodSeconds: 1 +      action: put +    - key: spec.template.spec.containers[0].resources.limits.memory +      value: 2G +      action: update +    - key: spec.template.spec.containers[0].resources.requests.memory +      value: 1G +      action: update + +  register: registryout + +''' + +# -*- -*- -*- End included fragment: doc/registry -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*- +# pylint: disable=undefined-variable,missing-docstring +# noqa: E301,E302 + + +class YeditException(Exception): +    ''' Exception class for Yedit ''' +    pass + + +# pylint: disable=too-many-public-methods +class Yedit(object): +    ''' Class to modify yaml files ''' +    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" +    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" +    com_sep = set(['.', '#', '|', ':']) + +    # pylint: disable=too-many-arguments +    def __init__(self, +                 filename=None, +                 content=None, +                 content_type='yaml', +                 separator='.', +                 backup=False): +        
self.content = content +        self._separator = separator +        self.filename = filename +        self.__yaml_dict = content +        self.content_type = content_type +        self.backup = backup +        self.load(content_type=self.content_type) +        if self.__yaml_dict is None: +            self.__yaml_dict = {} + +    @property +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @separator.setter +    def separator(self): +        ''' getter method for yaml_dict ''' +        return self._separator + +    @property +    def yaml_dict(self): +        ''' getter method for yaml_dict ''' +        return self.__yaml_dict + +    @yaml_dict.setter +    def yaml_dict(self, value): +        ''' setter method for yaml_dict ''' +        self.__yaml_dict = value + +    @staticmethod +    def parse_key(key, sep='.'): +        '''parse the key allowing the appropriate separator''' +        common_separators = list(Yedit.com_sep - set([sep])) +        return re.findall(Yedit.re_key % ''.join(common_separators), key) + +    @staticmethod +    def valid_key(key, sep='.'): +        '''validate the incoming key''' +        common_separators = list(Yedit.com_sep - set([sep])) +        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key): +            return False + +        return True + +    @staticmethod +    def remove_entry(data, key, sep='.'): +        ''' remove data at location key ''' +        if key == '' and isinstance(data, dict): +            data.clear() +            return True +        elif key == '' and isinstance(data, list): +            del data[:] +            return True + +        if not (key and Yedit.valid_key(key, sep)) and \ +           isinstance(data, (list, dict)): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        # process last index for remove +        # expected list entry +        if key_indexes[-1][0]: +            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +                del data[int(key_indexes[-1][0])] +                return True + +        # expected dict entry +        elif key_indexes[-1][1]: +            if isinstance(data, dict): +                del data[key_indexes[-1][1]] +                return True + +    @staticmethod +    def add_entry(data, key, item=None, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a#b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes[:-1]: +            if dict_key: +                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501 +                    data = data[dict_key] +                    continue + +                elif data and not isinstance(data, dict): +                    return None + +                data[dict_key] = {} +                data = 
data[dict_key] + +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        if key == '': +            data = item + +        # process last index for add +        # expected list entry +        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501 +            data[int(key_indexes[-1][0])] = item + +        # expected dict entry +        elif key_indexes[-1][1] and isinstance(data, dict): +            data[key_indexes[-1][1]] = item + +        return data + +    @staticmethod +    def get_entry(data, key, sep='.'): +        ''' Get an item from a dictionary with key notation a.b.c +            d = {'a': {'b': 'c'}}} +            key = a.b +            return c +        ''' +        if key == '': +            pass +        elif (not (key and Yedit.valid_key(key, sep)) and +              isinstance(data, (list, dict))): +            return None + +        key_indexes = Yedit.parse_key(key, sep) +        for arr_ind, dict_key in key_indexes: +            if dict_key and isinstance(data, dict): +                data = data.get(dict_key, None) +            elif (arr_ind and isinstance(data, list) and +                  int(arr_ind) <= len(data) - 1): +                data = data[int(arr_ind)] +            else: +                return None + +        return data + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. ''' + +        tmp_filename = filename + '.yedit' + +        with open(tmp_filename, 'w') as yfd: +            yfd.write(contents) + +        os.rename(tmp_filename, filename) + +    def write(self): +        ''' write to file ''' +        if not self.filename: +            raise YeditException('Please specify a filename.') + +        if self.backup and self.file_exists(): +            shutil.copy(self.filename, self.filename + '.orig') + +        # Try to set format attributes if supported +        try: +            self.yaml_dict.fa.set_block_style() +        except AttributeError: +            pass + +        # Try to use RoundTripDumper if supported. 
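+        # When the `yaml` module in use is ruamel.yaml, RoundTripDumper is
+        # available and preserves comments and key order; plain PyYAML has no
+        # RoundTripDumper attribute, so the AttributeError branch below falls
+        # back to yaml.safe_dump.  Equivalent explicit check (illustrative
+        # only, not executed here):
+        #
+        #     if hasattr(yaml, 'RoundTripDumper'):
+        #         text = yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)
+        #     else:
+        #         text = yaml.safe_dump(self.yaml_dict, default_flow_style=False)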
+        try: +            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) +        except AttributeError: +            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) + +        return (True, self.yaml_dict) + +    def read(self): +        ''' read from file ''' +        # check if it exists +        if self.filename is None or not self.file_exists(): +            return None + +        contents = None +        with open(self.filename) as yfd: +            contents = yfd.read() + +        return contents + +    def file_exists(self): +        ''' return whether file exists ''' +        if os.path.exists(self.filename): +            return True + +        return False + +    def load(self, content_type='yaml'): +        ''' return yaml file ''' +        contents = self.read() + +        if not contents and not self.content: +            return None + +        if self.content: +            if isinstance(self.content, dict): +                self.yaml_dict = self.content +                return self.yaml_dict +            elif isinstance(self.content, str): +                contents = self.content + +        # check if it is yaml +        try: +            if content_type == 'yaml' and contents: +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +                # Try to use RoundTripLoader if supported. +                try: +                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader) +                except AttributeError: +                    self.yaml_dict = yaml.safe_load(contents) + +                # Try to set format attributes if supported +                try: +                    self.yaml_dict.fa.set_block_style() +                except AttributeError: +                    pass + +            elif content_type == 'json' and contents: +                self.yaml_dict = json.loads(contents) +        except yaml.YAMLError as err: +            # Error loading yaml or json +            raise YeditException('Problem with loading yaml file. 
%s' % err) + +        return self.yaml_dict + +    def get(self, key): +        ''' get a specified key''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, key, self.separator) +        except KeyError: +            entry = None + +        return entry + +    def pop(self, path, key_or_item): +        ''' remove a key, value pair from a dict or an item for a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if key_or_item in entry: +                entry.pop(key_or_item) +                return (True, self.yaml_dict) +            return (False, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            try: +                ind = entry.index(key_or_item) +            except ValueError: +                return (False, self.yaml_dict) + +            entry.pop(ind) +            return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    def delete(self, path): +        ''' remove path from a dict''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            return (False, self.yaml_dict) + +        result = Yedit.remove_entry(self.yaml_dict, path, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        return (True, self.yaml_dict) + +    def exists(self, path, value): +        ''' check if value exists at path''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, list): +            if value in entry: +                return True +            return False + +        elif isinstance(entry, dict): +            if isinstance(value, dict): +                rval = False +                for key, val in value.items(): +                    if entry[key] != val: +                        rval = False +                        break +                else: +                    rval = True +                return rval + +            return value in entry + +        return entry == value + +    def append(self, path, value): +        '''append value to a list''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry is None: +            self.put(path, []) +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        if not isinstance(entry, list): +            return (False, self.yaml_dict) + +        # AUDIT:maybe-no-member makes sense due to loading data from +        # a serialized format. 
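+        # Illustrative behaviour sketch (assumed standalone use of Yedit):
+        #
+        #     yed = Yedit(content={'spec': {}})
+        #     yed.append('spec.args', '--verbose')
+        #     # -> (True, {'spec': {'args': ['--verbose']}})  the list is
+        #     #    created on demand by the put() call above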
+        # pylint: disable=maybe-no-member +        entry.append(value) +        return (True, self.yaml_dict) + +    # pylint: disable=too-many-arguments +    def update(self, path, value, index=None, curr_value=None): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if isinstance(entry, dict): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            if not isinstance(value, dict): +                raise YeditException('Cannot replace key, value entry in ' + +                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501 + +            entry.update(value) +            return (True, self.yaml_dict) + +        elif isinstance(entry, list): +            # AUDIT:maybe-no-member makes sense due to fuzzy types +            # pylint: disable=maybe-no-member +            ind = None +            if curr_value: +                try: +                    ind = entry.index(curr_value) +                except ValueError: +                    return (False, self.yaml_dict) + +            elif index is not None: +                ind = index + +            if ind is not None and entry[ind] != value: +                entry[ind] = value +                return (True, self.yaml_dict) + +            # see if it exists in the list +            try: +                ind = entry.index(value) +            except ValueError: +                # doesn't exist, append it +                entry.append(value) +                return (True, self.yaml_dict) + +            # already exists, return +            if ind is not None: +                return (False, self.yaml_dict) +        return (False, self.yaml_dict) + +    def put(self, path, value): +        ''' put path, value into a dict ''' +        try: +            entry = Yedit.get_entry(self.yaml_dict, path, self.separator) +        except KeyError: +            entry = None + +        if entry == value: +            return (False, self.yaml_dict) + +        # deepcopy didn't work +        # Try to use ruamel.yaml and fallback to pyyaml +        try: +            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                      default_flow_style=False), +                                 yaml.RoundTripLoader) +        except AttributeError: +            tmp_copy = copy.deepcopy(self.yaml_dict) + +        # set the format attributes if available +        try: +            tmp_copy.fa.set_block_style() +        except AttributeError: +            pass + +        result = Yedit.add_entry(tmp_copy, path, value, self.separator) +        if not result: +            return (False, self.yaml_dict) + +        self.yaml_dict = tmp_copy + +        return (True, self.yaml_dict) + +    def create(self, path, value): +        ''' create a yaml file ''' +        if not self.file_exists(): +            # deepcopy didn't work +            # Try to use ruamel.yaml and fallback to pyyaml +            try: +                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, +                                                          default_flow_style=False), +                                     yaml.RoundTripLoader) +            except AttributeError: +                tmp_copy = copy.deepcopy(self.yaml_dict) + +            # set the format attributes if 
available +            try: +                tmp_copy.fa.set_block_style() +            except AttributeError: +                pass + +            result = Yedit.add_entry(tmp_copy, path, value, self.separator) +            if result: +                self.yaml_dict = tmp_copy +                return (True, self.yaml_dict) + +        return (False, self.yaml_dict) + +    @staticmethod +    def get_curr_value(invalue, val_type): +        '''return the current value''' +        if invalue is None: +            return None + +        curr_value = invalue +        if val_type == 'yaml': +            curr_value = yaml.load(invalue) +        elif val_type == 'json': +            curr_value = json.loads(invalue) + +        return curr_value + +    @staticmethod +    def parse_value(inc_value, vtype=''): +        '''determine value type passed''' +        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', +                      'on', 'On', 'ON', ] +        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', +                       'off', 'Off', 'OFF'] + +        # It came in as a string but you didn't specify value_type as string +        # we will convert to bool if it matches any of the above cases +        if isinstance(inc_value, str) and 'bool' in vtype: +            if inc_value not in true_bools and inc_value not in false_bools: +                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]' +                                     % (inc_value, vtype)) +        elif isinstance(inc_value, bool) and 'str' in vtype: +            inc_value = str(inc_value) + +        # If vtype is not str then go ahead and attempt to yaml load it. +        if isinstance(inc_value, str) and 'str' not in vtype: +            try: +                inc_value = yaml.load(inc_value) +            except Exception: +                raise YeditException('Could not determine type of incoming ' + +                                     'value. value=[%s] vtype=[%s]' +                                     % (type(inc_value), vtype)) + +        return inc_value + +    # pylint: disable=too-many-return-statements,too-many-branches +    @staticmethod +    def run_ansible(module): +        '''perform the idempotent crud operations''' +        yamlfile = Yedit(filename=module.params['src'], +                         backup=module.params['backup'], +                         separator=module.params['separator']) + +        if module.params['src']: +            rval = yamlfile.load() + +            if yamlfile.yaml_dict is None and \ +               module.params['state'] != 'present': +                return {'failed': True, +                        'msg': 'Error opening file [%s].  
Verify that the ' + +                               'file exists, that it is has correct' + +                               ' permissions, and is valid yaml.'} + +        if module.params['state'] == 'list': +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) +                yamlfile.yaml_dict = content + +            if module.params['key']: +                rval = yamlfile.get(module.params['key']) or {} + +            return {'changed': False, 'result': rval, 'state': "list"} + +        elif module.params['state'] == 'absent': +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) +                yamlfile.yaml_dict = content + +            if module.params['update']: +                rval = yamlfile.pop(module.params['key'], +                                    module.params['value']) +            else: +                rval = yamlfile.delete(module.params['key']) + +            if rval[0] and module.params['src']: +                yamlfile.write() + +            return {'changed': rval[0], 'result': rval[1], 'state': "absent"} + +        elif module.params['state'] == 'present': +            # check if content is different than what is in the file +            if module.params['content']: +                content = Yedit.parse_value(module.params['content'], +                                            module.params['content_type']) + +                # We had no edits to make and the contents are the same +                if yamlfile.yaml_dict == content and \ +                   module.params['value'] is None: +                    return {'changed': False, +                            'result': yamlfile.yaml_dict, +                            'state': "present"} + +                yamlfile.yaml_dict = content + +            # we were passed a value; parse it +            if module.params['value']: +                value = Yedit.parse_value(module.params['value'], +                                          module.params['value_type']) +                key = module.params['key'] +                if module.params['update']: +                    # pylint: disable=line-too-long +                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501 +                                                      module.params['curr_value_format'])  # noqa: E501 + +                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501 + +                elif module.params['append']: +                    rval = yamlfile.append(key, value) +                else: +                    rval = yamlfile.put(key, value) + +                if rval[0] and module.params['src']: +                    yamlfile.write() + +                return {'changed': rval[0], +                        'result': rval[1], 'state': "present"} + +            # no edits to make +            if module.params['src']: +                # pylint: disable=redefined-variable-type +                rval = yamlfile.write() +                return {'changed': rval[0], +                        'result': rval[1], +                        'state': "present"} + +        return {'failed': True, 'msg': 'Unkown state passed'} + +# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- 
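+# Illustrative sketch of driving the Yedit class directly (assumed standalone
+# use; the Ansible module itself goes through Yedit.run_ansible instead):
+#
+#     yed = Yedit(filename='/tmp/example.yml', backup=True)
+#     yed.put('spec.replicas', 3)            # create or overwrite a dotted path
+#     yed.append('spec.args', '--verbose')   # append, creating the list if needed
+#     changed, data = yed.write()            # writes via tempfile + os.rename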
-*- -*- + +# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*- +# pylint: disable=too-many-lines +# noqa: E301,E302,E303,T001 + + +class OpenShiftCLIError(Exception): +    '''Exception class for openshiftcli''' +    pass + + +ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] + + +def locate_oc_binary(): +    ''' Find and return oc binary file ''' +    # https://github.com/openshift/openshift-ansible/issues/3410 +    # oc can be in /usr/local/bin in some cases, but that may not +    # be in $PATH due to ansible/sudo +    paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS + +    oc_binary = 'oc' + +    # Use shutil.which if it is available, otherwise fallback to a naive path search +    try: +        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) +        if which_result is not None: +            oc_binary = which_result +    except AttributeError: +        for path in paths: +            if os.path.exists(os.path.join(path, oc_binary)): +                oc_binary = os.path.join(path, oc_binary) +                break + +    return oc_binary + + +# pylint: disable=too-few-public-methods +class OpenShiftCLI(object): +    ''' Class to wrap the command line tools ''' +    def __init__(self, +                 namespace, +                 kubeconfig='/etc/origin/master/admin.kubeconfig', +                 verbose=False, +                 all_namespaces=False): +        ''' Constructor for OpenshiftCLI ''' +        self.namespace = namespace +        self.verbose = verbose +        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) +        self.all_namespaces = all_namespaces +        self.oc_binary = locate_oc_binary() + +    # Pylint allows only 5 arguments to be passed. 
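+    # OpenShiftCLI is the thin `oc` wrapper that the concrete classes further
+    # down (OCVersion, Registry, ...) build on: _get/_create/_replace/_delete
+    # all shell out through openshift_cmd().  Hedged usage sketch (the
+    # selector value is an assumption, not taken from this module):
+    #
+    #     cli = OpenShiftCLI('default', '/etc/origin/master/admin.kubeconfig')
+    #     dcs = cli._get('dc', selector='docker-registry=default')
+    #     if dcs['returncode'] == 0:
+    #         names = [d['metadata']['name'] for d in dcs['results']]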
+    # pylint: disable=too-many-arguments +    def _replace_content(self, resource, rname, content, force=False, sep='.'): +        ''' replace the current object with the content ''' +        res = self._get(resource, rname) +        if not res['results']: +            return res + +        fname = Utils.create_tmpfile(rname + '-') + +        yed = Yedit(fname, res['results'][0], separator=sep) +        changes = [] +        for key, value in content.items(): +            changes.append(yed.put(key, value)) + +        if any([change[0] for change in changes]): +            yed.write() + +            #atexit.register(Utils.cleanup, [fname]) + +            return self._replace(fname, force) + +        return {'returncode': 0, 'updated': False} + +    def _replace(self, fname, force=False): +        '''replace the current object with oc replace''' +        cmd = ['replace', '-f', fname] +        if force: +            cmd.append('--force') +        return self.openshift_cmd(cmd) + +    def _create_from_content(self, rname, content): +        '''create a temporary file and then call oc create on it''' +        fname = Utils.create_tmpfile(rname + '-') +        yed = Yedit(fname, content=content) +        yed.write() + +        #atexit.register(Utils.cleanup, [fname]) + +        return self._create(fname) + +    def _create(self, fname): +        '''call oc create on a filename''' +        return self.openshift_cmd(['create', '-f', fname]) + +    def _delete(self, resource, rname, selector=None): +        '''call oc delete on a resource''' +        cmd = ['delete', resource, rname] +        if selector: +            cmd.append('--selector=%s' % selector) + +        return self.openshift_cmd(cmd) + +    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501 +        '''process a template + +           template_name: the name of the template to process +           create: whether to send to oc create after processing +           params: the parameters for the template +           template_data: the incoming template's data; instead of a file +        ''' +        cmd = ['process'] +        if template_data: +            cmd.extend(['-f', '-']) +        else: +            cmd.append(template_name) +        if params: +            param_str = ["%s=%s" % (key, value) for key, value in params.items()] +            cmd.append('-v') +            cmd.extend(param_str) + +        results = self.openshift_cmd(cmd, output=True, input_data=template_data) + +        if results['returncode'] != 0 or not create: +            return results + +        fname = Utils.create_tmpfile(template_name + '-') +        yed = Yedit(fname, results['results']) +        yed.write() + +        #atexit.register(Utils.cleanup, [fname]) + +        return self.openshift_cmd(['create', '-f', fname]) + +    def _get(self, resource, rname=None, selector=None): +        '''return a resource by name ''' +        cmd = ['get', resource] +        if selector: +            cmd.append('--selector=%s' % selector) +        elif rname: +            cmd.append(rname) + +        cmd.extend(['-o', 'json']) + +        rval = self.openshift_cmd(cmd, output=True) + +        # Ensure results are retuned in an array +        if 'items' in rval: +            rval['results'] = rval['items'] +        elif not isinstance(rval['results'], list): +            rval['results'] = [rval['results']] + +        return rval + +    def _schedulable(self, node=None, selector=None, schedulable=True): +        ''' perform 
oadm manage-node scheduable ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        cmd.append('--schedulable=%s' % schedulable) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501 + +    def _list_pods(self, node=None, selector=None, pod_selector=None): +        ''' perform oadm list pods + +            node: the node in which to list pods +            selector: the label selector filter if provided +            pod_selector: the pod selector filter if provided +        ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        cmd.extend(['--list-pods', '-o', 'json']) + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    # pylint: disable=too-many-arguments +    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): +        ''' perform oadm manage-node evacuate ''' +        cmd = ['manage-node'] +        if node: +            cmd.extend(node) +        else: +            cmd.append('--selector=%s' % selector) + +        if dry_run: +            cmd.append('--dry-run') + +        if pod_selector: +            cmd.append('--pod-selector=%s' % pod_selector) + +        if grace_period: +            cmd.append('--grace-period=%s' % int(grace_period)) + +        if force: +            cmd.append('--force') + +        cmd.append('--evacuate') + +        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') + +    def _version(self): +        ''' return the openshift version''' +        return self.openshift_cmd(['version'], output=True, output_type='raw') + +    def _import_image(self, url=None, name=None, tag=None): +        ''' perform image import ''' +        cmd = ['import-image'] + +        image = '{0}'.format(name) +        if tag: +            image += ':{0}'.format(tag) + +        cmd.append(image) + +        if url: +            cmd.append('--from={0}/{1}'.format(url, image)) + +        cmd.append('-n{0}'.format(self.namespace)) + +        cmd.append('--confirm') +        return self.openshift_cmd(cmd) + +    def _run(self, cmds, input_data): +        ''' Actually executes the command. This makes mocking easier. 
''' +        curr_env = os.environ.copy() +        curr_env.update({'KUBECONFIG': self.kubeconfig}) +        proc = subprocess.Popen(cmds, +                                stdin=subprocess.PIPE, +                                stdout=subprocess.PIPE, +                                stderr=subprocess.PIPE, +                                env=curr_env) + +        stdout, stderr = proc.communicate(input_data) + +        return proc.returncode, stdout.decode(), stderr.decode() + +    # pylint: disable=too-many-arguments,too-many-branches +    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): +        '''Base command for oc ''' +        cmds = [self.oc_binary] + +        if oadm: +            cmds.append('adm') + +        if self.all_namespaces: +            cmds.extend(['--all-namespaces']) +        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501 +            cmds.extend(['-n', self.namespace]) + +        cmds.extend(cmd) + +        rval = {} +        results = '' +        err = None + +        if self.verbose: +            print(' '.join(cmds)) + +        try: +            returncode, stdout, stderr = self._run(cmds, input_data) +        except OSError as ex: +            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) + +        rval = {"returncode": returncode, +                "results": results, +                "cmd": ' '.join(cmds)} + +        if returncode == 0: +            if output: +                if output_type == 'json': +                    try: +                        rval['results'] = json.loads(stdout) +                    except ValueError as err: +                        if "No JSON object could be decoded" in err.args: +                            err = err.args +                elif output_type == 'raw': +                    rval['results'] = stdout + +            if self.verbose: +                print("STDOUT: {0}".format(stdout)) +                print("STDERR: {0}".format(stderr)) + +            if err: +                rval.update({"err": err, +                             "stderr": stderr, +                             "stdout": stdout, +                             "cmd": cmds}) + +        else: +            rval.update({"stderr": stderr, +                         "stdout": stdout, +                         "results": {}}) + +        return rval + + +class Utils(object): +    ''' utilities for openshiftcli modules ''' + +    @staticmethod +    def _write(filename, contents): +        ''' Actually write the file contents to disk. This helps with mocking. 
''' + +        with open(filename, 'w') as sfd: +            sfd.write(contents) + +    @staticmethod +    def create_tmp_file_from_contents(rname, data, ftype='yaml'): +        ''' create a file in tmp with name and contents''' + +        tmp = Utils.create_tmpfile(prefix=rname) + +        if ftype == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripDumper'): +                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) +            else: +                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) + +        elif ftype == 'json': +            Utils._write(tmp, json.dumps(data)) +        else: +            Utils._write(tmp, data) + +        # Register cleanup when module is done +        #atexit.register(Utils.cleanup, [tmp]) +        return tmp + +    @staticmethod +    def create_tmpfile_copy(inc_file): +        '''create a temporary copy of a file''' +        tmpfile = Utils.create_tmpfile('lib_openshift-') +        Utils._write(tmpfile, open(inc_file).read()) + +        # Cleanup the tmpfile +        #atexit.register(Utils.cleanup, [tmpfile]) + +        return tmpfile + +    @staticmethod +    def create_tmpfile(prefix='tmp'): +        ''' Generates and returns a temporary file name ''' + +        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: +            return tmp.name + +    @staticmethod +    def create_tmp_files_from_contents(content, content_type=None): +        '''Turn an array of dict: filename, content into a files array''' +        if not isinstance(content, list): +            content = [content] +        files = [] +        for item in content: +            path = Utils.create_tmp_file_from_contents(item['path'] + '-', +                                                       item['data'], +                                                       ftype=content_type) +            files.append({'name': os.path.basename(item['path']), +                          'path': path}) +        return files + +    @staticmethod +    def cleanup(files): +        '''Clean up on exit ''' +        for sfile in files: +            if os.path.exists(sfile): +                if os.path.isdir(sfile): +                    shutil.rmtree(sfile) +                elif os.path.isfile(sfile): +                    os.remove(sfile) + +    @staticmethod +    def exists(results, _name): +        ''' Check to see if the results include the name ''' +        if not results: +            return False + +        if Utils.find_result(results, _name): +            return True + +        return False + +    @staticmethod +    def find_result(results, _name): +        ''' Find the specified result by name''' +        rval = None +        for result in results: +            if 'metadata' in result and result['metadata']['name'] == _name: +                rval = result +                break + +        return rval + +    @staticmethod +    def get_resource_file(sfile, sfile_type='yaml'): +        ''' return the service file ''' +        contents = None +        with open(sfile) as sfd: +            contents = sfd.read() + +        if sfile_type == 'yaml': +            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage +            # pylint: disable=no-member +            if hasattr(yaml, 'RoundTripLoader'): +                contents = yaml.load(contents, yaml.RoundTripLoader) +            else: +                contents 
= yaml.safe_load(contents) +        elif sfile_type == 'json': +            contents = json.loads(contents) + +        return contents + +    @staticmethod +    def filter_versions(stdout): +        ''' filter the oc version output ''' + +        version_dict = {} +        version_search = ['oc', 'openshift', 'kubernetes'] + +        for line in stdout.strip().split('\n'): +            for term in version_search: +                if not line: +                    continue +                if line.startswith(term): +                    version_dict[term] = line.split()[-1] + +        # horrible hack to get openshift version in Openshift 3.2 +        #  By default "oc version in 3.2 does not return an "openshift" version +        if "openshift" not in version_dict: +            version_dict["openshift"] = version_dict["oc"] + +        return version_dict + +    @staticmethod +    def add_custom_versions(versions): +        ''' create custom versions strings ''' + +        versions_dict = {} + +        for tech, version in versions.items(): +            # clean up "-" from version +            if "-" in version: +                version = version.split("-")[0] + +            if version.startswith('v'): +                versions_dict[tech + '_numeric'] = version[1:].split('+')[0] +                # "v3.3.0.33" is what we have, we want "3.3" +                versions_dict[tech + '_short'] = version[1:4] + +        return versions_dict + +    @staticmethod +    def openshift_installed(): +        ''' check if openshift is installed ''' +        import yum + +        yum_base = yum.YumBase() +        if yum_base.rpmdb.searchNevra(name='atomic-openshift'): +            return True + +        return False + +    # Disabling too-many-branches.  This is a yaml dictionary comparison function +    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements +    @staticmethod +    def check_def_equal(user_def, result_def, skip_keys=None, debug=False): +        ''' Given a user defined definition, compare it with the results given back by our query.  
''' + +        # Currently these values are autogenerated and we do not need to check them +        skip = ['metadata', 'status'] +        if skip_keys: +            skip.extend(skip_keys) + +        for key, value in result_def.items(): +            if key in skip: +                continue + +            # Both are lists +            if isinstance(value, list): +                if key not in user_def: +                    if debug: +                        print('User data does not have key [%s]' % key) +                        print('User data: %s' % user_def) +                    return False + +                if not isinstance(user_def[key], list): +                    if debug: +                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) +                    return False + +                if len(user_def[key]) != len(value): +                    if debug: +                        print("List lengths are not equal.") +                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) +                        print("user_def: %s" % user_def[key]) +                        print("value: %s" % value) +                    return False + +                for values in zip(user_def[key], value): +                    if isinstance(values[0], dict) and isinstance(values[1], dict): +                        if debug: +                            print('sending list - list') +                            print(type(values[0])) +                            print(type(values[1])) +                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) +                        if not result: +                            print('list compare returned false') +                            return False + +                    elif value != user_def[key]: +                        if debug: +                            print('value should be identical') +                            print(user_def[key]) +                            print(value) +                        return False + +            # recurse on a dictionary +            elif isinstance(value, dict): +                if key not in user_def: +                    if debug: +                        print("user_def does not have key [%s]" % key) +                    return False +                if not isinstance(user_def[key], dict): +                    if debug: +                        print("dict returned false: not instance of dict") +                    return False + +                # before passing ensure keys match +                api_values = set(value.keys()) - set(skip) +                user_values = set(user_def[key].keys()) - set(skip) +                if api_values != user_values: +                    if debug: +                        print("keys are not equal in dict") +                        print('user_def: %s' % user_values) +                        print('memory: %s' % api_values) +                    return False + +                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) +                if not result: +                    if debug: +                        print("dict returned false") +                        print(result) +                    return False + +            # Verify each key, value pair is the same +            else: +                if key not in user_def or value != user_def[key]: +                    if debug: +                   
     print("value not equal; user_def does not have key") +                        print(key) +                        print(value) +                        if key in user_def: +                            print(user_def[key]) +                    return False + +        if debug: +            print('returning true') +        return True + + +class OpenShiftCLIConfig(object): +    '''Generic Config''' +    def __init__(self, rname, namespace, kubeconfig, options): +        self.kubeconfig = kubeconfig +        self.name = rname +        self.namespace = namespace +        self._options = options + +    @property +    def config_options(self): +        ''' return config options ''' +        return self._options + +    def to_option_list(self): +        '''return all options as a string''' +        return self.stringify() + +    def stringify(self): +        ''' return the options hash as cli params in a string ''' +        rval = [] +        for key, data in self.config_options.items(): +            if data['include'] \ +               and (data['value'] or isinstance(data['value'], int)): +                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) + +        return rval + + +# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*- + + +# pylint: disable=too-many-public-methods +class DeploymentConfig(Yedit): +    ''' Class to model an openshift DeploymentConfig''' +    default_deployment_config = ''' +apiVersion: v1 +kind: DeploymentConfig +metadata: +  name: default_dc +  namespace: default +spec: +  replicas: 0 +  selector: +    default_dc: default_dc +  strategy: +    resources: {} +    rollingParams: +      intervalSeconds: 1 +      maxSurge: 0 +      maxUnavailable: 25% +      timeoutSeconds: 600 +      updatePercent: -25 +      updatePeriodSeconds: 1 +    type: Rolling +  template: +    metadata: +    spec: +      containers: +      - env: +        - name: default +          value: default +        image: default +        imagePullPolicy: IfNotPresent +        name: default_dc +        ports: +        - containerPort: 8000 +          hostPort: 8000 +          protocol: TCP +          name: default_port +        resources: {} +        terminationMessagePath: /dev/termination-log +      dnsPolicy: ClusterFirst +      hostNetwork: true +      nodeSelector: +        type: compute +      restartPolicy: Always +      securityContext: {} +      serviceAccount: default +      serviceAccountName: default +      terminationGracePeriodSeconds: 30 +  triggers: +  - type: ConfigChange +''' + +    replicas_path = "spec.replicas" +    env_path = "spec.template.spec.containers[0].env" +    volumes_path = "spec.template.spec.volumes" +    container_path = "spec.template.spec.containers" +    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts" + +    def __init__(self, content=None): +        ''' Constructor for deploymentconfig ''' +        if not content: +            content = DeploymentConfig.default_deployment_config + +        super(DeploymentConfig, self).__init__(content=content) + +    def add_env_value(self, key, value): +        ''' add key, value pair to env array ''' +        rval = False +        env = self.get_env_vars() +        if env: +            env.append({'name': key, 'value': value}) +            rval = True +        else: +            result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value}) +            rval = result[0] + +        return 
rval + +    def exists_env_value(self, key, value): +        ''' return whether a key, value  pair exists ''' +        results = self.get_env_vars() +        if not results: +            return False + +        for result in results: +            if result['name'] == key and result['value'] == value: +                return True + +        return False + +    def exists_env_key(self, key): +        ''' return whether a key, value  pair exists ''' +        results = self.get_env_vars() +        if not results: +            return False + +        for result in results: +            if result['name'] == key: +                return True + +        return False + +    def get_env_var(self, key): +        '''return a environment variables ''' +        results = self.get(DeploymentConfig.env_path) or [] +        if not results: +            return None + +        for env_var in results: +            if env_var['name'] == key: +                return env_var + +        return None + +    def get_env_vars(self): +        '''return a environment variables ''' +        return self.get(DeploymentConfig.env_path) or [] + +    def delete_env_var(self, keys): +        '''delete a list of keys ''' +        if not isinstance(keys, list): +            keys = [keys] + +        env_vars_array = self.get_env_vars() +        modified = False +        idx = None +        for key in keys: +            for env_idx, env_var in enumerate(env_vars_array): +                if env_var['name'] == key: +                    idx = env_idx +                    break + +            if idx: +                modified = True +                del env_vars_array[idx] + +        if modified: +            return True + +        return False + +    def update_env_var(self, key, value): +        '''place an env in the env var list''' + +        env_vars_array = self.get_env_vars() +        idx = None +        for env_idx, env_var in enumerate(env_vars_array): +            if env_var['name'] == key: +                idx = env_idx +                break + +        if idx: +            env_vars_array[idx]['value'] = value +        else: +            self.add_env_value(key, value) + +        return True + +    def exists_volume_mount(self, volume_mount): +        ''' return whether a volume mount exists ''' +        exist_volume_mounts = self.get_volume_mounts() + +        if not exist_volume_mounts: +            return False + +        volume_mount_found = False +        for exist_volume_mount in exist_volume_mounts: +            if exist_volume_mount['name'] == volume_mount['name']: +                volume_mount_found = True +                break + +        return volume_mount_found + +    def exists_volume(self, volume): +        ''' return whether a volume exists ''' +        exist_volumes = self.get_volumes() + +        volume_found = False +        for exist_volume in exist_volumes: +            if exist_volume['name'] == volume['name']: +                volume_found = True +                break + +        return volume_found + +    def find_volume_by_name(self, volume, mounts=False): +        ''' return the index of a volume ''' +        volumes = [] +        if mounts: +            volumes = self.get_volume_mounts() +        else: +            volumes = self.get_volumes() +        for exist_volume in volumes: +            if exist_volume['name'] == volume['name']: +                return exist_volume + +        return None + +    def get_replicas(self): +        ''' return replicas setting ''' +        return 
self.get(DeploymentConfig.replicas_path) + +    def get_volume_mounts(self): +        '''return volume mount information ''' +        return self.get_volumes(mounts=True) + +    def get_volumes(self, mounts=False): +        '''return volume mount information ''' +        if mounts: +            return self.get(DeploymentConfig.volume_mounts_path) or [] + +        return self.get(DeploymentConfig.volumes_path) or [] + +    def delete_volume_by_name(self, volume): +        '''delete a volume ''' +        modified = False +        exist_volume_mounts = self.get_volume_mounts() +        exist_volumes = self.get_volumes() +        del_idx = None +        for idx, exist_volume in enumerate(exist_volumes): +            if 'name' in exist_volume and exist_volume['name'] == volume['name']: +                del_idx = idx +                break + +        if del_idx != None: +            del exist_volumes[del_idx] +            modified = True + +        del_idx = None +        for idx, exist_volume_mount in enumerate(exist_volume_mounts): +            if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']: +                del_idx = idx +                break + +        if del_idx != None: +            del exist_volume_mounts[idx] +            modified = True + +        return modified + +    def add_volume_mount(self, volume_mount): +        ''' add a volume or volume mount to the proper location ''' +        exist_volume_mounts = self.get_volume_mounts() + +        if not exist_volume_mounts and volume_mount: +            self.put(DeploymentConfig.volume_mounts_path, [volume_mount]) +        else: +            exist_volume_mounts.append(volume_mount) + +    def add_volume(self, volume): +        ''' add a volume or volume mount to the proper location ''' +        exist_volumes = self.get_volumes() +        if not volume: +            return + +        if not exist_volumes: +            self.put(DeploymentConfig.volumes_path, [volume]) +        else: +            exist_volumes.append(volume) + +    def update_replicas(self, replicas): +        ''' update replicas value ''' +        self.put(DeploymentConfig.replicas_path, replicas) + +    def update_volume(self, volume): +        '''place an env in the env var list''' +        exist_volumes = self.get_volumes() + +        if not volume: +            return False + +        # update the volume +        update_idx = None +        for idx, exist_vol in enumerate(exist_volumes): +            if exist_vol['name'] == volume['name']: +                update_idx = idx +                break + +        if update_idx != None: +            exist_volumes[update_idx] = volume +        else: +            self.add_volume(volume) + +        return True + +    def update_volume_mount(self, volume_mount): +        '''place an env in the env var list''' +        modified = False + +        exist_volume_mounts = self.get_volume_mounts() + +        if not volume_mount: +            return False + +        # update the volume mount +        for exist_vol_mount in exist_volume_mounts: +            if exist_vol_mount['name'] == volume_mount['name']: +                if 'mountPath' in exist_vol_mount and \ +                   str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']): +                    exist_vol_mount['mountPath'] = volume_mount['mountPath'] +                    modified = True +                break + +        if not modified: +            self.add_volume_mount(volume_mount) +            modified = True + +        
return modified + +    def needs_update_volume(self, volume, volume_mount): +        ''' verify a volume update is needed ''' +        exist_volume = self.find_volume_by_name(volume) +        exist_volume_mount = self.find_volume_by_name(volume, mounts=True) +        results = [] +        results.append(exist_volume['name'] == volume['name']) + +        if 'secret' in volume: +            results.append('secret' in exist_volume) +            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName']) +            results.append(exist_volume_mount['name'] == volume_mount['name']) +            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath']) + +        elif 'emptyDir' in volume: +            results.append(exist_volume_mount['name'] == volume['name']) +            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath']) + +        elif 'persistentVolumeClaim' in volume: +            pvc = 'persistentVolumeClaim' +            results.append(pvc in exist_volume) +            if results[-1]: +                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName']) + +                if 'claimSize' in volume[pvc]: +                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize']) + +        elif 'hostpath' in volume: +            results.append('hostPath' in exist_volume) +            results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath']) + +        return not all(results) + +    def needs_update_replicas(self, replicas): +        ''' verify whether a replica update is needed ''' +        current_reps = self.get(DeploymentConfig.replicas_path) +        return not current_reps == replicas + +# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*- + +# pylint: disable=too-many-instance-attributes +class SecretConfig(object): +    ''' Handle secret options ''' +    # pylint: disable=too-many-arguments +    def __init__(self, +                 sname, +                 namespace, +                 kubeconfig, +                 secrets=None): +        ''' constructor for handling secret options ''' +        self.kubeconfig = kubeconfig +        self.name = sname +        self.namespace = namespace +        self.secrets = secrets +        self.data = {} + +        self.create_dict() + +    def create_dict(self): +        ''' assign the correct properties for a secret dict ''' +        self.data['apiVersion'] = 'v1' +        self.data['kind'] = 'Secret' +        self.data['metadata'] = {} +        self.data['metadata']['name'] = self.name +        self.data['metadata']['namespace'] = self.namespace +        self.data['data'] = {} +        if self.secrets: +            for key, value in self.secrets.items(): +                self.data['data'][key] = value + +# pylint: disable=too-many-instance-attributes +class Secret(Yedit): +    ''' Class to wrap the oc command line tools ''' +    secret_path = "data" +    kind = 'secret' + +    def __init__(self, content): +        '''secret constructor''' +        super(Secret, self).__init__(content=content) +        self._secrets = None + +    @property +    def secrets(self): +        '''secret property getter''' +        if self._secrets is None: +            self._secrets = self.get_secrets() +        return self._secrets + +    @secrets.setter +    def secrets(self): +        '''secret property setter''' +        if self._secrets is 
None: +            self._secrets = self.get_secrets() +        return self._secrets + +    def get_secrets(self): +        ''' returns all of the defined secrets ''' +        return self.get(Secret.secret_path) or {} + +    def add_secret(self, key, value): +        ''' add a secret ''' +        if self.secrets: +            self.secrets[key] = value +        else: +            self.put(Secret.secret_path, {key: value}) + +        return True + +    def delete_secret(self, key): +        ''' delete secret''' +        try: +            del self.secrets[key] +        except KeyError as _: +            return False + +        return True + +    def find_secret(self, key): +        ''' find secret''' +        rval = None +        try: +            rval = self.secrets[key] +        except KeyError as _: +            return None + +        return {'key': key, 'value': rval} + +    def update_secret(self, key, value): +        ''' update a secret''' +        if key in self.secrets: +            self.secrets[key] = value +        else: +            self.add_secret(key, value) + +        return True + +# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*- + + +# pylint: disable=too-many-instance-attributes +class ServiceConfig(object): +    ''' Handle service options ''' +    # pylint: disable=too-many-arguments +    def __init__(self, +                 sname, +                 namespace, +                 ports, +                 selector=None, +                 labels=None, +                 cluster_ip=None, +                 portal_ip=None, +                 session_affinity=None, +                 service_type=None): +        ''' constructor for handling service options ''' +        self.name = sname +        self.namespace = namespace +        self.ports = ports +        self.selector = selector +        self.labels = labels +        self.cluster_ip = cluster_ip +        self.portal_ip = portal_ip +        self.session_affinity = session_affinity +        self.service_type = service_type +        self.data = {} + +        self.create_dict() + +    def create_dict(self): +        ''' instantiates a service dict ''' +        self.data['apiVersion'] = 'v1' +        self.data['kind'] = 'Service' +        self.data['metadata'] = {} +        self.data['metadata']['name'] = self.name +        self.data['metadata']['namespace'] = self.namespace +        if self.labels: +            for lab, lab_value  in self.labels.items(): +                self.data['metadata'][lab] = lab_value +        self.data['spec'] = {} + +        if self.ports: +            self.data['spec']['ports'] = self.ports +        else: +            self.data['spec']['ports'] = [] + +        if self.selector: +            self.data['spec']['selector'] = self.selector + +        self.data['spec']['sessionAffinity'] = self.session_affinity or 'None' + +        if self.cluster_ip: +            self.data['spec']['clusterIP'] = self.cluster_ip + +        if self.portal_ip: +            self.data['spec']['portalIP'] = self.portal_ip + +        if self.service_type: +            self.data['spec']['type'] = self.service_type + +# pylint: disable=too-many-instance-attributes,too-many-public-methods +class Service(Yedit): +    ''' Class to model the oc service object ''' +    port_path = "spec.ports" +    portal_ip = "spec.portalIP" +    cluster_ip = "spec.clusterIP" +    selector = "spec.selector" +    kind = 'Service' + +    def __init__(self, content): +        
'''Service constructor''' +        super(Service, self).__init__(content=content) + +    def get_ports(self): +        ''' get a list of ports ''' +        return self.get(Service.port_path) or [] + +    def get_selector(self): +        ''' get the service selector''' +        return self.get(Service.selector) or {} + +    def add_ports(self, inc_ports): +        ''' add a port object to the ports list ''' +        if not isinstance(inc_ports, list): +            inc_ports = [inc_ports] + +        ports = self.get_ports() +        if not ports: +            self.put(Service.port_path, inc_ports) +        else: +            ports.extend(inc_ports) + +        return True + +    def find_ports(self, inc_port): +        ''' find a specific port ''' +        for port in self.get_ports(): +            if port['port'] == inc_port['port']: +                return port + +        return None + +    def delete_ports(self, inc_ports): +        ''' remove a port from a service ''' +        if not isinstance(inc_ports, list): +            inc_ports = [inc_ports] + +        ports = self.get(Service.port_path) or [] + +        if not ports: +            return True + +        removed = False +        for inc_port in inc_ports: +            port = self.find_ports(inc_port) +            if port: +                ports.remove(port) +                removed = True + +        return removed + +    def add_cluster_ip(self, sip): +        '''add cluster ip''' +        self.put(Service.cluster_ip, sip) + +    def add_portal_ip(self, pip): +        '''add cluster ip''' +        self.put(Service.portal_ip, pip) + +# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*- + +class Volume(object): +    ''' Class to model an openshift volume object''' +    volume_mounts_path = {"pod": "spec.containers[0].volumeMounts", +                          "dc":  "spec.template.spec.containers[0].volumeMounts", +                          "rc":  "spec.template.spec.containers[0].volumeMounts", +                         } +    volumes_path = {"pod": "spec.volumes", +                    "dc":  "spec.template.spec.volumes", +                    "rc":  "spec.template.spec.volumes", +                   } + +    @staticmethod +    def create_volume_structure(volume_info): +        ''' return a properly structured volume ''' +        volume_mount = None +        volume = {'name': volume_info['name']} +        if volume_info['type'] == 'secret': +            volume['secret'] = {} +            volume[volume_info['type']] = {'secretName': volume_info['secret_name']} +            volume_mount = {'mountPath': volume_info['path'], +                            'name': volume_info['name']} +        elif volume_info['type'] == 'emptydir': +            volume['emptyDir'] = {} +            volume_mount = {'mountPath': volume_info['path'], +                            'name': volume_info['name']} +        elif volume_info['type'] == 'pvc': +            volume['persistentVolumeClaim'] = {} +            volume['persistentVolumeClaim']['claimName'] = volume_info['claimName'] +            volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize'] +        elif volume_info['type'] == 'hostpath': +            volume['hostPath'] = {} +            volume['hostPath']['path'] = volume_info['path'] + +        return (volume, volume_mount) + +# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- 
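+# OCVersion (below) wraps `oc version` and post-processes the raw output with
+# Utils.filter_versions() and Utils.add_custom_versions().  Illustrative
+# sketch of the shape it returns (the version strings are made-up examples):
+#
+#     oc_ver = OCVersion('/etc/origin/master/admin.kubeconfig', False)
+#     versions = oc_ver.get()
+#     # e.g. {'returncode': 0, 'oc': 'v3.4.1.2', 'oc_numeric': '3.4.1.2',
+#     #       'oc_short': '3.4', 'openshift': 'v3.4.1.2', ...}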
-*- + + +# pylint: disable=too-many-instance-attributes +class OCVersion(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' +    # pylint allows 5 +    # pylint: disable=too-many-arguments +    def __init__(self, +                 config, +                 debug): +        ''' Constructor for OCVersion ''' +        super(OCVersion, self).__init__(None, config) +        self.debug = debug + +    def get(self): +        '''get and return version information ''' + +        results = {} + +        version_results = self._version() + +        if version_results['returncode'] == 0: +            filtered_vers = Utils.filter_versions(version_results['results']) +            custom_vers = Utils.add_custom_versions(filtered_vers) + +            results['returncode'] = version_results['returncode'] +            results.update(filtered_vers) +            results.update(custom_vers) + +            return results + +        raise OpenShiftCLIError('Problem detecting openshift version.') + +    @staticmethod +    def run_ansible(params): +        '''run the idempotent ansible code''' +        oc_version = OCVersion(params['kubeconfig'], params['debug']) + +        if params['state'] == 'list': + +            #pylint: disable=protected-access +            result = oc_version.get() +            return {'state': params['state'], +                    'results': result, +                    'changed': False} + +# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*- + +class RegistryException(Exception): +    ''' Registry Exception Class ''' +    pass + + +class RegistryConfig(OpenShiftCLIConfig): +    ''' RegistryConfig is a DTO for the registry.  ''' +    def __init__(self, rname, namespace, kubeconfig, registry_options): +        super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options) + + +class Registry(OpenShiftCLI): +    ''' Class to wrap the oc command line tools ''' + +    volume_mount_path = 'spec.template.spec.containers[0].volumeMounts' +    volume_path = 'spec.template.spec.volumes' +    env_path = 'spec.template.spec.containers[0].env' + +    def __init__(self, +                 registry_config, +                 verbose=False): +        ''' Constructor for Registry + +           a registry consists of 3 or more parts +           - dc/docker-registry +           - svc/docker-registry + +           Parameters: +           :registry_config: +           :verbose: +        ''' +        super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose) +        self.version = OCVersion(registry_config.kubeconfig, verbose) +        self.svc_ip = None +        self.portal_ip = None +        self.config = registry_config +        self.verbose = verbose +        self.registry_parts = [{'kind': 'dc', 'name': self.config.name}, +                               {'kind': 'svc', 'name': self.config.name}, +                              ] + +        self.__prepared_registry = None +        self.volume_mounts = [] +        self.volumes = [] +        if self.config.config_options['volume_mounts']['value']: +            for volume in self.config.config_options['volume_mounts']['value']: +                volume_info = {'secret_name': volume.get('secret_name', None), +                               'name':        volume.get('name', None), +                               'type':        volume.get('type', None), +                               'path':      
  volume.get('path', None), +                               'claimName':   volume.get('claim_name', None), +                               'claimSize':   volume.get('claim_size', None), +                              } + +                vol, vol_mount = Volume.create_volume_structure(volume_info) +                self.volumes.append(vol) +                self.volume_mounts.append(vol_mount) + +        self.dconfig = None +        self.svc = None + +    @property +    def deploymentconfig(self): +        ''' deploymentconfig property ''' +        return self.dconfig + +    @deploymentconfig.setter +    def deploymentconfig(self, config): +        ''' setter for deploymentconfig property ''' +        self.dconfig = config + +    @property +    def service(self): +        ''' service property ''' +        return self.svc + +    @service.setter +    def service(self, config): +        ''' setter for service property ''' +        self.svc = config + +    @property +    def prepared_registry(self): +        ''' prepared_registry property ''' +        if not self.__prepared_registry: +            results = self.prepare_registry() +            if not results: +                raise RegistryException('Could not perform registry preparation.') +            self.__prepared_registry = results + +        return self.__prepared_registry + +    @prepared_registry.setter +    def prepared_registry(self, data): +        ''' setter method for prepared_registry attribute ''' +        self.__prepared_registry = data + +    def get(self): +        ''' return the self.registry_parts ''' +        self.deploymentconfig = None +        self.service = None + +        rval = 0 +        for part in self.registry_parts: +            result = self._get(part['kind'], rname=part['name']) +            if result['returncode'] == 0 and part['kind'] == 'dc': +                self.deploymentconfig = DeploymentConfig(result['results'][0]) +            elif result['returncode'] == 0 and part['kind'] == 'svc': +                self.service = Service(result['results'][0]) + +            if result['returncode'] != 0: +                rval = result['returncode'] + + +        return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service} + +    def exists(self): +        '''does the object exist?''' +        self.get() +        if self.deploymentconfig or self.service: +            return True + +        return False + +    def delete(self, complete=True): +        '''return all pods ''' +        parts = [] +        for part in self.registry_parts: +            if not complete and part['kind'] == 'svc': +                continue +            parts.append(self._delete(part['kind'], part['name'])) + +        # Clean up returned results +        rval = 0 +        for part in parts: +            # pylint: disable=invalid-sequence-index +            if 'returncode' in part and part['returncode'] != 0: +                rval = part['returncode'] + +        return {'returncode': rval, 'results': parts} + +    def prepare_registry(self): +        ''' prepare a registry for instantiation ''' +        options = self.config.to_option_list() + +        cmd = ['registry', '-n', self.config.namespace] +        cmd.extend(options) +        cmd.extend(['--dry-run=True', '-o', 'json']) + +        results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json') +        # probably need to parse this +        # pylint thinks results is a string +        # pylint: disable=no-member +        if 
results['returncode'] != 0 and 'items' in results['results']: +            return results + +        service = None +        deploymentconfig = None +        # pylint: disable=invalid-sequence-index +        for res in results['results']['items']: +            if res['kind'] == 'DeploymentConfig': +                deploymentconfig = DeploymentConfig(res) +            elif res['kind'] == 'Service': +                service = Service(res) + +        # Verify we got a service and a deploymentconfig +        if not service or not deploymentconfig: +            return results + +        # results will need to get parsed here and modifications added +        deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig)) + +        # modify service ip +        if self.svc_ip: +            service.put('spec.clusterIP', self.svc_ip) +        if self.portal_ip: +            service.put('spec.portalIP', self.portal_ip) + +        # the dry-run doesn't apply the selector correctly +        service.put('spec.selector', self.service.get_selector()) + + +        # need to create the service and the deploymentconfig +        service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict) +        deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict) + +        return {"service": service, +                "service_file": service_file, +                "service_update": False, +                "deployment": deploymentconfig, +                "deployment_file": deployment_file, +                "deployment_update": False} + +    def create(self): +        '''Create a registry''' +        results = [] +        for config_file in ['deployment_file', 'service_file']: +            results.append(self._create(self.prepared_registry[config_file])) + +        # Clean up returned results +        rval = 0 +        for result in results: +            # pylint: disable=invalid-sequence-index +            if 'returncode' in result and result['returncode'] != 0: +                rval = result['returncode'] + +        return {'returncode': rval, 'results': results} + +    def update(self): +        '''run update for the registry.  
This performs a delete and then create ''' +        # Store the current service IP +        if self.service: +            svcip = self.service.get('spec.clusterIP') +            if svcip: +                self.svc_ip = svcip +            portip = self.service.get('spec.portalIP') +            if portip: +                self.portal_ip = portip + +        results = [] +        if self.prepared_registry['deployment_update']: +            results.append(self._replace(self.prepared_registry['deployment_file'])) +        if self.prepared_registry['service_update']: +            results.append(self._replace(self.prepared_registry['service_file'])) + +        # Clean up returned results +        rval = 0 +        for result in results: +            if result['returncode'] != 0: +                rval = result['returncode'] + +        return {'returncode': rval, 'results': results} + +    def add_modifications(self, deploymentconfig): +        ''' update a deployment config with changes ''' +        # The environment variable for REGISTRY_HTTP_SECRET is autogenerated +        # We should set the generated deploymentconfig to the in memory version +        # the following modifications will overwrite if needed +        if self.deploymentconfig: +            result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET') +            if result: +                deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value']) + +        # Currently we know that our deployment of a registry requires a few extra modifications +        # Modification 1 +        # we need specific environment variables to be set +        for key, value in self.config.config_options['env_vars'].get('value', {}).items(): +            if not deploymentconfig.exists_env_key(key): +                deploymentconfig.add_env_value(key, value) +            else: +                deploymentconfig.update_env_var(key, value) + +        # Modification 2 +        # we need specific volume variables to be set +        for volume in self.volumes: +            deploymentconfig.update_volume(volume) + +        for vol_mount in self.volume_mounts: +            deploymentconfig.update_volume_mount(vol_mount) + +        # Modification 3 +        # Edits +        edit_results = [] +        for edit in self.config.config_options['edits'].get('value', []): +            if edit['action'] == 'put': +                edit_results.append(deploymentconfig.put(edit['key'], +                                                         edit['value'])) +            if edit['action'] == 'update': +                edit_results.append(deploymentconfig.update(edit['key'], +                                                            edit['value'], +                                                            edit.get('index', None), +                                                            edit.get('curr_value', None))) +            if edit['action'] == 'append': +                edit_results.append(deploymentconfig.append(edit['key'], +                                                            edit['value'])) + +        if edit_results and not any([res[0] for res in edit_results]): +            return None + +        return deploymentconfig.yaml_dict + +    def needs_update(self): +        ''' check to see if we need to update ''' +        if not self.service or not self.deploymentconfig: +            return True + +        exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol'] +        if not 
Utils.check_def_equal(self.prepared_registry['service'].yaml_dict, +                                     self.service.yaml_dict, +                                     exclude_list, +                                     debug=self.verbose): +            self.prepared_registry['service_update'] = True + +        exclude_list = ['dnsPolicy', +                        'terminationGracePeriodSeconds', +                        'restartPolicy', 'timeoutSeconds', +                        'livenessProbe', 'readinessProbe', +                        'terminationMessagePath', +                        'securityContext', +                        'imagePullPolicy', +                        'protocol', # ports.portocol: TCP +                        'type', # strategy: {'type': 'rolling'} +                        'defaultMode', # added on secrets +                        'activeDeadlineSeconds', # added in 1.5 for timeouts +                       ] + +        if not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict, +                                     self.deploymentconfig.yaml_dict, +                                     exclude_list, +                                     debug=self.verbose): +            self.prepared_registry['deployment_update'] = True + +        return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False + +    # In the future, we would like to break out each ansible state into a function. +    # pylint: disable=too-many-branches,too-many-return-statements +    @staticmethod +    def run_ansible(params, check_mode): +        '''run idempotent ansible code''' + +        rconfig = RegistryConfig(params['name'], +                                 params['namespace'], +                                 params['kubeconfig'], +                                 {'images': {'value': params['images'], 'include': True}, +                                  'latest_images': {'value': params['latest_images'], 'include': True}, +                                  'labels': {'value': params['labels'], 'include': True}, +                                  'ports': {'value': ','.join(params['ports']), 'include': True}, +                                  'replicas': {'value': params['replicas'], 'include': True}, +                                  'selector': {'value': params['selector'], 'include': True}, +                                  'service_account': {'value': params['service_account'], 'include': True}, +                                  'mount_host': {'value': params['mount_host'], 'include': True}, +                                  'env_vars': {'value': params['env_vars'], 'include': False}, +                                  'volume_mounts': {'value': params['volume_mounts'], 'include': False}, +                                  'edits': {'value': params['edits'], 'include': False}, +                                  'enforce_quota': {'value': params['enforce_quota'], 'include': True}, +                                  'daemonset': {'value': params['daemonset'], 'include': True}, +                                  'tls_key': {'value': params['tls_key'], 'include': True}, +                                  'tls_certificate': {'value': params['tls_certificate'], 'include': True}, +                                 }) + + +        ocregistry = Registry(rconfig, params['debug']) + +        api_rval = ocregistry.get() + +        state = params['state'] +        ######## +        # get +        ######## +        if state == 'list': + +    
        if api_rval['returncode'] != 0: +                return {'failed': True, 'msg': api_rval} + +            return {'changed': False, 'results': api_rval, 'state': state} + +        ######## +        # Delete +        ######## +        if state == 'absent': +            if not ocregistry.exists(): +                return {'changed': False, 'state': state} + +            if check_mode: +                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'} + +            # Unsure as to why this is angry with the return type. +            # pylint: disable=redefined-variable-type +            api_rval = ocregistry.delete() + +            if api_rval['returncode'] != 0: +                return {'failed': True, 'msg': api_rval} + +            return {'changed': True, 'results': api_rval, 'state': state} + +        if state == 'present': +            ######## +            # Create +            ######## +            if not ocregistry.exists(): + +                if check_mode: +                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} + +                api_rval = ocregistry.create() + +                if api_rval['returncode'] != 0: +                    return {'failed': True, 'msg': api_rval} + +                return {'changed': True, 'results': api_rval, 'state': state} + +            ######## +            # Update +            ######## +            if not params['force'] and not ocregistry.needs_update(): +                return {'changed': False, 'state': state} + +            if check_mode: +                return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} + +            api_rval = ocregistry.update() + +            if api_rval['returncode'] != 0: +                return {'failed': True, 'msg': api_rval} + +            return {'changed': True, 'results': api_rval, 'state': state} + +        return {'failed': True, 'msg': 'Unknown state passed. 
%s' % state} + +# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*- + +# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*- + +def main(): +    ''' +    ansible oc module for registry +    ''' + +    module = AnsibleModule( +        argument_spec=dict( +            state=dict(default='present', type='str', +                       choices=['present', 'absent']), +            debug=dict(default=False, type='bool'), +            namespace=dict(default='default', type='str'), +            name=dict(default=None, required=True, type='str'), + +            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), +            images=dict(default=None, type='str'), +            latest_images=dict(default=False, type='bool'), +            labels=dict(default=None, type='list'), +            ports=dict(default=['5000'], type='list'), +            replicas=dict(default=1, type='int'), +            selector=dict(default=None, type='str'), +            service_account=dict(default='registry', type='str'), +            mount_host=dict(default=None, type='str'), +            volume_mounts=dict(default=None, type='list'), +            env_vars=dict(default={}, type='dict'), +            edits=dict(default=[], type='list'), +            enforce_quota=dict(default=False, type='bool'), +            force=dict(default=False, type='bool'), +            daemonset=dict(default=False, type='bool'), +            tls_key=dict(default=None, type='str'), +            tls_certificate=dict(default=None, type='str'), +        ), + +        supports_check_mode=True, +    ) + +    results = Registry.run_ansible(module.params, module.check_mode) +    if 'failed' in results: +        module.fail_json(**results) + +    module.exit_json(**results) + + +if __name__ == '__main__': +    main() + +# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*- diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 68b797577..ab06a5141 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1487,10 +1487,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval @@ -2729,7 +2730,7 @@ class Router(OpenShiftCLI):          options = self.config.to_option_list() -        cmd = ['router', self.config.name, '-n', self.config.namespace] +        cmd = ['router', self.config.name]          cmd.extend(options)          cmd.extend(['--dry-run=True', '-o', 'json']) diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index 0347644eb..7a7eaf40a 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -1386,10 +1386,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in 
sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index e343c70df..a1994b0f1 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -1353,10 +1353,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index c4ed42bbe..109a78184 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -1362,10 +1362,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 863443517..bd6e77c2a 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ b/roles/lib_openshift/library/oc_obj.py @@ -1365,10 +1365,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index 5a966fa93..1d0e4c876 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -1297,10 +1297,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or 
isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 03c9d2044..14d519e52 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -1354,10 +1354,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index 812c67de5..4f82abcfe 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -1351,10 +1351,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 2ce3824e9..97dd310bc 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -1396,10 +1396,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index c9e9b1790..56e4e38f7 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -1340,10 +1340,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), 
data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 6058f0ee2..ad32d4900 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -1386,10 +1386,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 8f6303a66..a4d0ca3f3 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -1392,10 +1392,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 7d78c96d0..b6586fca9 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -1338,10 +1338,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index c058c555b..925a5a088 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -1338,10 +1338,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git 
a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 4e65b8a0a..8f59d4d7e 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -1310,10 +1310,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/src/class/oc_adm_policy_group.py b/roles/lib_openshift/src/class/oc_adm_policy_group.py index afb066c77..1e51913e0 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_group.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_group.py @@ -41,6 +41,28 @@ class PolicyGroup(OpenShiftCLI):          self.verbose = verbose          self._rolebinding = None          self._scc = None +        self._cluster_policy_bindings = None +        self._policy_bindings = None + +    @property +    def policybindings(self): +        if self._policy_bindings is None: +            results = self._get('clusterpolicybindings', None) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Could not retrieve policybindings') +            self._policy_bindings = results['results'][0]['items'][0] + +        return self._policy_bindings + +    @property +    def clusterpolicybindings(self): +        if self._cluster_policy_bindings is None: +            results = self._get('clusterpolicybindings', None) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') +            self._cluster_policy_bindings = results['results'][0]['items'][0] + +        return self._cluster_policy_bindings      @property      def role_binding(self): @@ -81,18 +103,24 @@ class PolicyGroup(OpenShiftCLI):      def exists_role_binding(self):          ''' return whether role_binding exists ''' -        results = self.get() -        if results['returncode'] == 0: -            self.role_binding = RoleBinding(results['results'][0]) -            if self.role_binding.find_group_name(self.config.config_options['group']['value']) != None: -                return True +        bindings = None +        if self.config.config_options['resource_kind']['value'] == 'cluster-role': +            bindings = self.clusterpolicybindings +        else: +            bindings = self.policybindings +        if bindings is None:              return False -        elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']: -            return False +        for binding in bindings['roleBindings']: +            _rb = binding['roleBinding'] +            if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ +                    _rb['groupNames'] is not None and \ +                    self.config.config_options['group']['value'] in _rb['groupNames']: +                self.role_binding = binding +                return True -        return results +        return False      def exists_scc(self):          ''' return whether scc exists ''' diff 
--git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py index c9d53acfa..88fcc1ddc 100644 --- a/roles/lib_openshift/src/class/oc_adm_policy_user.py +++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py @@ -40,6 +40,28 @@ class PolicyUser(OpenShiftCLI):          self.verbose = verbose          self._rolebinding = None          self._scc = None +        self._cluster_policy_bindings = None +        self._policy_bindings = None + +    @property +    def policybindings(self): +        if self._policy_bindings is None: +            results = self._get('clusterpolicybindings', None) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Could not retrieve policybindings') +            self._policy_bindings = results['results'][0]['items'][0] + +        return self._policy_bindings + +    @property +    def clusterpolicybindings(self): +        if self._cluster_policy_bindings is None: +            results = self._get('clusterpolicybindings', None) +            if results['returncode'] != 0: +                raise OpenShiftCLIError('Could not retrieve clusterpolicybindings') +            self._cluster_policy_bindings = results['results'][0]['items'][0] + +        return self._cluster_policy_bindings      @property      def role_binding(self): @@ -62,36 +84,37 @@ class PolicyUser(OpenShiftCLI):          self._scc = scc      def get(self): -        '''fetch the desired kind''' +        '''fetch the desired kind + +           This is only used for scc objects. +           The {cluster}rolebindings happen in exists. +        '''          resource_name = self.config.config_options['name']['value']          if resource_name == 'cluster-reader':              resource_name += 's' -        # oc adm policy add-... creates policy bindings with the name -        # "[resource_name]-binding", however some bindings in the system -        # simply use "[resource_name]". So try both. 
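# Hedged sketch (made-up payload, not part of the patch): the reworked
# exists_role_binding() in PolicyGroup above and PolicyUser just below no
# longer fetches rolebindings by name; it scans the cached
# (cluster)policybindings document for a roleBinding whose roleRef matches the
# requested role and whose userNames/groupNames contain the subject. A
# standalone illustration of that matching step, with a payload shaped like
# items[0] of the oc CLI output:
def _user_is_bound(bindings, role_name, user):
    '''return True if user is bound to role_name in a policybindings document'''
    for binding in bindings.get('roleBindings', []):
        _rb = binding['roleBinding']
        if (_rb['roleRef']['name'] == role_name
                and _rb.get('userNames')
                and user in _rb['userNames']):
            return True
    return False

# e.g. _user_is_bound({'roleBindings': [{'name': 'edit',
#                                        'roleBinding': {'roleRef': {'name': 'edit'},
#                                                        'userNames': ['alice']}}]},
#                     'edit', 'alice')   # -> True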
- -        results = self._get(self.config.kind, resource_name) -        if results['returncode'] == 0: -            return results - -        # Now try -binding naming convention -        return self._get(self.config.kind, resource_name + "-binding") +        return self._get(self.config.kind, resource_name)      def exists_role_binding(self):          ''' return whether role_binding exists ''' -        results = self.get() -        if results['returncode'] == 0: -            self.role_binding = RoleBinding(results['results'][0]) -            if self.role_binding.find_user_name(self.config.config_options['user']['value']) != None: -                return True +        bindings = None +        if self.config.config_options['resource_kind']['value'] == 'cluster-role': +            bindings = self.clusterpolicybindings +        else: +            bindings = self.policybindings +        if bindings is None:              return False -        elif self.config.config_options['name']['value'] in results['stderr'] and '" not found' in results['stderr']: -            return False +        for binding in bindings['roleBindings']: +            _rb = binding['roleBinding'] +            if _rb['roleRef']['name'] == self.config.config_options['name']['value'] and \ +                    _rb['userNames'] is not None and \ +                    self.config.config_options['user']['value'] in _rb['userNames']: +                self.role_binding = binding +                return True -        return results +        return False      def exists_scc(self):          ''' return whether scc exists ''' diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py index 37904c43f..c083cd179 100644 --- a/roles/lib_openshift/src/class/oc_adm_registry.py +++ b/roles/lib_openshift/src/class/oc_adm_registry.py @@ -119,7 +119,6 @@ class Registry(OpenShiftCLI):      def exists(self):          '''does the object exist?''' -        self.get()          if self.deploymentconfig and self.service:              return True @@ -146,7 +145,7 @@ class Registry(OpenShiftCLI):          ''' prepare a registry for instantiation '''          options = self.config.to_option_list() -        cmd = ['registry', '-n', self.config.namespace] +        cmd = ['registry']          cmd.extend(options)          cmd.extend(['--dry-run=True', '-o', 'json']) @@ -180,7 +179,8 @@ class Registry(OpenShiftCLI):              service.put('spec.portalIP', self.portal_ip)          # the dry-run doesn't apply the selector correctly -        service.put('spec.selector', self.service.get_selector()) +        if self.service: +            service.put('spec.selector', self.service.get_selector())          # need to create the service and the deploymentconfig          service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict) diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py index 7b163b120..356d06fdf 100644 --- a/roles/lib_openshift/src/class/oc_adm_router.py +++ b/roles/lib_openshift/src/class/oc_adm_router.py @@ -224,7 +224,7 @@ class Router(OpenShiftCLI):          options = self.config.to_option_list() -        cmd = ['router', self.config.name, '-n', self.config.namespace] +        cmd = ['router', self.config.name]          cmd.extend(options)          cmd.extend(['--dry-run=True', '-o', 'json']) diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py index d037074a5..334542b97 100644 --- 
a/roles/lib_openshift/src/lib/base.py +++ b/roles/lib_openshift/src/lib/base.py @@ -591,10 +591,11 @@ class OpenShiftCLIConfig(object):      def stringify(self):          ''' return the options hash as cli params in a string '''          rval = [] -        for key, data in self.config_options.items(): +        for key in sorted(self.config_options.keys()): +            data = self.config_options[key]              if data['include'] \                 and (data['value'] or isinstance(data['value'], int)): -                rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) +                rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))          return rval diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py new file mode 100755 index 000000000..bab36fddc --- /dev/null +++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py @@ -0,0 +1,369 @@ +#!/usr/bin/env python +''' + Unit tests for oc adm registry +''' + +import os +import six +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error +# place class in our python path +module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501 +sys.path.insert(0, module_path) +from oc_adm_registry import Registry, locate_oc_binary  # noqa: E402 + + +# pylint: disable=too-many-public-methods +class RegistryTest(unittest.TestCase): +    ''' +     Test class for Registry +    ''' +    dry_run = '''{ +        "kind": "List", +        "apiVersion": "v1", +        "metadata": {}, +        "items": [ +            { +                "kind": "ServiceAccount", +                "apiVersion": "v1", +                "metadata": { +                    "name": "registry", +                    "creationTimestamp": null +                } +            }, +            { +                "kind": "ClusterRoleBinding", +                "apiVersion": "v1", +                "metadata": { +                    "name": "registry-registry-role", +                    "creationTimestamp": null +                }, +                "userNames": [ +                    "system:serviceaccount:default:registry" +                ], +                "groupNames": null, +                "subjects": [ +                    { +                        "kind": "ServiceAccount", +                        "namespace": "default", +                        "name": "registry" +                    } +                ], +                "roleRef": { +                    "kind": "ClusterRole", +                    "name": "system:registry" +                } +            }, +            { +                "kind": "DeploymentConfig", +                "apiVersion": "v1", +                "metadata": { +                    "name": "docker-registry", +                    "creationTimestamp": null, +                    "labels": { +                        "docker-registry": "default" +                    } +                }, +                "spec": { +                    "strategy": { +                        "resources": {} +                    }, +                    "triggers": [ +                        { +                            "type": "ConfigChange" +                        } +                    ], +                    
"replicas": 1, +                    "test": false, +                    "selector": { +                        "docker-registry": "default" +                    }, +                    "template": { +                        "metadata": { +                            "creationTimestamp": null, +                            "labels": { +                                "docker-registry": "default" +                            } +                        }, +                        "spec": { +                            "volumes": [ +                                { +                                    "name": "registry-storage", +                                    "emptyDir": {} +                                } +                            ], +                            "containers": [ +                                { +                                    "name": "registry", +                                    "image": "openshift3/ose-docker-registry:v3.5.0.39", +                                    "ports": [ +                                        { +                                            "containerPort": 5000 +                                        } +                                    ], +                                    "env": [ +                                        { +                                            "name": "REGISTRY_HTTP_ADDR", +                                            "value": ":5000" +                                        }, +                                        { +                                            "name": "REGISTRY_HTTP_NET", +                                            "value": "tcp" +                                        }, +                                        { +                                            "name": "REGISTRY_HTTP_SECRET", +                                            "value": "WQjSGeUu5KFZRTwGeIXgwIjyraNDLmdJblsFbtzZdF8=" +                                        }, +                                        { +                                            "name": "REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ENFORCEQUOTA", +                                            "value": "false" +                                        } +                                    ], +                                    "resources": { +                                        "requests": { +                                            "cpu": "100m", +                                            "memory": "256Mi" +                                        } +                                    }, +                                    "volumeMounts": [ +                                        { +                                            "name": "registry-storage", +                                            "mountPath": "/registry" +                                        } +                                    ], +                                    "livenessProbe": { +                                        "httpGet": { +                                            "path": "/healthz", +                                            "port": 5000 +                                        }, +                                        "initialDelaySeconds": 10, +                                        "timeoutSeconds": 5 +                                    }, +                                    "readinessProbe": { +                                        "httpGet": { +                                            "path": "/healthz", +                     
                       "port": 5000 +                                        }, +                                        "timeoutSeconds": 5 +                                    }, +                                    "securityContext": { +                                        "privileged": false +                                    } +                                } +                            ], +                            "nodeSelector": { +                                "type": "infra" +                            }, +                            "serviceAccountName": "registry", +                            "serviceAccount": "registry" +                        } +                    } +                }, +                "status": { +                    "latestVersion": 0, +                    "observedGeneration": 0, +                    "replicas": 0, +                    "updatedReplicas": 0, +                    "availableReplicas": 0, +                    "unavailableReplicas": 0 +                } +            }, +            { +                "kind": "Service", +                "apiVersion": "v1", +                "metadata": { +                    "name": "docker-registry", +                    "creationTimestamp": null, +                    "labels": { +                        "docker-registry": "default" +                    } +                }, +                "spec": { +                    "ports": [ +                        { +                            "name": "5000-tcp", +                            "port": 5000, +                            "targetPort": 5000 +                        } +                    ], +                    "selector": { +                        "docker-registry": "default" +                    }, +                    "clusterIP": "172.30.119.110", +                    "sessionAffinity": "ClientIP" +                }, +                "status": { +                    "loadBalancer": {} +                } +            } +        ]}''' + +    @mock.patch('oc_adm_registry.Utils._write') +    @mock.patch('oc_adm_registry.Utils.create_tmpfile_copy') +    @mock.patch('oc_adm_registry.Registry._run') +    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write): +        ''' Testing state present ''' +        params = {'state': 'present', +                  'debug': False, +                  'namespace': 'default', +                  'name': 'docker-registry', +                  'kubeconfig': '/etc/origin/master/admin.kubeconfig', +                  'images': None, +                  'latest_images': None, +                  'labels': None, +                  'ports': ['5000'], +                  'replicas': 1, +                  'selector': 'type=infra', +                  'service_account': 'registry', +                  'mount_host': None, +                  'volume_mounts': None, +                  'env_vars': {}, +                  'enforce_quota': False, +                  'force': False, +                  'daemonset': False, +                  'tls_key': None, +                  'tls_certificate': None, +                  'edits': []} + +        mock_cmd.side_effect = [ +            (1, '', 'Error from server (NotFound): deploymentconfigs "docker-registry" not found'), +            (1, '', 'Error from server (NotFound): service "docker-registry" not found'), +            (0, RegistryTest.dry_run, ''), +            (0, '', ''), +            (0, '', ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            
'/tmp/mocked_kubeconfig', +            '/tmp/mocked_kubeconfig', +        ] + +        results = Registry.run_ansible(params, False) + +        self.assertTrue(results['changed']) +        for result in results['results']['results']: +            self.assertEqual(result['returncode'], 0) + +        mock_cmd.assert_has_calls([ +            mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'adm', 'registry', '--daemonset=False', '--enforce-quota=False', +                       '--ports=5000', '--replicas=1', '--selector=type=infra', +                       '--service-account=registry', '--dry-run=True', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), +            mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), ]) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_path_exists.side_effect = lambda _: False + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_shutil_which.side_effect = lambda _f, path=None: None + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py new file mode 100755 index 000000000..51393dbaf --- /dev/null +++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python +''' + Unit tests for oc adm router +''' + +import os +import six +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error +# place class in our python path +module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501 +sys.path.insert(0, module_path) +from oc_adm_router import Router, locate_oc_binary  # noqa: E402 + + +# pylint: disable=too-many-public-methods +class RouterTest(unittest.TestCase): +    ''' +     Test class for Router +    ''' +    dry_run = '''{ +    "kind": "List", +    "apiVersion": "v1", +    "metadata": {}, +    "items": [ +        { +            "kind": "ServiceAccount", +            "apiVersion": "v1", +            "metadata": { +                "name": "router", +                "creationTimestamp": null +            } +        }, +        { +            "kind": "ClusterRoleBinding", +            "apiVersion": "v1", +            "metadata": { +                "name": "router-router-role", +                "creationTimestamp": null +            }, +            "userNames": [ +                "system:serviceaccount:default:router" +            ], +            "groupNames": null, +            "subjects": [ +                { +                    "kind": "ServiceAccount", +                    "namespace": "default", +                    "name": "router" +                } +            ], +            "roleRef": { +                "kind": "ClusterRole", +                "name": "system:router" +            } +        }, +        { +            "kind": "DeploymentConfig", +            "apiVersion": "v1", +            "metadata": { +                "name": "router", +                "creationTimestamp": null, +                "labels": { +                    "router": "router" +                } +            }, +            "spec": { +                "strategy": { +            
        "type": "Rolling", +                    "rollingParams": { +                        "maxUnavailable": "25%", +                        "maxSurge": 0 +                    }, +                    "resources": {} +                }, +                "triggers": [ +                    { +                        "type": "ConfigChange" +                    } +                ], +                "replicas": 2, +                "test": false, +                "selector": { +                    "router": "router" +                }, +                "template": { +                    "metadata": { +                        "creationTimestamp": null, +                        "labels": { +                            "router": "router" +                        } +                    }, +                    "spec": { +                        "volumes": [ +                            { +                                "name": "server-certificate", +                                "secret": { +                                    "secretName": "router-certs" +                                } +                            } +                        ], +                        "containers": [ +                            { +                                "name": "router", +                                "image": "openshift3/ose-haproxy-router:v3.5.0.39", +                                "ports": [ +                                    { +                                        "containerPort": 80 +                                    }, +                                    { +                                        "containerPort": 443 +                                    }, +                                    { +                                        "name": "stats", +                                        "containerPort": 1936, +                                        "protocol": "TCP" +                                    } +                                ], +                                "env": [ +                                    { +                                        "name": "DEFAULT_CERTIFICATE_DIR", +                                        "value": "/etc/pki/tls/private" +                                    }, +                                    { +                                        "name": "ROUTER_EXTERNAL_HOST_HOSTNAME" +                                    }, +                                    { +                                        "name": "ROUTER_EXTERNAL_HOST_HTTPS_VSERVER" +                                    }, +                                    { +                                        "name": "ROUTER_EXTERNAL_HOST_HTTP_VSERVER" +                                    }, +                                    { +                                        "name": "ROUTER_EXTERNAL_HOST_INSECURE", +                                        "value": "false" +                                    }, +                                    { +                                        "name": "ROUTER_EXTERNAL_HOST_INTERNAL_ADDRESS" +                                    }, +                                    { +                                        "name": "ROUTER_EXTERNAL_HOST_PARTITION_PATH" +                                    }, +                                    { +                                        "name": "ROUTER_EXTERNAL_HOST_PASSWORD" +                                    }, +                                    { +                                        "name": 
"ROUTER_EXTERNAL_HOST_PRIVKEY", +                                        "value": "/etc/secret-volume/router.pem" +                                    }, +                                    { +                                        "name": "ROUTER_EXTERNAL_HOST_USERNAME" +                                    }, +                                    { +                                        "name": "ROUTER_EXTERNAL_HOST_VXLAN_GW_CIDR" +                                    }, +                                    { +                                        "name": "ROUTER_SERVICE_HTTPS_PORT", +                                        "value": "443" +                                    }, +                                    { +                                        "name": "ROUTER_SERVICE_HTTP_PORT", +                                        "value": "80" +                                    }, +                                    { +                                        "name": "ROUTER_SERVICE_NAME", +                                        "value": "router" +                                    }, +                                    { +                                        "name": "ROUTER_SERVICE_NAMESPACE", +                                        "value": "default" +                                    }, +                                    { +                                        "name": "ROUTER_SUBDOMAIN" +                                    }, +                                    { +                                        "name": "STATS_PASSWORD", +                                        "value": "eSfUICQyyr" +                                    }, +                                    { +                                        "name": "STATS_PORT", +                                        "value": "1936" +                                    }, +                                    { +                                        "name": "STATS_USERNAME", +                                        "value": "admin" +                                    } +                                ], +                                "resources": { +                                    "requests": { +                                        "cpu": "100m", +                                        "memory": "256Mi" +                                    } +                                }, +                                "volumeMounts": [ +                                    { +                                        "name": "server-certificate", +                                        "readOnly": true, +                                        "mountPath": "/etc/pki/tls/private" +                                    } +                                ], +                                "livenessProbe": { +                                    "httpGet": { +                                        "path": "/healthz", +                                        "port": 1936, +                                        "host": "localhost" +                                    }, +                                    "initialDelaySeconds": 10 +                                }, +                                "readinessProbe": { +                                    "httpGet": { +                                        "path": "/healthz", +                                        "port": 1936, +                                        "host": "localhost" +                                    }, +                                
    "initialDelaySeconds": 10 +                                }, +                                "imagePullPolicy": "IfNotPresent" +                            } +                        ], +                        "nodeSelector": { +                            "type": "infra" +                        }, +                        "serviceAccountName": "router", +                        "serviceAccount": "router", +                        "hostNetwork": true, +                        "securityContext": {} +                    } +                } +            }, +            "status": { +                "latestVersion": 0, +                "observedGeneration": 0, +                "replicas": 0, +                "updatedReplicas": 0, +                "availableReplicas": 0, +                "unavailableReplicas": 0 +            } +        }, +        { +            "kind": "Service", +            "apiVersion": "v1", +            "metadata": { +                "name": "router", +                "creationTimestamp": null, +                "labels": { +                    "router": "router" +                }, +                "annotations": { +                    "service.alpha.openshift.io/serving-cert-secret-name": "router-certs" +                } +            }, +            "spec": { +                "ports": [ +                    { +                        "name": "80-tcp", +                        "port": 80, +                        "targetPort": 80 +                    }, +                    { +                        "name": "443-tcp", +                        "port": 443, +                        "targetPort": 443 +                    }, +                    { +                        "name": "1936-tcp", +                        "protocol": "TCP", +                        "port": 1936, +                        "targetPort": 1936 +                    } +                ], +                "selector": { +                    "router": "router" +                } +            }, +            "status": { +                "loadBalancer": {} +            } +        } +    ] +}''' + +    @mock.patch('oc_adm_router.Utils._write') +    @mock.patch('oc_adm_router.Utils.create_tmpfile_copy') +    @mock.patch('oc_adm_router.Router._run') +    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write): +        ''' Testing a create ''' +        params = {'state': 'present', +                  'debug': False, +                  'namespace': 'default', +                  'name': 'router', +                  'default_cert': None, +                  'cert_file': None, +                  'key_file': None, +                  'cacert_file': None, +                  'labels': None, +                  'ports': ['80:80', '443:443'], +                  'images': None, +                  'latest_images': None, +                  'clusterip': None, +                  'portalip': None, +                  'session_affinity': None, +                  'service_type': None, +                  'kubeconfig': '/etc/origin/master/admin.kubeconfig', +                  'replicas': 2, +                  'selector': 'type=infra', +                  'service_account': 'router', +                  'router_type': None, +                  'host_network': None, +                  'external_host': None, +                  'external_host_vserver': None, +                  'external_host_insecure': False, +                  'external_host_partition_path': None, +                  'external_host_username': None, +   
               'external_host_password': None, +                  'external_host_private_key': None, +                  'expose_metrics': False, +                  'metrics_image': None, +                  'stats_user': None, +                  'stats_password': None, +                  'stats_port': 1936, +                  'edits': []} + +        mock_cmd.side_effect = [ +            (1, '', 'Error from server (NotFound): deploymentconfigs "router" not found'), +            (1, '', 'Error from server (NotFound): service "router" not found'), +            (1, '', 'Error from server (NotFound): serviceaccount "router" not found'), +            (1, '', 'Error from server (NotFound): secret "router-certs" not found'), +            (1, '', 'Error from server (NotFound): clsuterrolebinding "router-router-role" not found'), +            (0, RouterTest.dry_run, ''), +            (0, '', ''), +            (0, '', ''), +            (0, '', ''), +            (0, '', ''), +            (0, '', ''), +        ] + +        mock_tmpfile_copy.side_effect = [ +            '/tmp/mocked_kubeconfig', +        ] + +        results = Router.run_ansible(params, False) + +        self.assertTrue(results['changed']) +        for result in results['results']['results']: +            self.assertEqual(result['returncode'], 0) + +        mock_cmd.assert_has_calls([ +            mock.call(['oc', 'get', 'dc', 'router', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'get', 'svc', 'router', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'get', 'sa', 'router', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'get', 'secret', 'router-certs', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'get', 'clusterrolebinding', 'router-router-role', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'adm', 'router', 'router', '--expose-metrics=False', '--external-host-insecure=False', +                       '--ports=80:80,443:443', '--replicas=2', '--selector=type=infra', '--service-account=router', +                       '--stats-port=1936', '--dry-run=True', '-o', 'json', '-n', 'default'], None), +            mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), +            mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), +            mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None), +            mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None)]) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_path_exists.side_effect = lambda _: False + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def 
test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY3, 'py2 test only') +    @mock.patch('os.path.exists') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_path_exists.side_effect = lambda f: f == oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup fallback ''' + +        mock_env_get.side_effect = lambda _v, _d: '' + +        mock_shutil_which.side_effect = lambda _f, path=None: None + +        self.assertEqual(locate_oc_binary(), 'oc') + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in path ''' + +        oc_bin = '/usr/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in /usr/local/bin ''' + +        oc_bin = '/usr/local/bin/oc' + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) + +    @unittest.skipIf(six.PY2, 'py3 test only') +    @mock.patch('shutil.which') +    @mock.patch('os.environ.get') +    def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which): +        ''' Testing binary lookup in ~/bin ''' + +        oc_bin = os.path.expanduser('~/bin/oc') + +        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin' + +        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin + +        self.assertEqual(locate_oc_binary(), oc_bin) diff --git a/roles/openshift_excluder/meta/main.yml b/roles/openshift_excluder/meta/main.yml index 8bca38e77..4d1c1efca 100644 --- a/roles/openshift_excluder/meta/main.yml +++ b/roles/openshift_excluder/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - { role: openshift_facts } +- { role: openshift_repos } diff --git a/roles/openshift_excluder/tasks/adjust.yml b/roles/openshift_excluder/tasks/adjust.yml index 6f4070c3d..2535b9ea6 100644 --- a/roles/openshift_excluder/tasks/adjust.yml +++ b/roles/openshift_excluder/tasks/adjust.yml @@ -20,4 +20,4 @@        # disable it if the docker excluder is enabled        disable_openshift_excluder: "{{ openshift_excluder_on | bool }}"    when: -  - not openshift.common.is_containerized | bool +  - not openshift.common.is_atomic | bool diff --git 
a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml index ef6fc4a01..413c7b5cf 100644 --- a/roles/openshift_excluder/tasks/enable.yml +++ b/roles/openshift_excluder/tasks/enable.yml @@ -18,4 +18,4 @@        enable_openshift_excluder: "{{ not disable_openshift_excluder_override | default(not openshift_excluder_on) | bool }}"    when: -  - not openshift.common.is_containerized | bool +  - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/exclude.yml b/roles/openshift_excluder/tasks/exclude.yml index ee0ad8a0b..af9824aae 100644 --- a/roles/openshift_excluder/tasks/exclude.yml +++ b/roles/openshift_excluder/tasks/exclude.yml @@ -17,4 +17,4 @@      when:      - enable_openshift_excluder | default(false) | bool    when: -  - not openshift.common.is_containerized | bool +  - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml index 01fe5da55..dcc8df0cb 100644 --- a/roles/openshift_excluder/tasks/install.yml +++ b/roles/openshift_excluder/tasks/install.yml @@ -18,4 +18,4 @@      when:      - install_openshift_excluder | default(true) | bool    when: -  - not openshift.common.is_containerized | bool +  - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/status.yml b/roles/openshift_excluder/tasks/status.yml index 3b6821244..363ccdbea 100644 --- a/roles/openshift_excluder/tasks/status.yml +++ b/roles/openshift_excluder/tasks/status.yml @@ -81,4 +81,4 @@      - "{{ docker_excluder_on }}"    when: -  - not openshift.common.is_containerized | bool +  - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/unexclude.yml b/roles/openshift_excluder/tasks/unexclude.yml index 4df92bc65..196ca25f5 100644 --- a/roles/openshift_excluder/tasks/unexclude.yml +++ b/roles/openshift_excluder/tasks/unexclude.yml @@ -16,4 +16,4 @@      - disable_openshift_excluder | default(false) | bool    when: -  - not openshift.common.is_containerized | bool +  - not openshift.common.is_atomic | bool | 
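A note on the binary-lookup tests added in test_oc_adm_router.py above: they patch only two seams, os.environ.get and either os.path.exists (Python 2) or shutil.which (Python 3), and they expect locate_oc_binary() to find oc on PATH, in /usr/local/bin, or in ~/bin, falling back to the bare string 'oc'. The following minimal sketch is consistent with that behaviour; it is an illustration only, not the exact code shipped in the lib_openshift library, and the ADDITIONAL_PATH_LOOKUPS name is a placeholder:

import os
import shutil

# Locations that may be missing from $PATH when running under ansible/sudo
# (placeholder constant name, used here only for illustration).
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Return a full path to the oc client, or the bare name as a fallback. '''
    paths = os.environ.get('PATH', os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS

    oc_binary = 'oc'

    try:
        # Python 3: shutil.which accepts an explicit search path.
        which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
        if which_result is not None:
            oc_binary = which_result
    except AttributeError:
        # Python 2 has no shutil.which; probe each candidate directory instead.
        for path in paths:
            if os.path.exists(os.path.join(path, oc_binary)):
                oc_binary = os.path.join(path, oc_binary)
                break

    return oc_binary

Because the lookup goes through exactly those two calls, the tests can drive every branch (PATH hit, /usr/local/bin, ~/bin, and the plain 'oc' fallback) without touching the real filesystem.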