22 files changed, 119 insertions, 118 deletions
diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml
index a9fc18958..6f95b4e2d 100644
--- a/playbooks/byo/openshift-cluster/service-catalog.yml
+++ b/playbooks/byo/openshift-cluster/service-catalog.yml
@@ -5,6 +5,12 @@
 # currently supported method.
 #
 - include: initialize_groups.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+  tags:
+  - always
 
 - include: ../../common/openshift-cluster/service_catalog.yml
   vars:
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 5425f448f..50351588f 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -37,7 +37,7 @@
       dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
       yaml_key: dnsConfig.bindAddress
       yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
-    notify: restart master
+    notify: restart master api
   - meta: flush_handlers
 
 - name: Configure nodes for dnsmasq
diff --git a/playbooks/common/openshift-cluster/initialize_firewall.yml b/playbooks/common/openshift-cluster/initialize_firewall.yml
index 7d7a427d4..f0374fbc7 100644
--- a/playbooks/common/openshift-cluster/initialize_firewall.yml
+++ b/playbooks/common/openshift-cluster/initialize_firewall.yml
@@ -1,7 +1,7 @@
 ---
-- name: Initialize host facts
+- name: Initialize host firewall
   hosts: oo_all_hosts
   tasks:
-  - name: install and configure the proper firewall settings
+  - name: Install and configure the proper firewall settings
     include_role:
       name: os_firewall
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index 6c12875fe..599350258 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,5 +1,4 @@
 ---
-- include: evaluate_groups.yml
 
 - name: Update Master configs
   hosts: oo_masters
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index 47fa8cdf5..192305bc8 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -1,4 +1,13 @@
 ---
+- name: Gather facts
+  hosts: oo_etcd_to_config:oo_new_etcd_to_config
+  roles:
+  - openshift_etcd_facts
+  post_tasks:
+  - set_fact:
+      etcd_hostname: "{{ etcd_hostname }}"
+      etcd_ip: "{{ etcd_ip }}"
+
 - name: Configure etcd
   hosts: oo_new_etcd_to_config
   serial: 1
@@ -8,11 +17,11 @@
   pre_tasks:
   - name: Add new etcd members to cluster
     command: >
-      /usr/bin/etcdctl  --cert-file {{ etcd_peer_cert_file }}
-                        --key-file {{ etcd_peer_key_file }}
-                        --ca-file {{ etcd_peer_ca_file }}
-                        -C {{ etcd_peer_url_scheme }}://{{ etcd_ca_host }}:{{ etcd_client_port }}
-                        member add {{ inventory_hostname }} {{ etcd_peer_url_scheme }}://{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}:{{ etcd_peer_port }}
+      /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
+                       --key-file {{ etcd_peer_key_file }}
+                       --ca-file {{ etcd_peer_ca_file }}
+                       -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+                       member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
     delegate_to: "{{ etcd_ca_host }}"
     register: etcd_add_check
   roles:
@@ -23,7 +32,6 @@
     etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
     etcd_initial_cluster_state: "existing"
     initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') }}"
-    etcd_hostname: "{{ inventory_hostname }}"
     etcd_ca_setup: False
     r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
   - role: nickhammond.logrotate
diff --git a/roles/calico/README.md b/roles/calico/README.md
index 9b9458bfa..65f66ebfa 100644
--- a/roles/calico/README.md
+++ b/roles/calico/README.md
@@ -6,12 +6,6 @@ Configure Calico components for the Master host.
 
 * Ansible 2.2
 
-## Warning: This Calico Integration is in Alpha
-
-Calico shares the etcd instance used by OpenShift, and distributes client etcd certificates to each node.
-For this reason, **we do not (yet) recommend running Calico on any production-like
-cluster, or using it for any purpose besides early access testing.**
-
 ## Installation
 
 To install, set the following inventory configuration parameters:
@@ -20,7 +14,19 @@ To install, set the following inventory configuration parameters:
 
 * `openshift_use_openshift_sdn=False`
 * `os_sdn_network_plugin_name='cni'`
 
-## Additional Calico/Node and Felix Configuration Options
+For more information, see [Calico's official OpenShift Installation Documentation](https://docs.projectcalico.org/latest/getting-started/openshift/installation#bring-your-own-etcd)
+
+## Improving security with BYO-etcd
+
+By default, Calico uses the etcd set up by OpenShift. To accomplish this, it generates and distributes client etcd certificates to each node.
+Distributing these certs across the cluster in this way weakens the overall security,
+so Calico should not be deployed in production in this mode.
+
+Instead, Calico can be installed in BYO-etcd mode, where it connects to an externally
+set up etcd. For information on deploying Calico in BYO-etcd mode, see
+[Calico's official OpenShift Installation Documentation](https://docs.projectcalico.org/latest/getting-started/openshift/installation#bring-your-own-etcd)
+
+## Calico Configuration Options
 
 Additional parameters that can be defined in the inventory are:
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index 207dee068..e7a9db92f 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -5,11 +5,11 @@ cni_conf_dir: "/etc/cni/net.d/"
 cni_bin_dir: "/opt/cni/bin/"
 cni_url: "https://github.com/containernetworking/cni/releases/download/v0.5.2/cni-amd64-v0.5.2.tgz"
 
-calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico"
-calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.8.3/calico-ipam"
+calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.10.0/calico"
+calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.10.0/calico-ipam"
 
 calico_ipv4pool_ipip: "always"
 calico_ipv4pool_cidr: "192.168.0.0/16"
 
 calico_log_dir: "/var/log/calico"
-calico_node_image: "calico/node:v1.2.1"
+calico_node_image: "calico/node:v2.4.1"
diff --git a/roles/calico_master/defaults/main.yaml b/roles/calico_master/defaults/main.yaml
index b2df0105f..d40286aba 100644
--- a/roles/calico_master/defaults/main.yaml
+++ b/roles/calico_master/defaults/main.yaml
@@ -3,5 +3,5 @@ kubeconfig: "{{ openshift.common.config_base }}/master/openshift-master.kubeconf
 
 calicoctl_bin_dir: "/usr/local/bin/"
 
-calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.1.3/calicoctl"
-calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.5.4"
+calico_url_calicoctl: "https://github.com/projectcalico/calicoctl/releases/download/v1.4.0/calicoctl"
+calico_url_policy_controller: "quay.io/calico/kube-policy-controller:v0.7.0"
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index cb7f75398..d0363c981 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -49,7 +49,6 @@
   become: yes
   template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644
   notify:
-    - restart master
     - restart master api
     - restart master controllers
     - restart nuage-openshift-monitor
diff --git a/roles/nuage_node/templates/vsp-openshift.j2 b/roles/nuage_node/templates/vsp-openshift.j2
index 9fab53906..f6bccebc2 100644
--- a/roles/nuage_node/templates/vsp-openshift.j2
+++ b/roles/nuage_node/templates/vsp-openshift.j2
@@ -9,7 +9,7 @@ enterpriseName: {{ enterprise }}
 # Name of the domain in which pods will reside
 domainName: {{ domain }}
 # Name of the VSD user in admin group
-vsdUser: {{ vsduser }}
+vsdUser: {{ vsd_user }}
 # IP address and port number of master API server
 masterApiServer: {{ api_server }}
 # REST server URL
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
index 79e59b410..27ed57703 100644
--- a/roles/openshift_cfme/defaults/main.yml
+++ b/roles/openshift_cfme/defaults/main.yml
@@ -35,9 +35,9 @@ openshift_cfme_nfs_server: "{{ groups.nfs.0 }}"
 # --template=manageiq). If False everything UP TO 'new-app' is ran.
 openshift_cfme_install_app: False
 # Docker image to pull
-openshift_cfme_application_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-app' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods:app-latest-fine' }}"
-openshift_cfme_postgresql_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods:app-latest-fine' }}"
-openshift_cfme_memcached_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-memcached' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods:app-latest-fine' }}"
+openshift_cfme_application_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-app' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
+openshift_cfme_postgresql_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
+openshift_cfme_memcached_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-memcached' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
 openshift_cfme_application_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'app-latest-fine' }}"
 openshift_cfme_memcached_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'memcached-latest-fine' }}"
 openshift_cfme_postgresql_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'postgresql-latest-fine' }}"
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index 09139408c..07ec6f7ef 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -197,6 +197,31 @@ class OpenShiftCheck(object):
         components = tuple(int(x) for x in components[:2])
         return components
 
+    def find_ansible_mount(self, path):
+        """Return the mount point for path from ansible_mounts."""
+
+        # reorganize list of mounts into dict by path
+        mount_for_path = {
+            mount['mount']: mount
+            for mount
+            in self.get_var('ansible_mounts')
+        }
+
+        # NOTE: including base cases '/' and '' to ensure the loop ends
+        mount_targets = set(mount_for_path.keys()) | {'/', ''}
+        mount_point = path
+        while mount_point not in mount_targets:
+            mount_point = os.path.dirname(mount_point)
+
+        try:
+            return mount_for_path[mount_point]
+        except KeyError:
+            known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path))
+            raise OpenShiftCheckException(
+                'Unable to determine mount point for path "{}".\n'
+                'Known mount points: {}.'.format(path, known_mounts or 'none')
+            )
+
 
 LOADER_EXCLUDES = (
     "__init__.py",
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index 39ac0e4ec..6d1dea9ce 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -1,6 +1,5 @@
 """Check that there is enough disk space in predefined paths."""
 
-import os.path
 import tempfile
 
 from openshift_checks import OpenShiftCheck, OpenShiftCheckException
@@ -55,9 +54,6 @@ class DiskAvailability(OpenShiftCheck):
 
     def run(self):
         group_names = self.get_var("group_names")
-        ansible_mounts = self.get_var("ansible_mounts")
-        ansible_mounts = {mount['mount']: mount for mount in ansible_mounts}
-
         user_config = self.get_var("openshift_check_min_host_disk_gb", default={})
         try:
             # For backwards-compatibility, if openshift_check_min_host_disk_gb
@@ -80,7 +76,7 @@
         # not part of the official recommendation but present in the user
         # configuration.
         for path, recommendation in self.recommended_disk_space_bytes.items():
-            free_bytes = self.free_bytes(path, ansible_mounts)
+            free_bytes = self.free_bytes(path)
             recommended_bytes = max(recommendation.get(name, 0) for name in group_names)
 
             config = user_config.get(path, {})
@@ -127,22 +123,17 @@
 
         return {}
 
-    @staticmethod
-    def free_bytes(path, ansible_mounts):
+    def free_bytes(self, path):
         """Return the size available in path based on ansible_mounts."""
-        mount_point = path
-        # arbitry value to prevent an infinite loop, in the unlike case that '/'
-        # is not in ansible_mounts.
-        max_depth = 32
-        while mount_point not in ansible_mounts and max_depth > 0:
-            mount_point = os.path.dirname(mount_point)
-            max_depth -= 1
-
+        mount = self.find_ansible_mount(path)
         try:
-            free_bytes = ansible_mounts[mount_point]['size_available']
+            return mount['size_available']
         except KeyError:
-            known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(ansible_mounts)) or 'none'
-            msg = 'Unable to determine disk availability for "{}". Known mount points: {}.'
-            raise OpenShiftCheckException(msg.format(path, known_mounts))
-
-        return free_bytes
+            raise OpenShiftCheckException(
+                'Unable to retrieve disk availability for "{path}".\n'
+                'Ansible facts included a matching mount point for this path:\n'
+                '  {mount}\n'
+                'however it is missing the size_available field.\n'
+                'To investigate, you can inspect the output of `ansible -m setup <host>`'
+                ''.format(path=path, mount=mount)
+            )
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
index 7ae384bd7..0558ddf14 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_storage.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -1,6 +1,5 @@
 """Check Docker storage driver and usage."""
 import json
-import os.path
 import re
 from openshift_checks import OpenShiftCheck, OpenShiftCheckException
 from openshift_checks.mixins import DockerHostMixin
@@ -252,7 +251,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
                 "msg": "Specified 'max_overlay_usage_percent' is not a percentage: {}".format(threshold),
             }
 
-        mount = self.find_ansible_mount(path, self.get_var("ansible_mounts"))
+        mount = self.find_ansible_mount(path)
         try:
             free_bytes = mount['size_available']
             total_bytes = mount['size_total']
@@ -275,22 +274,3 @@
             }
 
         return {}
-
-    # TODO(lmeyer): migrate to base class
-    @staticmethod
-    def find_ansible_mount(path, ansible_mounts):
-        """Return the mount point for path from ansible_mounts."""
-
-        mount_for_path = {mount['mount']: mount for mount in ansible_mounts}
-        mount_point = path
-        while mount_point not in mount_for_path:
-            if mount_point in ["/", ""]:  # "/" not in ansible_mounts???
-                break
-            mount_point = os.path.dirname(mount_point)
-
-        try:
-            return mount_for_path[mount_point]
-        except KeyError:
-            known_mounts = ', '.join('"{}"'.format(mount) for mount in sorted(mount_for_path)) or 'none'
-            msg = 'Unable to determine mount point for path "{}". Known mount points: {}.'
-            raise OpenShiftCheckException(msg.format(path, known_mounts))
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
index ae8460b7e..f4296753a 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
@@ -2,7 +2,7 @@
 Ansible module for determining if the size of OpenShift image data exceeds a specified limit in an etcd cluster.
""" -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck  class EtcdImageDataSize(OpenShiftCheck): @@ -12,7 +12,7 @@ class EtcdImageDataSize(OpenShiftCheck):      tags = ["etcd"]      def run(self): -        etcd_mountpath = self._get_etcd_mountpath(self.get_var("ansible_mounts")) +        etcd_mountpath = self.find_ansible_mount("/var/lib/etcd")          etcd_avail_diskspace = etcd_mountpath["size_available"]          etcd_total_diskspace = etcd_mountpath["size_total"] @@ -68,18 +68,5 @@ class EtcdImageDataSize(OpenShiftCheck):          return {}      @staticmethod -    def _get_etcd_mountpath(ansible_mounts): -        valid_etcd_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"] - -        mount_for_path = {mnt.get("mount"): mnt for mnt in ansible_mounts} -        for path in valid_etcd_mount_paths: -            if path in mount_for_path: -                return mount_for_path[path] - -        paths = ', '.join(sorted(mount_for_path)) or 'none' -        msg = "Unable to determine a valid etcd mountpath. Paths mounted: {}.".format(paths) -        raise OpenShiftCheckException(msg) - -    @staticmethod      def _to_gigabytes(byte_size):          return float(byte_size) / 10.0**9 diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py index e55d55e91..e5d93ff3f 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py @@ -1,6 +1,6 @@  """A health check for OpenShift clusters.""" -from openshift_checks import OpenShiftCheck, OpenShiftCheckException +from openshift_checks import OpenShiftCheck  class EtcdVolume(OpenShiftCheck): @@ -11,8 +11,8 @@ class EtcdVolume(OpenShiftCheck):      # Default device usage threshold. Value should be in the range [0, 100].      default_threshold_percent = 90 -    # Where to find ectd data, higher priority first. -    supported_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"] +    # Where to find etcd data +    etcd_mount_path = "/var/lib/etcd"      def is_active(self):          etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or [] @@ -20,7 +20,7 @@ class EtcdVolume(OpenShiftCheck):          return super(EtcdVolume, self).is_active() and is_etcd_host      def run(self): -        mount_info = self._etcd_mount_info() +        mount_info = self.find_ansible_mount(self.etcd_mount_path)          available = mount_info["size_available"]          total = mount_info["size_total"]          used = total - available @@ -41,15 +41,3 @@ class EtcdVolume(OpenShiftCheck):              return {"failed": True, "msg": msg}          return {} - -    def _etcd_mount_info(self): -        ansible_mounts = self.get_var("ansible_mounts") -        mounts = {mnt.get("mount"): mnt for mnt in ansible_mounts} - -        for path in self.supported_mount_paths: -            if path in mounts: -                return mounts[path] - -        paths = ', '.join(sorted(mounts)) or 'none' -        msg = "Unable to find etcd storage mount point. 
Paths mounted: {}.".format(paths) -        raise OpenShiftCheckException(msg) diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py index 5720eeacf..f4fd2dfed 100644 --- a/roles/openshift_health_checker/test/disk_availability_test.py +++ b/roles/openshift_health_checker/test/disk_availability_test.py @@ -20,12 +20,24 @@ def test_is_active(group_names, is_active):      assert DiskAvailability(None, task_vars).is_active() == is_active -@pytest.mark.parametrize('ansible_mounts,extra_words', [ -    ([], ['none']),  # empty ansible_mounts -    ([{'mount': '/mnt'}], ['/mnt']),  # missing relevant mount paths -    ([{'mount': '/var'}], ['/var']),  # missing size_available +@pytest.mark.parametrize('desc, ansible_mounts, expect_chunks', [ +    ( +        'empty ansible_mounts', +        [], +        ['determine mount point', 'none'], +    ), +    ( +        'missing relevant mount paths', +        [{'mount': '/mnt'}], +        ['determine mount point', '/mnt'], +    ), +    ( +        'missing size_available', +        [{'mount': '/var'}, {'mount': '/usr'}, {'mount': '/tmp'}], +        ['missing', 'size_available'], +    ),  ]) -def test_cannot_determine_available_disk(ansible_mounts, extra_words): +def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):      task_vars = dict(          group_names=['masters'],          ansible_mounts=ansible_mounts, @@ -34,8 +46,8 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):      with pytest.raises(OpenShiftCheckException) as excinfo:          DiskAvailability(fake_execute_module, task_vars).run() -    for word in 'determine disk availability'.split() + extra_words: -        assert word in str(excinfo.value) +    for chunk in expect_chunks: +        assert chunk in str(excinfo.value)  @pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [ @@ -97,7 +109,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib      assert not result.get('failed', False) -@pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,extra_words', [ +@pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [      (          'test with no space available',          ['masters'], @@ -164,7 +176,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib          ['0.0 GB'],      ),  ], ids=lambda argval: argval[0]) -def test_fails_with_insufficient_disk_space(name, group_names, configured_min, ansible_mounts, extra_words): +def test_fails_with_insufficient_disk_space(name, group_names, configured_min, ansible_mounts, expect_chunks):      task_vars = dict(          group_names=group_names,          openshift_check_min_host_disk_gb=configured_min, @@ -174,8 +186,8 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a      result = DiskAvailability(fake_execute_module, task_vars).run()      assert result['failed'] -    for word in 'below recommended'.split() + extra_words: -        assert word in result.get('msg', '') +    for chunk in 'below recommended'.split() + expect_chunks: +        assert chunk in result.get('msg', '')  @pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [ diff --git a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py index e3d6706fa..d3aae98f2 100644 --- 
a/roles/openshift_health_checker/test/etcd_imagedata_size_test.py +++ b/roles/openshift_health_checker/test/etcd_imagedata_size_test.py @@ -1,7 +1,8 @@  import pytest  from collections import namedtuple -from openshift_checks.etcd_imagedata_size import EtcdImageDataSize, OpenShiftCheckException +from openshift_checks.etcd_imagedata_size import EtcdImageDataSize +from openshift_checks import OpenShiftCheckException  from etcdkeysize import check_etcd_key_size @@ -56,7 +57,7 @@ def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):      with pytest.raises(OpenShiftCheckException) as excinfo:          check.run() -    for word in 'determine valid etcd mountpath'.split() + extra_words: +    for word in ['Unable to determine mount point'] + extra_words:          assert word in str(excinfo.value) diff --git a/roles/openshift_health_checker/test/etcd_volume_test.py b/roles/openshift_health_checker/test/etcd_volume_test.py index 0b255136e..077cea3ea 100644 --- a/roles/openshift_health_checker/test/etcd_volume_test.py +++ b/roles/openshift_health_checker/test/etcd_volume_test.py @@ -1,6 +1,7 @@  import pytest -from openshift_checks.etcd_volume import EtcdVolume, OpenShiftCheckException +from openshift_checks.etcd_volume import EtcdVolume +from openshift_checks import OpenShiftCheckException  @pytest.mark.parametrize('ansible_mounts,extra_words', [ @@ -15,7 +16,7 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):      with pytest.raises(OpenShiftCheckException) as excinfo:          EtcdVolume(fake_execute_module, task_vars).run() -    for word in 'Unable to find etcd storage mount point'.split() + extra_words: +    for word in ['Unable to determine mount point'] + extra_words:          assert word in str(excinfo.value) diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml index 10f522b61..b96b8e29d 100644 --- a/roles/openshift_logging/tasks/update_master_config.yaml +++ b/roles/openshift_logging/tasks/update_master_config.yaml @@ -5,7 +5,6 @@      yaml_key: assetConfig.loggingPublicURL      yaml_value: "https://{{ openshift_logging_kibana_hostname }}"    notify: -  - restart master    - restart master api    - restart master controllers    tags: diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 3affdd348..723bdb0c4 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -70,7 +70,7 @@    with_items: "{{ master_api_proxy.stdout_lines | default([]) }}"  - name: Restore Master API AWS Options -  when: bool and openshift.master.cluster_method == "native" +  when: openshift.master.cluster_method == "native"        and master_api_aws.rc == 0 and        not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)    lineinfile: diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml index be1e3c3a0..5059d8d94 100644 --- a/roles/openshift_metrics/tasks/update_master_config.yaml +++ b/roles/openshift_metrics/tasks/update_master_config.yaml @@ -5,7 +5,6 @@      yaml_key: assetConfig.metricsPublicURL      yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics"    notify: -  - restart master    - restart master api    - restart master controllers   
   tags:
