Diffstat (limited to 'roles')
302 files changed, 2319 insertions, 3096 deletions
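The docker role changes below split the install path in two: roles/docker/tasks/main.yml now dispatches to either package_docker.yml or systemcontainer_docker.yml based on openshift.docker.use_system_container (fed by the inventory variable openshift_docker_use_system_container). As a minimal sketch of how a playbook might opt in, assuming only the docker_use_system_container role variable documented in the updated roles/docker/README.md (the play and host group are hypothetical, not an excerpt from this diff):

```yaml
# Hypothetical example, not part of this diff: enable the system-container
# install path of the docker role. The default (False) keeps the
# traditional package-based install handled by package_docker.yml.
- hosts: nodes
  roles:
    - role: docker
      docker_use_system_container: True
```

Package-based Docker remains the default, so existing inventories are unaffected unless this variable is set.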
diff --git a/roles/calico/handlers/main.yml b/roles/calico/handlers/main.yml index 65d75cf00..53cecfcc3 100644 --- a/roles/calico/handlers/main.yml +++ b/roles/calico/handlers/main.yml @@ -5,4 +5,6 @@ - name: restart docker become: yes - systemd: name=docker state=restarted + systemd: + name: "{{ openshift.docker.service_name }}" + state: restarted diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml index 97b9762df..0847c92bc 100644 --- a/roles/contiv/tasks/netplugin.yml +++ b/roles/contiv/tasks/netplugin.yml @@ -105,7 +105,7 @@ - name: Docker | Restart docker service: - name: docker + name: "{{ openshift.docker.service_name }}" state: restarted when: docker_updated|changed diff --git a/roles/docker/README.md b/roles/docker/README.md index ea06fd41a..f25ca03cd 100644 --- a/roles/docker/README.md +++ b/roles/docker/README.md @@ -1,7 +1,7 @@ Docker ========= -Ensures docker package is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes. +Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes. Requirements ------------ @@ -11,8 +11,10 @@ Ansible 2.2 Role Variables -------------- -udevw_udevd_dir: location of systemd config for systemd-udevd.service +docker_conf_dir: location of the Docker configuration directory +docker_systemd_dir location of the systemd directory for Docker docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446) +udevw_udevd_dir: location of systemd config for systemd-udevd.service Dependencies ------------ @@ -26,6 +28,7 @@ Example Playbook roles: - role: docker docker_udev_workaround: "true" + docker_use_system_container: False License ------- diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index 9ccb306fc..7f91afb37 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -2,7 +2,7 @@ - name: restart docker systemd: - name: docker + name: "{{ openshift.docker.service_name }}" state: restarted when: not docker_service_status_changed | default(false) | bool diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml index ad28cece9..cd4083572 100644 --- a/roles/docker/meta/main.yml +++ b/roles/docker/meta/main.yml @@ -11,3 +11,4 @@ galaxy_info: - 7 dependencies: - role: os_firewall +- role: lib_openshift diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index c34700aeb..0c2b16acf 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -1,119 +1,17 @@ --- -- name: Get current installed Docker version - command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" - when: not openshift.common.is_atomic | bool - register: curr_docker_version - changed_when: false - -- name: Error out if Docker pre-installed but too old - fail: - msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined - -- name: Error out if requested Docker is too old - fail: - msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required." - when: docker_version is defined and docker_version | version_compare('1.9.1', '<') - -# If a docker_version was requested, sanity check that we can install or upgrade to it, and -# no downgrade is required. 
-- name: Fail if Docker version requested but downgrade is required - fail: - msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') - -# This involves an extremely slow migration process, users should instead run the -# Docker 1.10 upgrade playbook to accomplish this. -- name: Error out if attempting to upgrade Docker across the 1.10 boundary - fail: - msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed." - when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') - -# Make sure Docker is installed, but does not update a running version. -# Docker upgrades are handled by a separate playbook. -- name: Install Docker - package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present - when: not openshift.common.is_atomic | bool - -- block: - # Extend the default Docker service unit file when using iptables-services - - name: Ensure docker.service.d directory exists - file: - path: "{{ docker_systemd_dir }}" - state: directory - - - name: Configure Docker service unit file - template: - dest: "{{ docker_systemd_dir }}/custom.conf" - src: custom.conf.j2 - when: not os_firewall_use_firewalld | default(True) | bool +# These tasks dispatch to the proper set of docker tasks based on the +# inventory:openshift_docker_use_system_container variable - include: udev_workaround.yml when: docker_udev_workaround | default(False) | bool -- stat: path=/etc/sysconfig/docker - register: docker_check - -- name: Set registry params - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^{{ item.reg_conf_var }}=.*$' - line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" - when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg - with_items: - - reg_conf_var: ADD_REGISTRY - reg_fact_val: "{{ docker_additional_registries | default(None, true)}}" - reg_flag: --add-registry - - reg_conf_var: BLOCK_REGISTRY - reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}" - reg_flag: --block-registry - - reg_conf_var: INSECURE_REGISTRY - reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}" - reg_flag: --insecure-registry - notify: - - restart docker - -- name: Set Proxy Settings - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^{{ item.reg_conf_var }}=.*$' - line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'" - state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}" - with_items: - - reg_conf_var: HTTP_PROXY - reg_fact_val: "{{ docker_http_proxy | default('') }}" - - reg_conf_var: HTTPS_PROXY - reg_fact_val: "{{ docker_https_proxy | default('') }}" - - reg_conf_var: NO_PROXY - reg_fact_val: "{{ docker_no_proxy | default('') }}" - notify: - - restart docker - when: - - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common' - -- name: Set various Docker options - lineinfile: - dest: /etc/sysconfig/docker - regexp: '^OPTIONS=.*$' - line: "OPTIONS='\ - {% if 
ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\ - {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\ - {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ - {% if docker_options is defined %} {{ docker_options }}{% endif %}\ - {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'" - when: docker_check.stat.isreg is defined and docker_check.stat.isreg - notify: - - restart docker - -- name: Start the Docker service - systemd: - name: docker - enabled: yes - state: started - daemon_reload: yes - register: start_result - - set_fact: - docker_service_status_changed: start_result | changed + l_use_system_container: "{{ openshift.docker.use_system_container | default(False) }}" + +- name: Use Package Docker if Requested + include: package_docker.yml + when: not l_use_system_container -- meta: flush_handlers +- name: Use System Container Docker if Requested + include: systemcontainer_docker.yml + when: l_use_system_container diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml new file mode 100644 index 000000000..10fb5772c --- /dev/null +++ b/roles/docker/tasks/package_docker.yml @@ -0,0 +1,116 @@ +--- +- name: Get current installed Docker version + command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" + when: not openshift.common.is_atomic | bool + register: curr_docker_version + changed_when: false + +- name: Error out if Docker pre-installed but too old + fail: + msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined + +- name: Error out if requested Docker is too old + fail: + msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required." + when: docker_version is defined and docker_version | version_compare('1.9.1', '<') + +# If a docker_version was requested, sanity check that we can install or upgrade to it, and +# no downgrade is required. +- name: Fail if Docker version requested but downgrade is required + fail: + msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') + +# This involves an extremely slow migration process, users should instead run the +# Docker 1.10 upgrade playbook to accomplish this. +- name: Error out if attempting to upgrade Docker across the 1.10 boundary + fail: + msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed." + when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') + +# Make sure Docker is installed, but does not update a running version. +# Docker upgrades are handled by a separate playbook. 
+- name: Install Docker + package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present + when: not openshift.common.is_atomic | bool + +- block: + # Extend the default Docker service unit file when using iptables-services + - name: Ensure docker.service.d directory exists + file: + path: "{{ docker_systemd_dir }}" + state: directory + + - name: Configure Docker service unit file + template: + dest: "{{ docker_systemd_dir }}/custom.conf" + src: custom.conf.j2 + when: not os_firewall_use_firewalld | default(True) | bool + +- stat: path=/etc/sysconfig/docker + register: docker_check + +- name: Set registry params + lineinfile: + dest: /etc/sysconfig/docker + regexp: '^{{ item.reg_conf_var }}=.*$' + line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" + when: item.reg_fact_val != '' and docker_check.stat.isreg is defined and docker_check.stat.isreg + with_items: + - reg_conf_var: ADD_REGISTRY + reg_fact_val: "{{ docker_additional_registries | default(None, true)}}" + reg_flag: --add-registry + - reg_conf_var: BLOCK_REGISTRY + reg_fact_val: "{{ docker_blocked_registries| default(None, true) }}" + reg_flag: --block-registry + - reg_conf_var: INSECURE_REGISTRY + reg_fact_val: "{{ docker_insecure_registries| default(None, true) }}" + reg_flag: --insecure-registry + notify: + - restart docker + +- name: Set Proxy Settings + lineinfile: + dest: /etc/sysconfig/docker + regexp: '^{{ item.reg_conf_var }}=.*$' + line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'" + state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}" + with_items: + - reg_conf_var: HTTP_PROXY + reg_fact_val: "{{ docker_http_proxy | default('') }}" + - reg_conf_var: HTTPS_PROXY + reg_fact_val: "{{ docker_https_proxy | default('') }}" + - reg_conf_var: NO_PROXY + reg_fact_val: "{{ docker_no_proxy | default('') }}" + notify: + - restart docker + when: + - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common' + +- name: Set various Docker options + lineinfile: + dest: /etc/sysconfig/docker + regexp: '^OPTIONS=.*$' + line: "OPTIONS='\ + {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\ + {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\ + {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\ + {% if docker_options is defined %} {{ docker_options }}{% endif %}\ + {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'" + when: docker_check.stat.isreg is defined and docker_check.stat.isreg + notify: + - restart docker + +- name: Start the Docker service + systemd: + name: docker + enabled: yes + state: started + daemon_reload: yes + register: start_result + +- set_fact: + docker_service_status_changed: start_result | changed + +- meta: flush_handlers diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml new file mode 100644 index 000000000..722232a9b --- /dev/null +++ b/roles/docker/tasks/systemcontainer_docker.yml @@ -0,0 +1,143 @@ +--- +# If docker_options are provided we should fail. We should not install docker and ignore +# the users configuration. 
NOTE: docker_options == inventory:openshift_docker_options +- name: Fail quickly if openshift_docker_options are set + assert: + that: + - docker_options is not defined or docker_options == "" + msg: | + Docker via System Container does not allow for the use of the openshift_docker_options + variable. If you want to use openshift_docker_options you will need to use the + traditional docker package install. Otherwise, comment out openshift_docker_options + in your inventory file. + +# Used to pull and install the system container +- name: Ensure atomic is installed + package: + name: atomic + state: present + when: not openshift.common.is_atomic | bool + +# At the time of writing the atomic command requires runc for its own use. This +# task is here in the event that the atomic package ever removes the dependency. +- name: Ensure runc is installed + package: + name: runc + state: present + when: not openshift.common.is_atomic | bool + +# If we are on atomic, set http_proxy and https_proxy in /etc/atomic.conf +- block: + + - name: Add http_proxy to /etc/atomic.conf + lineinfile: + path: /etc/atomic.conf + line: "http_proxy={{ openshift.common.http_proxy | default('') }}" + when: + - openshift.common.http_proxy is defined + - openshift.common.http_proxy != '' + + - name: Add https_proxy to /etc/atomic.conf + lineinfile: + path: /etc/atomic.conf + line: "https_proxy={{ openshift.common.https_proxy | default('') }}" + when: + - openshift.common.https_proxy is defined + - openshift.common.https_proxy != '' + + when: openshift.common.is_atomic | bool + + +- block: + + - name: Set to default prepend + set_fact: + l_docker_image_prepend: "gscrivano" + + - name: Use Red Hat Registry for image when distribution is Red Hat + set_fact: + l_docker_image_prepend: "registry.access.redhat.com/openshift3" + when: ansible_distribution == 'RedHat' + + - name: Use Fedora Registry for image when distribution is Fedora + set_fact: + l_docker_image_prepend: "registry.fedoraproject.org" + when: ansible_distribution == 'Fedora' + + # For https://github.com/openshift/openshift-ansible/pull/4049#discussion_r114478504 + - name: Use a testing registry if requested + set_fact: + l_docker_image_prepend: "{{ openshift_docker_systemcontainer_image_registry_override }}" + when: + - openshift_docker_systemcontainer_image_registry_override is defined + - openshift_docker_systemcontainer_image_registry_override != "" + + - name: Set the full image name + set_fact: + l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest" + +- name: Pre-pull Container Engine System Container image + command: "atomic pull --storage ostree {{ l_docker_image }}" + changed_when: false + +# Make sure docker is disabled. Errors are ignored as docker may not +# be installed.
+- name: Disable Docker + systemd: + name: docker + enabled: no + state: stopped + daemon_reload: yes + ignore_errors: True + +- name: Ensure docker.service.d directory exists + file: + path: "{{ docker_systemd_dir }}" + state: directory + +- name: Ensure /etc/docker directory exists + file: + path: "{{ docker_conf_dir }}" + state: directory + +- name: Install Container Engine System Container + oc_atomic_container: + name: "{{ openshift.docker.service_name }}" + image: "{{ l_docker_image }}" + state: latest + values: + - "system-package=no" + +- name: Configure Container Engine Service File + template: + dest: "{{ docker_systemd_dir }}/custom.conf" + src: systemcontainercustom.conf.j2 + +# Set local versions of facts that must be in json format for daemon.json +# NOTE: When jinja2.9+ is used the daemon.json file can move to using tojson +- set_fact: + l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}" + l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}" + l_docker_additional_registries: "{{ docker_additional_registries | default([]) | to_json }}" + l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}" + +# Configure container-engine using the daemon.json file +- name: Configure Container Engine + template: + dest: "{{ docker_conf_dir }}/daemon.json" + src: daemon.json + +# Enable and start the container-engine service +- name: Start the Container Engine service + systemd: + name: "{{ openshift.docker.service_name }}" + enabled: yes + state: started + daemon_reload: yes + register: start_result + +- set_fact: + docker_service_status_changed: start_result | changed + +- meta: flush_handlers diff --git a/roles/docker/templates/daemon.json b/roles/docker/templates/daemon.json new file mode 100644 index 000000000..7ea8164b3 --- /dev/null +++ b/roles/docker/templates/daemon.json @@ -0,0 +1,66 @@ + +{ + "api-cors-header": "", + "authorization-plugins": ["rhel-push-plugin"], + "bip": "", + "bridge": "", + "cgroup-parent": "", + "cluster-store": "", + "cluster-store-opts": {}, + "cluster-advertise": "", + "debug": true, + "default-gateway": "", + "default-gateway-v6": "", + "default-runtime": "oci", + "containerd": "/run/containerd.sock", + "default-ulimits": {}, + "disable-legacy-registry": false, + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": ["native.cgroupdriver=systemd"], + "exec-root": "", + "fixed-cidr": "", + "fixed-cidr-v6": "", + "graph": "", + "group": "", + "hosts": [], + "icc": false, + "insecure-registries": {{ l_docker_insecure_registries }}, + "ip": "0.0.0.0", + "iptables": false, + "ipv6": false, + "ip-forward": false, + "ip-masq": false, + "labels": [], + "live-restore": true, +{% if docker_log_driver is defined %} + "log-driver": "{{ docker_log_driver }}", +{%- endif %} + "log-level": "", + "log-opts": {{ l_docker_log_options }}, + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "mtu": 0, + "oom-score-adjust": -500, + "pidfile": "", + "raw-logs": false, + "registry-mirrors": [], + "runtimes": { + "oci": { + "path": "/usr/libexec/docker/docker-runc-current" + } + }, + "selinux-enabled": {{ docker_selinux_enabled|default(true) }}, + "storage-driver": "", + "storage-opts": [], + "tls": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "tlsverify": true, + "userns-remap": "", + "add-registry": {{ l_docker_additional_registries }}, + "blocked-registries": {{ l_docker_blocked_registries }}, + "userland-proxy-path":
"/usr/libexec/docker/docker-proxy-current" +} diff --git a/roles/docker/templates/systemcontainercustom.conf.j2 b/roles/docker/templates/systemcontainercustom.conf.j2 new file mode 100644 index 000000000..a4fb01d2b --- /dev/null +++ b/roles/docker/templates/systemcontainercustom.conf.j2 @@ -0,0 +1,17 @@ +# {{ ansible_managed }} + +[Service] +{%- if "http_proxy" in openshift.common %} +Environment=HTTP_PROXY={{ docker_http_proxy }} +{%- endif -%} +{%- if "https_proxy" in openshift.common %} +Environment=HTTPS_PROXY={{ docker_https_proxy }} +{%- endif -%} +{%- if "no_proxy" in openshift.common %} +Environment=NO_PROXY={{ docker_no_proxy }} +{%- endif %} +{%- if os_firewall_use_firewalld|default(true) %} +[Unit] +Wants=iptables.service +After=iptables.service +{%- endif %} diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml index 5237ed8f2..0082ded1e 100644 --- a/roles/docker/vars/main.yml +++ b/roles/docker/vars/main.yml @@ -1,3 +1,4 @@ --- -udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d docker_systemd_dir: /etc/systemd/system/docker.service.d +docker_conf_dir: /etc/docker/ +udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 29153f4df..e45f53219 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -13,5 +13,4 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_ etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}" -etcd_data_dir: /var/lib/etcd/ etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d" diff --git a/roles/etcd/files/etcdctl.sh b/roles/etcd/files/etcdctl.sh deleted file mode 100644 index 0e324a8a9..000000000 --- a/roles/etcd/files/etcdctl.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because -# command flags are different between the two. Should work on stand -# alone etcd hosts and master + etcd hosts too because we use the peer keys.
-etcdctl2() { - /usr/bin/etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://`hostname`:2379 ${@} -} - -etcdctl3() { - ETCDCTL_API=3 /usr/bin/etcdctl --cert /etc/etcd/peer.crt --key /etc/etcd/peer.key --cacert /etc/etcd/ca.crt --endpoints https://`hostname`:2379 ${@} -} diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index e0c70a181..689c07a84 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -24,3 +24,4 @@ dependencies: - service: etcd peering port: "{{ etcd_peer_port }}/tcp" - role: etcd_server_certificates +- role: etcd_common diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index c09da3b61..fa2f44609 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -10,51 +10,45 @@ package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present when: not etcd_is_containerized | bool -- name: Pull etcd container - command: docker pull {{ openshift.etcd.etcd_image }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" +- block: + - name: Pull etcd container + command: docker pull {{ openshift.etcd.etcd_image }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + + - name: Install etcd container service file + template: + dest: "/etc/systemd/system/etcd_container.service" + src: etcd.docker.service when: - etcd_is_containerized | bool - not openshift.common.is_etcd_system_container | bool -- name: Install etcd container service file - template: - dest: "/etc/systemd/system/etcd_container.service" - src: etcd.docker.service - when: - - etcd_is_containerized | bool - - not openshift.common.is_etcd_system_container | bool - - # Start secondary etcd instance for third party integrations # TODO: Determine an alternative to using thirdparty variable - -- name: Create configuration directory - file: - path: "{{ etcd_conf_dir }}" - state: directory - mode: 0700 - when: etcd_is_thirdparty | bool +- block: + - name: Create configuration directory + file: + path: "{{ etcd_conf_dir }}" + state: directory + mode: 0700 # TODO: retest with symlink to confirm it does or does not function -- name: Copy service file for etcd instance - copy: - src: /usr/lib/systemd/system/etcd.service - dest: "/etc/systemd/system/{{ etcd_service }}.service" - remote_src: True - when: etcd_is_thirdparty | bool - -- name: Create third party etcd service.d directory exists - file: - path: "{{ etcd_systemd_dir }}" - state: directory - when: etcd_is_thirdparty | bool - -- name: Configure third part etcd service unit file - template: - dest: "{{ etcd_systemd_dir }}/custom.conf" - src: custom.conf.j2 + - name: Copy service file for etcd instance + copy: + src: /usr/lib/systemd/system/etcd.service + dest: "/etc/systemd/system/{{ etcd_service }}.service" + remote_src: True + + - name: Create third party etcd service.d directory exists + file: + path: "{{ etcd_systemd_dir }}" + state: directory + + - name: Configure third part etcd service unit file + template: + dest: "{{ etcd_systemd_dir }}/custom.conf" + src: custom.conf.j2 when: etcd_is_thirdparty # TODO: this task may not be needed with Validate permissions @@ -80,28 +74,28 @@ command: systemctl daemon-reload when: etcd_is_thirdparty | bool -- name: Disable system etcd when containerized - systemd: - name: etcd - state: stopped - enabled: no - masked: yes - daemon_reload: yes - when: - - etcd_is_containerized | bool - - not 
openshift.common.is_etcd_system_container | bool - register: task_result - failed_when: "task_result|failed and 'could not' not in task_result.msg|lower" - -- name: Install etcd container service file - template: - dest: "/etc/systemd/system/etcd_container.service" - src: etcd.docker.service - when: etcd_is_containerized | bool and not openshift.common.is_etcd_system_container | bool - -- name: Install Etcd system container - include: system_container.yml - when: etcd_is_containerized | bool and openshift.common.is_etcd_system_container | bool +- block: + - name: Disable system etcd when containerized + systemd: + name: etcd + state: stopped + enabled: no + masked: yes + daemon_reload: yes + when: not openshift.common.is_etcd_system_container | bool + register: task_result + failed_when: task_result|failed and 'could not' not in task_result.msg|lower + + - name: Install etcd container service file + template: + dest: "/etc/systemd/system/etcd_container.service" + src: etcd.docker.service + when: not openshift.common.is_etcd_system_container | bool + + - name: Install Etcd system container + include: system_container.yml + when: openshift.common.is_etcd_system_container | bool + when: etcd_is_containerized | bool - name: Validate permissions on the config dir file: @@ -126,7 +120,9 @@ enabled: yes register: start_result -- include: etcdctl.yml +- include_role: + name: etcd_common + tasks_from: etcdctl.yml when: openshift_etcd_etcdctl_profile | default(true) | bool - name: Set fact etcd_service_status_changed diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service index e4d1b57e6..c8ceaa6ba 100644 --- a/roles/etcd/templates/etcd.docker.service +++ b/roles/etcd/templates/etcd.docker.service @@ -5,9 +5,9 @@ Requires=docker.service PartOf=docker.service [Service] -EnvironmentFile=/etc/etcd/etcd.conf +EnvironmentFile={{ etcd_conf_file }} ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }} -ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --env-file=/etc/sysconfig/etcd --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }} +ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }} ExecStop=/usr/bin/docker stop {{ etcd_service }} SyslogIdentifier=etcd_container Restart=always diff --git a/roles/etcd_common/README.md b/roles/etcd_common/README.md index 131a01490..d1c3a6602 100644 --- a/roles/etcd_common/README.md +++ b/roles/etcd_common/README.md @@ -1,17 +1,21 @@ etcd_common ======================== -TODO +Common resources for dependent etcd roles. E.g. default variables for: +* config directories +* certificates +* ports +* other settings -Requirements ------------- - -TODO +Or `delegated_serial_command` ansible module for executing a command on a remote node. E.g. -Role Variables --------------- +```yaml +- delegated_serial_command: + command: /usr/bin/make_database.sh arg1 arg2 + creates: /path/to/database +``` -TODO +Or etcdctl.yml playbook for installation of `etcdctl` aliases on a node (see example). 
Dependencies ------------ @@ -21,7 +25,22 @@ openshift-repos Example Playbook ---------------- -TODO +**Drop etcdctl aliases** + +```yaml +- include_role: + name: etcd_common + tasks_from: etcdctl +``` + +**Get access to common variables** + +```yaml +# meta.yml of etcd +... +dependencies: +- { role: etcd_common } +``` License ------- diff --git a/roles/etcd_common/defaults/main.yml b/roles/etcd_common/defaults/main.yml index c5efb0a0c..d12e6a07f 100644 --- a/roles/etcd_common/defaults/main.yml +++ b/roles/etcd_common/defaults/main.yml @@ -35,3 +35,6 @@ etcd_ip: "{{ ansible_default_ipv4.address }}" etcd_is_atomic: False etcd_is_containerized: False etcd_is_thirdparty: False + +# etcd dir vars +etcd_data_dir: /var/lib/etcd/ diff --git a/roles/etcd/tasks/etcdctl.yml b/roles/etcd_common/tasks/etcdctl.yml index 649ad23c1..6cb456677 100644 --- a/roles/etcd/tasks/etcdctl.yml +++ b/roles/etcd_common/tasks/etcdctl.yml @@ -4,9 +4,9 @@ when: not openshift.common.is_atomic | bool - name: Configure etcd profile.d alises - copy: - src: etcdctl.sh - dest: /etc/profile.d/etcdctl.sh + template: + dest: "/etc/profile.d/etcdctl.sh" + src: etcdctl.sh.j2 mode: 0755 owner: root group: root diff --git a/roles/etcd_common/templates/etcdctl.sh.j2 b/roles/etcd_common/templates/etcdctl.sh.j2 new file mode 100644 index 000000000..ac7d9c72f --- /dev/null +++ b/roles/etcd_common/templates/etcdctl.sh.j2 @@ -0,0 +1,12 @@ +#!/bin/bash +# Sets up handy aliases for etcd, need etcdctl2 and etcdctl3 because +# command flags are different between the two. Should work on stand +# alone etcd hosts and master + etcd hosts too because we use the peer keys. +etcdctl2() { + /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://`hostname`:2379 ${@} + +} + +etcdctl3() { + ETCDCTL_API=3 /usr/bin/etcdctl --cert {{ etcd_peer_cert_file }} --key {{ etcd_peer_key_file }} --cacert {{ etcd_peer_ca_file }} --endpoints https://`hostname`:2379 ${@} +} diff --git a/roles/etcd_server_certificates/meta/main.yml b/roles/etcd_server_certificates/meta/main.yml index 98c913dba..b453f2bd8 100644 --- a/roles/etcd_server_certificates/meta/main.yml +++ b/roles/etcd_server_certificates/meta/main.yml @@ -13,4 +13,4 @@ galaxy_info: - cloud - system dependencies: -- role: openshift_etcd_ca +- role: etcd_ca diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml index 94d1d18fb..c60c2115a 100644 --- a/roles/flannel/handlers/main.yml +++ b/roles/flannel/handlers/main.yml @@ -5,4 +5,6 @@ - name: restart docker become: yes - systemd: name=docker state=restarted + systemd: + name: "{{ openshift.docker.service_name }}" + state: restarted diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py index 4d083c4d5..7039a0cec 100644 --- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py +++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py @@ -900,6 +900,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1073,7 +1080,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + 
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py index 48e80a7cd..ae5806137 100644 --- a/roles/lib_openshift/library/oc_adm_manage_node.py +++ b/roles/lib_openshift/library/oc_adm_manage_node.py @@ -886,6 +886,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1059,7 +1066,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py index 35168d1a3..36eb294a8 100644 --- a/roles/lib_openshift/library/oc_adm_policy_group.py +++ b/roles/lib_openshift/library/oc_adm_policy_group.py @@ -872,6 +872,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1045,7 +1052,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py index 5f7e4b8fa..bedd45922 100644 --- a/roles/lib_openshift/library/oc_adm_policy_user.py +++ b/roles/lib_openshift/library/oc_adm_policy_user.py @@ -872,6 +872,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1045,7 +1052,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): @@ -1960,7 +1967,7 @@ class PolicyUser(OpenShiftCLI): @property def policybindings(self): if self._policy_bindings is None: - results = self._get('clusterpolicybindings', None) + results = 
self._get('policybindings', None) if results['returncode'] != 0: raise OpenShiftCLIError('Could not retrieve policybindings') self._policy_bindings = results['results'][0]['items'][0] diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py index a6718d921..c6fa85f90 100644 --- a/roles/lib_openshift/library/oc_adm_registry.py +++ b/roles/lib_openshift/library/oc_adm_registry.py @@ -990,6 +990,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1163,7 +1170,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): @@ -2531,25 +2538,34 @@ class Registry(OpenShiftCLI): def run_ansible(params, check_mode): '''run idempotent ansible code''' + registry_options = {'images': {'value': params['images'], 'include': True}, + 'latest_images': {'value': params['latest_images'], 'include': True}, + 'labels': {'value': params['labels'], 'include': True}, + 'ports': {'value': ','.join(params['ports']), 'include': True}, + 'replicas': {'value': params['replicas'], 'include': True}, + 'selector': {'value': params['selector'], 'include': True}, + 'service_account': {'value': params['service_account'], 'include': True}, + 'mount_host': {'value': params['mount_host'], 'include': True}, + 'env_vars': {'value': params['env_vars'], 'include': False}, + 'volume_mounts': {'value': params['volume_mounts'], 'include': False}, + 'edits': {'value': params['edits'], 'include': False}, + 'tls_key': {'value': params['tls_key'], 'include': True}, + 'tls_certificate': {'value': params['tls_certificate'], 'include': True}, + } + + # Do not always pass the daemonset and enforce-quota parameters because they are not understood + # by old versions of oc. + # Default value is false. So, it's safe to not pass an explicit false value to oc versions which + # understand these parameters. 
+ if params['daemonset']: + registry_options['daemonset'] = {'value': params['daemonset'], 'include': True} + if params['enforce_quota']: + registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True} + rconfig = RegistryConfig(params['name'], params['namespace'], params['kubeconfig'], - {'images': {'value': params['images'], 'include': True}, - 'latest_images': {'value': params['latest_images'], 'include': True}, - 'labels': {'value': params['labels'], 'include': True}, - 'ports': {'value': ','.join(params['ports']), 'include': True}, - 'replicas': {'value': params['replicas'], 'include': True}, - 'selector': {'value': params['selector'], 'include': True}, - 'service_account': {'value': params['service_account'], 'include': True}, - 'mount_host': {'value': params['mount_host'], 'include': True}, - 'env_vars': {'value': params['env_vars'], 'include': False}, - 'volume_mounts': {'value': params['volume_mounts'], 'include': False}, - 'edits': {'value': params['edits'], 'include': False}, - 'enforce_quota': {'value': params['enforce_quota'], 'include': True}, - 'daemonset': {'value': params['daemonset'], 'include': True}, - 'tls_key': {'value': params['tls_key'], 'include': True}, - 'tls_certificate': {'value': params['tls_certificate'], 'include': True}, - }) + registry_options) ocregistry = Registry(rconfig, params['debug']) diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py index 0e4b336fb..8a4f93372 100644 --- a/roles/lib_openshift/library/oc_adm_router.py +++ b/roles/lib_openshift/library/oc_adm_router.py @@ -1015,6 +1015,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1188,7 +1195,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py index a34ce351e..d81c29784 100644 --- a/roles/lib_openshift/library/oc_clusterrole.py +++ b/roles/lib_openshift/library/oc_clusterrole.py @@ -864,6 +864,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1037,7 +1044,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): @@ -1531,10 +1538,10 @@ class Rule(object): results = [] for rule in inc_rules: - results.append(Rule(rule['apiGroups'], - 
rule['attributeRestrictions'], - rule['resources'], - rule['verbs'])) + results.append(Rule(rule.get('apiGroups', ['']), + rule.get('attributeRestrictions', None), + rule.get('resources', []), + rule.get('verbs', []))) return results @@ -1633,7 +1640,7 @@ class OCClusterRole(OpenShiftCLI): @property def clusterrole(self): ''' property for clusterrole''' - if not self._clusterrole: + if self._clusterrole is None: self.get() return self._clusterrole @@ -1669,6 +1676,7 @@ class OCClusterRole(OpenShiftCLI): elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']: result['returncode'] = 0 + self.clusterrole = None return result @@ -1738,6 +1746,9 @@ class OCClusterRole(OpenShiftCLI): # Create it here api_rval = oc_clusterrole.create() + if api_rval['returncode'] != 0: + return {'failed': True, 'msg': api_rval} + # return the created object api_rval = oc_clusterrole.get() diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py index 69dd23a0e..bdcb3f278 100644 --- a/roles/lib_openshift/library/oc_configmap.py +++ b/roles/lib_openshift/library/oc_configmap.py @@ -870,6 +870,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1043,7 +1050,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py index 70329ccfe..be1b3a01e 100644 --- a/roles/lib_openshift/library/oc_edit.py +++ b/roles/lib_openshift/library/oc_edit.py @@ -914,6 +914,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1087,7 +1094,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py index bda5eebc5..4ac6e4aeb 100644 --- a/roles/lib_openshift/library/oc_env.py +++ b/roles/lib_openshift/library/oc_env.py @@ -881,6 +881,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1054,7 +1061,7 @@ 
class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py index 462e14868..b6f058340 100644 --- a/roles/lib_openshift/library/oc_group.py +++ b/roles/lib_openshift/library/oc_group.py @@ -854,6 +854,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1027,7 +1034,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py index 8aed060bb..c094c9472 100644 --- a/roles/lib_openshift/library/oc_image.py +++ b/roles/lib_openshift/library/oc_image.py @@ -873,6 +873,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1046,7 +1053,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py index 0d18a7afe..a76dd44c4 100644 --- a/roles/lib_openshift/library/oc_label.py +++ b/roles/lib_openshift/library/oc_label.py @@ -890,6 +890,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1063,7 +1070,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py index 0b01670c6..bdce28045 100644 --- a/roles/lib_openshift/library/oc_obj.py +++ 
b/roles/lib_openshift/library/oc_obj.py @@ -98,7 +98,7 @@ options: aliases: [] kind: description: - - The kind attribute of the object. e.g. dc, bc, svc, route + - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc". required: True default: None aliases: [] @@ -893,6 +893,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1066,7 +1073,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): @@ -1539,12 +1546,9 @@ class OCObject(OpenShiftCLI): # Delete ######## if state == 'absent': - # if we were passed a name, verify its not in our results - if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']): - return {'changed': False, 'state': state} - - # verify results are empty for the selector - if params['selector'] is not None and len(api_rval['results']) == 0: + # verify its not in our results + if (params['name'] is not None or params['selector'] is not None) and \ + (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0): return {'changed': False, 'state': state} if check_mode: diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py index 9b321b47c..aeb4e5686 100644 --- a/roles/lib_openshift/library/oc_objectvalidator.py +++ b/roles/lib_openshift/library/oc_objectvalidator.py @@ -825,6 +825,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -998,7 +1005,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py index 34f80ce13..f7aa8c0d2 100644 --- a/roles/lib_openshift/library/oc_process.py +++ b/roles/lib_openshift/library/oc_process.py @@ -882,6 +882,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1055,7 +1062,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, 
stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py index 331f31e41..b044a47ce 100644 --- a/roles/lib_openshift/library/oc_project.py +++ b/roles/lib_openshift/library/oc_project.py @@ -879,6 +879,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1052,7 +1059,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py index 3e4601cc3..8604cc2f3 100644 --- a/roles/lib_openshift/library/oc_pvc.py +++ b/roles/lib_openshift/library/oc_pvc.py @@ -874,6 +874,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1047,7 +1054,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py index 755ab3b02..fef48daf0 100644 --- a/roles/lib_openshift/library/oc_route.py +++ b/roles/lib_openshift/library/oc_route.py @@ -924,6 +924,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1097,7 +1104,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py index 0c83338b0..384df0ee3 100644 --- a/roles/lib_openshift/library/oc_scale.py +++ b/roles/lib_openshift/library/oc_scale.py @@ -868,6 +868,13 @@ class OpenShiftCLI(object): def _replace(self, 
fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1041,7 +1048,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py index 26e52a926..443750c5d 100644 --- a/roles/lib_openshift/library/oc_secret.py +++ b/roles/lib_openshift/library/oc_secret.py @@ -914,6 +914,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1087,7 +1094,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py index 440cda1b3..7537bdb5b 100644 --- a/roles/lib_openshift/library/oc_service.py +++ b/roles/lib_openshift/library/oc_service.py @@ -920,6 +920,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1093,7 +1100,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py index 5eb36ee32..03a4dd3b9 100644 --- a/roles/lib_openshift/library/oc_serviceaccount.py +++ b/roles/lib_openshift/library/oc_serviceaccount.py @@ -866,6 +866,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1039,7 +1046,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return 
proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py index 1bc788e87..db1010694 100644 --- a/roles/lib_openshift/library/oc_serviceaccount_secret.py +++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py @@ -866,6 +866,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1039,7 +1046,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py index 3009e661a..c3885c1ac 100644 --- a/roles/lib_openshift/library/oc_user.py +++ b/roles/lib_openshift/library/oc_user.py @@ -926,6 +926,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1099,7 +1106,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py index 88f295a74..5c4596c09 100644 --- a/roles/lib_openshift/library/oc_version.py +++ b/roles/lib_openshift/library/oc_version.py @@ -838,6 +838,13 @@ class OpenShiftCLI(object): def _replace(self, fname, force=False): '''replace the current object with oc replace''' + # We are removing the 'resourceVersion' to handle + # a race condition when modifying oc objects + yed = Yedit(fname) + results = yed.delete('metadata.resourceVersion') + if results[0]: + yed.write() + cmd = ['replace', '-f', fname] if force: cmd.append('--force') @@ -1011,7 +1018,7 @@ class OpenShiftCLI(object): stdout, stderr = proc.communicate(input_data) - return proc.returncode, stdout.decode(), stderr.decode() + return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py index 9508f2157..b89215510 100644 --- a/roles/lib_openshift/library/oc_volume.py +++ b/roles/lib_openshift/library/oc_volume.py @@ -915,6 +915,13 @@ class 
OpenShiftCLI(object):
     def _replace(self, fname, force=False):
         '''replace the current object with oc replace'''
+        # We are removing the 'resourceVersion' to handle
+        # a race condition when modifying oc objects
+        yed = Yedit(fname)
+        results = yed.delete('metadata.resourceVersion')
+        if results[0]:
+            yed.write()
+
         cmd = ['replace', '-f', fname]
         if force:
             cmd.append('--force')
@@ -1088,7 +1095,7 @@ class OpenShiftCLI(object):
 
         stdout, stderr = proc.communicate(input_data)
 
-        return proc.returncode, stdout.decode(), stderr.decode()
+        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
 
     # pylint: disable=too-many-arguments,too-many-branches
     def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
diff --git a/roles/lib_openshift/src/class/oc_adm_policy_user.py b/roles/lib_openshift/src/class/oc_adm_policy_user.py
index 88fcc1ddc..37a685ebb 100644
--- a/roles/lib_openshift/src/class/oc_adm_policy_user.py
+++ b/roles/lib_openshift/src/class/oc_adm_policy_user.py
@@ -46,7 +46,7 @@ class PolicyUser(OpenShiftCLI):
     @property
     def policybindings(self):
         if self._policy_bindings is None:
-            results = self._get('clusterpolicybindings', None)
+            results = self._get('policybindings', None)
             if results['returncode'] != 0:
                 raise OpenShiftCLIError('Could not retrieve policybindings')
             self._policy_bindings = results['results'][0]['items'][0]
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
index 720b44cdc..3c130fe28 100644
--- a/roles/lib_openshift/src/class/oc_adm_registry.py
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -331,25 +331,34 @@ class Registry(OpenShiftCLI):
     def run_ansible(params, check_mode):
         '''run idempotent ansible code'''
+        registry_options = {'images': {'value': params['images'], 'include': True},
+                            'latest_images': {'value': params['latest_images'], 'include': True},
+                            'labels': {'value': params['labels'], 'include': True},
+                            'ports': {'value': ','.join(params['ports']), 'include': True},
+                            'replicas': {'value': params['replicas'], 'include': True},
+                            'selector': {'value': params['selector'], 'include': True},
+                            'service_account': {'value': params['service_account'], 'include': True},
+                            'mount_host': {'value': params['mount_host'], 'include': True},
+                            'env_vars': {'value': params['env_vars'], 'include': False},
+                            'volume_mounts': {'value': params['volume_mounts'], 'include': False},
+                            'edits': {'value': params['edits'], 'include': False},
+                            'tls_key': {'value': params['tls_key'], 'include': True},
+                            'tls_certificate': {'value': params['tls_certificate'], 'include': True},
+                           }
+
+        # Do not always pass the daemonset and enforce-quota parameters because they are not understood
+        # by old versions of oc.
+        # Default value is false. So, it's safe to not pass an explicit false value to oc versions which
+        # understand these parameters.
+        if params['daemonset']:
+            registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
+        if params['enforce_quota']:
+            registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
+
         rconfig = RegistryConfig(params['name'],
                                  params['namespace'],
                                  params['kubeconfig'],
-                                 {'images': {'value': params['images'], 'include': True},
-                                  'latest_images': {'value': params['latest_images'], 'include': True},
-                                  'labels': {'value': params['labels'], 'include': True},
-                                  'ports': {'value': ','.join(params['ports']), 'include': True},
-                                  'replicas': {'value': params['replicas'], 'include': True},
-                                  'selector': {'value': params['selector'], 'include': True},
-                                  'service_account': {'value': params['service_account'], 'include': True},
-                                  'mount_host': {'value': params['mount_host'], 'include': True},
-                                  'env_vars': {'value': params['env_vars'], 'include': False},
-                                  'volume_mounts': {'value': params['volume_mounts'], 'include': False},
-                                  'edits': {'value': params['edits'], 'include': False},
-                                  'enforce_quota': {'value': params['enforce_quota'], 'include': True},
-                                  'daemonset': {'value': params['daemonset'], 'include': True},
-                                  'tls_key': {'value': params['tls_key'], 'include': True},
-                                  'tls_certificate': {'value': params['tls_certificate'], 'include': True},
-                                 })
+                                 registry_options)
 
         ocregistry = Registry(rconfig, params['debug'])
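The registry hunk above only adds the daemonset and enforce-quota entries when the caller actually enabled them, so older oc clients that predate those flags are never handed options they cannot parse. A minimal standalone sketch of that pattern, not part of the patch; the build_cli_args helper and the example option values are illustrative only:

# Include optional CLI flags only when the caller set them, so older clients
# never receive flags they do not understand.
def build_cli_args(base_cmd, options):
    """Turn an options dict into argv, skipping unset or false optional flags."""
    args = list(base_cmd)
    for name, opt in options.items():
        if not opt.get('include', True):
            continue  # handled elsewhere (edits, env vars), not as a CLI flag
        value = opt['value']
        if value in (None, '', False):
            continue  # omit the flag entirely and rely on the server-side default
        args.append('--{}={}'.format(name.replace('_', '-'), value))
    return args

print(build_cli_args(['oc', 'adm', 'registry'],
                     {'replicas': {'value': 1, 'include': True},
                      'daemonset': {'value': False, 'include': True},
                      'selector': {'value': 'type=infra', 'include': True}}))
# ['oc', 'adm', 'registry', '--replicas=1', '--selector=type=infra']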
diff --git a/roles/lib_openshift/src/class/oc_clusterrole.py b/roles/lib_openshift/src/class/oc_clusterrole.py
index 1d3d977db..ae6795446 100644
--- a/roles/lib_openshift/src/class/oc_clusterrole.py
+++ b/roles/lib_openshift/src/class/oc_clusterrole.py
@@ -22,7 +22,7 @@ class OCClusterRole(OpenShiftCLI):
     @property
     def clusterrole(self):
         ''' property for clusterrole'''
-        if not self._clusterrole:
+        if self._clusterrole is None:
             self.get()
         return self._clusterrole
@@ -58,6 +58,7 @@ class OCClusterRole(OpenShiftCLI):
         elif 'clusterrole "{}" not found'.format(self.name) in result['stderr']:
             result['returncode'] = 0
+            self.clusterrole = None
 
         return result
@@ -127,6 +128,9 @@ class OCClusterRole(OpenShiftCLI):
             # Create it here
             api_rval = oc_clusterrole.create()
+            if api_rval['returncode'] != 0:
+                return {'failed': True, 'msg': api_rval}
+
             # return the created object
             api_rval = oc_clusterrole.get()
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
index 667b98eac..6f0da3d5c 100644
--- a/roles/lib_openshift/src/class/oc_obj.py
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -115,12 +115,9 @@ class OCObject(OpenShiftCLI):
         # Delete
         ########
         if state == 'absent':
-            # if we were passed a name, verify its not in our results
-            if params['name'] is not None and not Utils.exists(api_rval['results'], params['name']):
-                return {'changed': False, 'state': state}
-
-            # verify results are empty for the selector
-            if params['selector'] is not None and len(api_rval['results']) == 0:
+            # verify its not in our results
+            if (params['name'] is not None or params['selector'] is not None) and \
+               (len(api_rval['results']) == 0 or len(api_rval['results'][0].get('items', [])) == 0):
                 return {'changed': False, 'state': state}
 
             if check_mode:
diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj
index e44843eb3..4ff912b2d 100644
--- a/roles/lib_openshift/src/doc/obj
+++ b/roles/lib_openshift/src/doc/obj
@@ -47,7 +47,7 @@ options:
     aliases: []
   kind:
     description:
-    - The kind attribute of the object. e.g. dc, bc, svc, route
+    - The kind attribute of the object. e.g. dc, bc, svc, route. May be a comma-separated list, e.g. "dc,po,svc".
     required: True
     default: None
     aliases: []
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 1868b1420..2bf795e25 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -76,6 +76,13 @@ class OpenShiftCLI(object):
     def _replace(self, fname, force=False):
         '''replace the current object with oc replace'''
+        # We are removing the 'resourceVersion' to handle
+        # a race condition when modifying oc objects
+        yed = Yedit(fname)
+        results = yed.delete('metadata.resourceVersion')
+        if results[0]:
+            yed.write()
+
         cmd = ['replace', '-f', fname]
         if force:
             cmd.append('--force')
@@ -249,7 +256,7 @@ class OpenShiftCLI(object):
 
         stdout, stderr = proc.communicate(input_data)
 
-        return proc.returncode, stdout.decode(), stderr.decode()
+        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
 
     # pylint: disable=too-many-arguments,too-many-branches
     def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
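The _replace() addition above (repeated across the generated library modules) drops metadata.resourceVersion from the file before running oc replace, so a resourceVersion captured earlier cannot cause a stale-version conflict if the object changed in the meantime. A rough standalone sketch of the same idea using plain PyYAML instead of the role's Yedit helper; strip_resource_version is an illustrative name and PyYAML is assumed to be available:

import yaml

def strip_resource_version(path):
    """Drop metadata.resourceVersion so a replace does not hit an
    optimistic-concurrency conflict against a newer stored object."""
    with open(path) as f:
        obj = yaml.safe_load(f) or {}
    if obj.get('metadata', {}).pop('resourceVersion', None) is not None:
        with open(path, 'w') as f:
            yaml.safe_dump(obj, f, default_flow_style=False)
        return True
    return False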
diff --git a/roles/lib_openshift/src/lib/rule.py b/roles/lib_openshift/src/lib/rule.py
index 4590dcf90..fe5ed9723 100644
--- a/roles/lib_openshift/src/lib/rule.py
+++ b/roles/lib_openshift/src/lib/rule.py
@@ -136,9 +136,9 @@ class Rule(object):
         results = []
         for rule in inc_rules:
-            results.append(Rule(rule['apiGroups'],
-                                rule['attributeRestrictions'],
-                                rule['resources'],
-                                rule['verbs']))
+            results.append(Rule(rule.get('apiGroups', ['']),
+                                rule.get('attributeRestrictions', None),
+                                rule.get('resources', []),
+                                rule.get('verbs', [])))
 
         return results
diff --git a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
index 6990a11a8..f350bd25d 100644
--- a/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
+++ b/roles/lib_openshift/src/test/integration/filter_plugins/filters.py
@@ -1,6 +1,5 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
 '''
 Custom filters for use in testing
 '''
diff --git a/roles/lib_openshift/src/test/integration/oc_label.yml b/roles/lib_openshift/src/test/integration/oc_label.yml
index b4e721407..22cf687c5 100755
--- a/roles/lib_openshift/src/test/integration/oc_label.yml
+++ b/roles/lib_openshift/src/test/integration/oc_label.yml
@@ -15,7 +15,7 @@
   - name: ensure needed vars are defined
     fail:
       msg: "{{ item }} not defined"
-    when: "{{ item }} is not defined"
+    when: item is not defined
     with_items:
     - cli_master_test  # ansible inventory instance to run playbook against
diff --git a/roles/lib_openshift/src/test/integration/oc_user.yml b/roles/lib_openshift/src/test/integration/oc_user.yml
index ad1f9d188..9b4290052 100755
--- a/roles/lib_openshift/src/test/integration/oc_user.yml
+++ b/roles/lib_openshift/src/test/integration/oc_user.yml
@@ -14,7 +14,7 @@
   - name: ensure needed vars are defined
     fail:
       msg: "{{ item }} no defined"
-    when: "{{ item}} is not defined"
+    when: item is not defined
     with_items:
     - cli_master_test  # ansible inventory instance to run playbook against
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
index bab36fddc..97cf86170 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
@@ -205,10 +205,11 @@ class RegistryTest(unittest.TestCase):
             }
         ]}'''
 
+    @mock.patch('oc_adm_registry.locate_oc_binary')
     @mock.patch('oc_adm_registry.Utils._write')
     @mock.patch('oc_adm_registry.Utils.create_tmpfile_copy')
     @mock.patch('oc_adm_registry.Registry._run')
-    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
         ''' Testing state present '''
         params = {'state': 'present',
                   'debug': False,
@@ -240,10 +241,9 @@ class RegistryTest(unittest.TestCase):
             (0, '', ''),
         ]
 
-        mock_tmpfile_copy.side_effect = [
-            '/tmp/mocked_kubeconfig',
-            '/tmp/mocked_kubeconfig',
-        ]
+        mock_tmpfile_copy.return_value = '/tmp/mocked_kubeconfig'
+
+        mock_oc_binary.return_value = 'oc'
 
         results = Registry.run_ansible(params, False)
 
@@ -254,7 +254,7 @@ class RegistryTest(unittest.TestCase):
         mock_cmd.assert_has_calls([
             mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
             mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
-            mock.call(['oc', 'adm', 'registry', '--daemonset=False', '--enforce-quota=False',
+            mock.call(['oc', 'adm', 'registry',
                        '--ports=5000', '--replicas=1', '--selector=type=infra', '--service-account=registry', '--dry-run=True',
                        '-o', 'json', '-n', 'default'], None),
             mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
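The unit-test changes above patch locate_oc_binary alongside the existing _run and create_tmpfile_copy mocks, so the tests no longer need a real oc client or kubeconfig on the test host. A condensed, self-contained sketch of that mocking style with unittest.mock; the Registry stand-in and locate_oc_binary below are defined locally for illustration and are not the generated module's code:

import unittest
from unittest import mock

def locate_oc_binary():
    """Stand-in for the helper the generated modules use to find 'oc'."""
    return '/usr/bin/oc'

class Registry(object):
    """Tiny stand-in; the real _run would shell out to the oc client."""
    def _run(self, cmd):
        raise RuntimeError('should be mocked in tests')

    def exists(self):
        rcode, out, _ = self._run([locate_oc_binary(), 'get', 'dc', 'docker-registry'])
        return rcode == 0 and out != ''

class RegistryTest(unittest.TestCase):
    # Decorators apply bottom-up, so mock_cmd (for _run) is the first argument.
    @mock.patch(__name__ + '.locate_oc_binary')
    @mock.patch.object(Registry, '_run')
    def test_exists(self, mock_cmd, mock_oc_binary):
        mock_oc_binary.return_value = 'oc'  # no real client on the test host
        mock_cmd.return_value = (0, '{"kind": "DeploymentConfig"}', '')
        self.assertTrue(Registry().exists())

if __name__ == '__main__':
    unittest.main()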
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
index 51393dbaf..5481ac623 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
@@ -286,10 +286,11 @@ class RouterTest(unittest.TestCase):
         ]
     }'''
 
+    @mock.patch('oc_adm_router.locate_oc_binary')
     @mock.patch('oc_adm_router.Utils._write')
     @mock.patch('oc_adm_router.Utils.create_tmpfile_copy')
     @mock.patch('oc_adm_router.Router._run')
-    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write):
+    def test_state_present(self, mock_cmd, mock_tmpfile_copy, mock_write, mock_oc_binary):
         ''' Testing a create '''
         params = {'state': 'present',
                   'debug': False,
@@ -345,6 +346,10 @@ class RouterTest(unittest.TestCase):
             '/tmp/mocked_kubeconfig',
         ]
 
+        mock_oc_binary.side_effect = [
+            'oc',
+        ]
+
         results = Router.run_ansible(params, False)
 
         self.assertTrue(results['changed'])
diff --git a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
index da326742f..b19a5a880 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_objectvalidator.py
@@ -25,9 +25,10 @@ class OCObjectValidatorTest(unittest.TestCase):
 
     maxDiff = None
 
+    @mock.patch('oc_objectvalidator.locate_oc_binary')
     @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy')
     @mock.patch('oc_objectvalidator.OCObjectValidator._run')
-    def
test_error_code(self, mock_cmd, mock_tmpfile_copy): + def test_error_code(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary): ''' Testing when we fail to get objects ''' # Arrange @@ -98,6 +104,10 @@ class OCObjectValidatorTest(unittest.TestCase): '/tmp/mocked_kubeconfig', ] + mock_oc_binary.side_effect = [ + 'oc' + ] + error_results = { 'returncode': 1, 'stderr': 'Error.', @@ -120,9 +130,10 @@ class OCObjectValidatorTest(unittest.TestCase): mock.call(['oc', 'get', 'hostsubnet', '-o', 'json', '-n', 'default'], None), ]) + @mock.patch('oc_objectvalidator.locate_oc_binary') @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy') @mock.patch('oc_objectvalidator.OCObjectValidator._run') - def test_valid_both(self, mock_cmd, mock_tmpfile_copy): + def test_valid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary): ''' Testing when both all objects are valid ''' # Arrange @@ -427,6 +438,10 @@ class OCObjectValidatorTest(unittest.TestCase): '/tmp/mocked_kubeconfig', ] + mock_oc_binary.side_effect = [ + 'oc' + ] + # Act results = OCObjectValidator.run_ansible(params) @@ -441,9 +456,10 @@ class OCObjectValidatorTest(unittest.TestCase): mock.call(['oc', 'get', 'namespace', '-o', 'json', '-n', 'default'], None), ]) + @mock.patch('oc_objectvalidator.locate_oc_binary') @mock.patch('oc_objectvalidator.Utils.create_tmpfile_copy') @mock.patch('oc_objectvalidator.OCObjectValidator._run') - def test_invalid_both(self, mock_cmd, mock_tmpfile_copy): + def test_invalid_both(self, mock_cmd, mock_tmpfile_copy, mock_oc_binary): ''' Testing when all objects are invalid ''' # Arrange @@ -886,6 +902,10 @@ class OCObjectValidatorTest(unittest.TestCase): '/tmp/mocked_kubeconfig', ] + mock_oc_binary.side_effect = [ + 'oc' + ] + # Act results = OCObjectValidator.run_ansible(params) diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index 3b17d9ed6..c7b906949 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -95,7 +95,7 @@ {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} --certificate-authority {{ legacy_ca_certificate }} {% endfor %} - --hostnames={{ openshift.common.all_hostnames | join(',') }} + --hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }} --master={{ openshift.master.api_url }} --public-master={{ openshift.master.public_api_url }} --cert-dir={{ openshift_ca_config_dir }} diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py index 5f102e960..a2bc9ecdb 100644 --- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py +++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 """ Custom filters for use in openshift-ansible """ @@ -35,7 +34,7 @@ Example playbook usage: become: no run_once: yes delegate_to: localhost - when: "{{ openshift_certificate_expiry_save_json_results|bool }}" + when: openshift_certificate_expiry_save_json_results|bool copy: content: "{{ hostvars|oo_cert_expiry_results_to_json() }}" dest: "{{ openshift_certificate_expiry_json_results_path }}" diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py index c204b5341..0242f5b43 100644 --- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py +++ 
b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -135,7 +135,7 @@ platforms missing the Python OpenSSL library.
                 continue
             elif l.startswith('Subject:'):
-                # O=system:nodes, CN=system:node:m01.example.com
+                # O = system:nodes, CN = system:node:m01.example.com
                 self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
 
     def get_serial_number(self):
@@ -202,7 +202,7 @@ object"""
         """
         self.subjects = []
         for s in subject_string.split(', '):
-            name, _, value = s.partition('=')
+            name, _, value = s.partition(' = ')
             self.subjects.append((name, value))
 
     def get_components(self):
diff --git a/roles/openshift_certificate_expiry/tasks/main.yml b/roles/openshift_certificate_expiry/tasks/main.yml
index 139d5de6e..b5234bd1e 100644
--- a/roles/openshift_certificate_expiry/tasks/main.yml
+++ b/roles/openshift_certificate_expiry/tasks/main.yml
@@ -13,12 +13,12 @@
     src: cert-expiry-table.html.j2
     dest: "{{ openshift_certificate_expiry_html_report_path }}"
   delegate_to: localhost
-  when: "{{ openshift_certificate_expiry_generate_html_report|bool }}"
+  when: openshift_certificate_expiry_generate_html_report|bool
 
 - name: Generate the result JSON string
   run_once: yes
   set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
-  when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+  when: openshift_certificate_expiry_save_json_results|bool
 
 - name: Generate results JSON file
   become: no
@@ -27,4 +27,4 @@
     src: save_json_results.j2
     dest: "{{ openshift_certificate_expiry_json_results_path }}"
   delegate_to: localhost
-  when: "{{ openshift_certificate_expiry_save_json_results|bool }}"
+  when: openshift_certificate_expiry_save_json_results|bool
diff --git a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
index ccdd48fa8..8a521a765 100644
--- a/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
+++ b/roles/openshift_certificate_expiry/test/test_fakeopensslclasses.py
@@ -17,7 +17,8 @@ from openshift_cert_expiry import FakeOpenSSLCertificate  # noqa: E402
 @pytest.fixture(scope='module')
 def fake_valid_cert(valid_cert):
-    cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text']
+    cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text',
+           '-nameopt', 'oneline']
     cert = subprocess.check_output(cmd)
     return FakeOpenSSLCertificate(cert.decode('utf8'))
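The two hunks above go together: the test fixture forces 'openssl x509 -text -nameopt oneline', which prints subjects as 'O = system:nodes, CN = ...', and the parser is updated to split each component on ' = ' instead of '='. A standalone illustration of that parsing step, not the module's actual class:

def parse_subject(subject_line):
    """Parse 'Subject: O = system:nodes, CN = system:node:m01.example.com'
    into (name, value) tuples, assuming openssl's oneline name format."""
    pairs = []
    for component in subject_line.partition(': ')[-1].split(', '):
        name, _, value = component.partition(' = ')
        pairs.append((name, value))
    return pairs

print(parse_subject('Subject: O = system:nodes, CN = system:node:m01.example.com'))
# [('O', 'system:nodes'), ('CN', 'system:node:m01.example.com')]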
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
index 4ed3e1f01..57ac16602 100644
--- a/roles/openshift_cli/library/openshift_container_binary_sync.py
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -1,8 +1,6 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
 # pylint: disable=missing-docstring,invalid-name
-#
 import random
 import tempfile
diff --git a/roles/openshift_cloud_provider/tasks/openstack.yml b/roles/openshift_cloud_provider/tasks/openstack.yml
index f22dd4520..5788e6d74 100644
--- a/roles/openshift_cloud_provider/tasks/openstack.yml
+++ b/roles/openshift_cloud_provider/tasks/openstack.yml
@@ -7,4 +7,4 @@
   template:
     dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf"
     src: openstack.conf.j2
-  when: "openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)"
+  when: openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 049ceffe0..350512452 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -16,6 +16,7 @@
       disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"
       hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"
       hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
+      use_system_container: "{{ openshift_docker_use_system_container | default(False) }}"
 
 - set_fact:
     docker_additional_registries: "{{ openshift.docker.additional_registries
diff --git a/roles/openshift_etcd_ca/tasks/main.yml b/roles/openshift_etcd_ca/tasks/main.yml
deleted file mode 100644
index ed97d539c..000000000
--- a/roles/openshift_etcd_ca/tasks/main.yml
+++ /dev/null
@@ -1 +0,0 @@
----
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 0f2bec6d3..c7e51bbfc 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -6,7 +6,7 @@
 # This script should be run from openshift-ansible/roles/openshift_examples
 
 XPAAS_VERSION=ose-v1.3.6
-ORIGIN_VERSION=${1:-v1.6}
+ORIGIN_VERSION=${1:-v3.6}
 RHAMP_TAG=1.0.0.GA
 RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
 EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
index 536385712..08751d131 120000
--- a/roles/openshift_examples/files/examples/latest
+++ b/roles/openshift_examples/files/examples/latest
@@ -1 +1 @@
-v1.6
\ No newline at end of file
+v3.6
\ No newline at end of file diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json index 1a90a9409..a81dbb654 100644 --- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json @@ -800,7 +800,7 @@ "openshift.io/display-name": "Jenkins 1.X", "description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", - "tags": "jenkins", + "tags": "hidden,jenkins", "version": "1.x" }, "from": { diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json index eb94c3bb4..2ed0efe1e 100644 --- a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json @@ -707,7 +707,7 @@ "openshift.io/display-name": "Jenkins 1.X", "description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", - "tags": "jenkins", + "tags": "hidden,jenkins", "version": "1.x" }, "from": { diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json index fa31f7f61..a2b59c2d3 100644 --- a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json +++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dotnet-pgsql-persistent.json @@ -19,6 +19,17 @@ }, "objects": [ { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}" + }, + "stringData": { + "database-password": "${DATABASE_PASSWORD}", + "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}" + } + }, + { "kind": "Service", "apiVersion": "v1", "metadata": { @@ -209,7 +220,12 @@ "env": [ { "name": "ConnectionString", - "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}" + "valueFrom": { + "secretKeyRef": { + "name": "${NAME}", + "key": "connect-string" + } + } } ], "resources": { @@ -373,7 +389,12 @@ }, { "name": "POSTGRESQL_PASSWORD", - "value": "${DATABASE_PASSWORD}" + "valueFrom": { + "secretKeyRef": { + "name": "${NAME}", + "key": "database-password" + } + } }, { "name": "POSTGRESQL_DATABASE", diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml index 14bdd1dca..14bdd1dca 100644 --- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-app-example.yaml +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-app-example.yaml diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml index 709d8d976..709d8d976 
100644 --- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-pv-example.yaml +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-pv-example.yaml diff --git a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml index 4f25a9c8f..4f25a9c8f 100644 --- a/roles/openshift_examples/files/examples/v1.6/cfme-templates/cfme-template.yaml +++ b/roles/openshift_examples/files/examples/v3.6/cfme-templates/cfme-template.yaml diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md index a36d7ba7d..a36d7ba7d 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/README.md +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/README.md diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json index f347f1f9f..f347f1f9f 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json index 6ed744777..6ed744777 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mariadb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mariadb-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json index 97a8abf6d..97a8abf6d 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json index 0656219fb..0656219fb 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mongodb-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mongodb-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json index d60b4647d..d60b4647d 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json index c2bfa40fd..c2bfa40fd 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/mysql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/mysql-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json 
b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json index 7a16e742a..7a16e742a 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json index 242212d6f..242212d6f 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/postgresql-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/postgresql-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json index e9af50937..e9af50937 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json index aa27578a9..aa27578a9 100644 --- a/roles/openshift_examples/files/examples/v1.6/db-templates/redis-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/db-templates/redis-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json index 857ffa980..857ffa980 100644 --- a/roles/openshift_examples/files/examples/v1.6/image-streams/dotnet_imagestreams.json +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/dotnet_imagestreams.json diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json index 1a90a9409..a81dbb654 100644 --- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-centos7.json +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json @@ -800,7 +800,7 @@ "openshift.io/display-name": "Jenkins 1.X", "description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", - "tags": "jenkins", + "tags": "hidden,jenkins", "version": "1.x" }, "from": { diff --git a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json index eb94c3bb4..2ed0efe1e 100644 --- a/roles/openshift_examples/files/examples/v1.6/image-streams/image-streams-rhel7.json +++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json @@ -707,7 +707,7 @@ "openshift.io/display-name": "Jenkins 1.X", "description": "Provides a Jenkins 1.X server on RHEL 7. 
For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.", "iconClass": "icon-jenkins", - "tags": "jenkins", + "tags": "hidden,jenkins", "version": "1.x" }, "from": { diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md index f48d8d4a8..f48d8d4a8 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/README.md +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/README.md diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml index 34f5fcbcc..34f5fcbcc 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/apicast-gateway-template.yml +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json index eb3d296be..eb3d296be 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json index da2454d2e..da2454d2e 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/cakephp-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/cakephp-mysql.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json index 81ae63416..81ae63416 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json index 7a285dba8..7a285dba8 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dancer-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json index 9f982c286..9f982c286 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json index 7bee85ddd..7bee85ddd 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/django-postgresql.json +++ 
b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json index a09d71a00..a09d71a00 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-example.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-example.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json index fa31f7f61..a2b59c2d3 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/dotnet-pgsql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dotnet-pgsql-persistent.json @@ -19,6 +19,17 @@ }, "objects": [ { + "kind": "Secret", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}" + }, + "stringData": { + "database-password": "${DATABASE_PASSWORD}", + "connect-string": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}" + } + }, + { "kind": "Service", "apiVersion": "v1", "metadata": { @@ -209,7 +220,12 @@ "env": [ { "name": "ConnectionString", - "value": "Host=${DATABASE_SERVICE_NAME};Database=${DATABASE_NAME};Username=${DATABASE_USER};Password=${DATABASE_PASSWORD}" + "valueFrom": { + "secretKeyRef": { + "name": "${NAME}", + "key": "connect-string" + } + } } ], "resources": { @@ -373,7 +389,12 @@ }, { "name": "POSTGRESQL_PASSWORD", - "value": "${DATABASE_PASSWORD}" + "valueFrom": { + "secretKeyRef": { + "name": "${NAME}", + "key": "database-password" + } + } }, { "name": "POSTGRESQL_DATABASE", diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json index 264e4b2de..264e4b2de 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-ephemeral-template.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-ephemeral-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json index b47bdf353..b47bdf353 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/jenkins-persistent-template.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/jenkins-persistent-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json index 6ee999cb1..6ee999cb1 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json index 5c177a7e0..5c177a7e0 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/nodejs-mongodb.json +++ 
b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json index b400cfdb3..b400cfdb3 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json index fa67412ff..fa67412ff 100644 --- a/roles/openshift_examples/files/examples/v1.6/quickstart-templates/rails-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/rails-postgresql.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json index 9d99973be..9d99973be 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/fis-image-streams.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/fis-image-streams.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json index 049f3f884..049f3f884 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-streams/jboss-image-streams.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-streams/jboss-image-streams.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json index ab35afead..ab35afead 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-basic.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-basic.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json index c12f06dec..c12f06dec 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent-ssl.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent-ssl.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json index 897ce0395..897ce0395 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json index 97d110286..97d110286 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/amq62-ssl.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/amq62-ssl.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json index 56e76016f..56e76016f 100644 --- 
a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-basic.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-basic.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json index 639ac2e11..639ac2e11 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-https.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-https.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json index 22ca3f0a0..22ca3f0a0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json index e1a585d24..e1a585d24 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-mysql.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json index 12720eb19..12720eb19 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json index da8015fb0..da8015fb0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datagrid65-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datagrid65-postgresql.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json index 7d64dac98..7d64dac98 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json index 1e7c03b99..1e7c03b99 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-extensions-support-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-extensions-support-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json index 07f926ff3..07f926ff3 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/datavirt63-secure-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/datavirt63-secure-s2i.json diff 
--git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json index 754a3b4c0..754a3b4c0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-amq-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-amq-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json index 8be4ac90b..8be4ac90b 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json index bf9047599..bf9047599 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver62-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver62-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json index 51e667e02..51e667e02 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-amq-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-amq-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json index c5f0d006a..c5f0d006a 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json index 3db0e4c84..3db0e4c84 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/decisionserver63-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/decisionserver63-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json index 72dbb4302..72dbb4302 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json index 9dd847451..9dd847451 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-amq-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-amq-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json 
b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json index 7b1800b7b..7b1800b7b 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json index 31716d84c..31716d84c 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json index 212431056..212431056 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json index 13fbbdd93..13fbbdd93 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mongodb-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json index 69fdec206..69fdec206 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json index 2bd3c249f..2bd3c249f 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json index 31f245950..31f245950 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json index eac964697..eac964697 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json index 09023be71..09023be71 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap64-sso-s2i.json +++ 
b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap64-sso-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json index f08cdf2f9..f08cdf2f9 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json index 3ca9e9fab..3ca9e9fab 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-amq-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-amq-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json index 83b4d5b24..83b4d5b24 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json index 1292442a4..1292442a4 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json index 99db77d58..99db77d58 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json index c8150c231..c8150c231 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mongodb-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json index f8e5c2b04..f8e5c2b04 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json index 1edeb62e7..1edeb62e7 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json index 
d11df06ee..d11df06ee 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json index 6b7f6d707..6b7f6d707 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json index 811602220..811602220 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/eap70-sso-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/eap70-sso-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json index 413a6de87..413a6de87 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json index 610ea9441..610ea9441 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json index 6ef9d6e4c..6ef9d6e4c 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json index 9b48f8ae7..9b48f8ae7 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mongodb-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json index 30af703ce..30af703ce 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json index c2843af63..c2843af63 100644 --- 
a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json index b8372f374..b8372f374 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json index cd5bb9fa4..cd5bb9fa4 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat7-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json index cb1e49d29..cb1e49d29 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json index 21d5662c7..21d5662c7 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-https-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-https-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json index 34657d826..34657d826 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json index 974cfaddb..974cfaddb 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mongodb-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json index 7a8231cc5..7a8231cc5 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json index cda21f237..cda21f237 100644 
--- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json index 4dfc98015..4dfc98015 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json index f6c85668c..f6c85668c 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/jws30-tomcat8-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json index cd0bec3c1..cd0bec3c1 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-amq-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-amq-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json index 2ecce08a9..2ecce08a9 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-log-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-log-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json index d80939efb..d80939efb 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-camel-rest-sql-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-camel-rest-sql-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json index f99099868..f99099868 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/karaf2-cxf-rest-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/karaf2-cxf-rest-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json index 143e16756..143e16756 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/openjdk18-web-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/openjdk18-web-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json index 1dea463ac..1dea463ac 100644 --- 
a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json index 42264585b..42264585b 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json index f6d0c99ed..f6d0c99ed 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json index 41c726cf0..41c726cf0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-amq-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-amq-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json index 170c919cb..170c919cb 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-basic-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-basic-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json index 89d0db1a6..89d0db1a6 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-persistent-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json index 26cab29f8..26cab29f8 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-mysql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-mysql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json index 32a512829..32a512829 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-persistent-s2i.json diff --git 
a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json index 55e2199bb..55e2199bb 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/processserver63-postgresql-s2i.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/processserver63-postgresql-s2i.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json index 8b3cd6ed0..8b3cd6ed0 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-amq-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-amq-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json index bc5bbad22..bc5bbad22 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-config-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-config-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json index e54fa0d59..e54fa0d59 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-drools-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-drools-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json index 20ba97dac..20ba97dac 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-infinispan-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-infinispan-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json index 555647fab..555647fab 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-rest-sql-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-rest-sql-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json index cf9a4e903..cf9a4e903 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-teiid-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-teiid-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json index c78a96f7c..c78a96f7c 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-template.json +++ 
b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json index 620425902..620425902 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-camel-xml-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-camel-xml-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json index 15cfc93fd..15cfc93fd 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxrs-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json index c70ee7726..c70ee7726 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/spring-boot-cxf-jaxws-template.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/spring-boot-cxf-jaxws-template.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json index fb0578a67..fb0578a67 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-https.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-https.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json index dcbb24bf1..dcbb24bf1 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json index 1768f7a1b..1768f7a1b 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-mysql.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-mysql.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json index 4c2f81f2e..4c2f81f2e 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql-persistent.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql-persistent.json diff --git a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json index d8402ef72..d8402ef72 100644 --- a/roles/openshift_examples/files/examples/v1.6/xpaas-templates/sso70-postgresql.json +++ b/roles/openshift_examples/files/examples/v3.6/xpaas-templates/sso70-postgresql.json diff --git a/roles/openshift_excluder/tasks/verify_excluder.yml b/roles/openshift_excluder/tasks/verify_excluder.yml 
new file mode 100644 index 000000000..24a05d56e --- /dev/null +++ b/roles/openshift_excluder/tasks/verify_excluder.yml @@ -0,0 +1,29 @@ +--- +# input variables: +# - repoquery_cmd +# - excluder +# - openshift_upgrade_target +- block: + - name: Get available excluder version + command: > + {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}" + register: excluder_version + failed_when: false + changed_when: false + + - name: "{{ excluder }} version detected" + debug: + msg: "{{ excluder }}: {{ excluder_version.stdout }}" + + - name: Printing upgrade target version + debug: + msg: "{{ openshift_upgrade_target }}" + + - name: Check the available {{ excluder }} version is at most of the upgrade target version + fail: + msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version" + when: + - "{{ excluder_version.stdout != '' }}" + - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}" + when: + - not openshift.common.is_atomic | bool diff --git a/roles/openshift_excluder/tasks/verify_upgrade.yml b/roles/openshift_excluder/tasks/verify_upgrade.yml new file mode 100644 index 000000000..6ea2130ac --- /dev/null +++ b/roles/openshift_excluder/tasks/verify_upgrade.yml @@ -0,0 +1,15 @@ +--- +# input variables +# - repoquery_cmd +# - openshift_upgrade_target +- include: init.yml + +- include: verify_excluder.yml + vars: + excluder: "{{ openshift.common.service_type }}-docker-excluder" + when: docker_excluder_on + +- include: verify_excluder.yml + vars: + excluder: "{{ openshift.common.service_type }}-excluder" + when: openshift_excluder_on diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml index 00603f4fa..4cb5418c6 100644 --- a/roles/openshift_expand_partition/tasks/main.yml +++ b/roles/openshift_expand_partition/tasks/main.yml @@ -6,7 +6,7 @@ - name: Determine if growpart is installed command: "rpm -q cloud-utils-growpart" register: has_growpart - failed_when: "has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout" + failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout changed_when: false when: openshift.common.is_containerized | bool diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index adeb85c3f..914e46c05 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -1,7 +1,6 @@ #!/usr/bin/python # pylint: disable=too-many-lines # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 # Reason: Disable pylint too-many-lines because we don't want to split up this file. # Status: Permanently disabled to keep this module as self-contained as possible. 
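The new roles/openshift_excluder/tasks/verify_excluder.yml above gates an upgrade on the available excluder package: it trims both the detected excluder version and openshift_upgrade_target to their major.minor components and fails only when the excluder is newer than the target release. A minimal Python sketch of that comparison follows, assuming dotted version strings such as "3.6.173" and "3.6"; the function name and sample values are illustrative only and are not part of the role.

    from distutils.version import LooseVersion

    def excluder_blocks_upgrade(excluder_version, upgrade_target):
        """Return True when the available excluder is newer than the upgrade target.

        Mirrors the check in verify_excluder.yml: both versions are reduced to
        their first two components (major.minor) before comparison, so a 3.6.x
        excluder does not block an upgrade to 3.6.
        """
        if not excluder_version:
            # No excluder package available; nothing blocks the upgrade.
            return False

        def trim(version):
            # Keep only the major.minor portion, e.g. "3.6.173.0.5" -> "3.6".
            return '.'.join(version.split('.')[0:2])

        return LooseVersion(trim(excluder_version)) > LooseVersion(trim(upgrade_target))

    # A 3.7 excluder would block an upgrade to 3.6; a 3.6.x excluder would not.
    assert excluder_blocks_upgrade('3.7.0', '3.6') is True
    assert excluder_blocks_upgrade('3.6.173.0.5', '3.6') is False
    assert excluder_blocks_upgrade('', '3.6') is False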
@@ -911,7 +910,7 @@ def set_version_facts_if_unset(facts): version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0') version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0') version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0') - version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0') or version >= LooseVersion('1.6.0') + version_gte_3_6 = version >= LooseVersion('3.6.0') else: version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905') version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1') @@ -919,25 +918,26 @@ def set_version_facts_if_unset(facts): version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0') version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0') version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0') - version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0') + version_gte_3_6 = version >= LooseVersion('3.6.0') else: + # 'Latest' version is set to True, 'Next' versions set to False version_gte_3_1_or_1_1 = True version_gte_3_1_1_or_1_1_1 = True version_gte_3_2_or_1_2 = True version_gte_3_3_or_1_3 = True version_gte_3_4_or_1_4 = True version_gte_3_5_or_1_5 = True - version_gte_3_6_or_1_6 = False + version_gte_3_6 = True facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1 facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1 facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2 facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3 facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4 facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5 - facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6 + facts['common']['version_gte_3_6'] = version_gte_3_6 - if version_gte_3_6_or_1_6: - examples_content_version = 'v1.6' + if version_gte_3_6: + examples_content_version = 'v3.6' elif version_gte_3_5_or_1_5: examples_content_version = 'v1.5' elif version_gte_3_4_or_1_4: @@ -1791,6 +1791,12 @@ def set_container_facts_if_unset(facts): deployer_image = 'openshift/origin-deployer' facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') + # If openshift_docker_use_system_container is set and is True .... + if 'use_system_container' in list(facts['docker'].keys()): + if facts['docker']['use_system_container']: + # ... set the service name to container-engine + facts['docker']['service_name'] = 'container-engine' + if 'is_containerized' not in facts['common']: facts['common']['is_containerized'] = facts['common']['is_atomic'] if 'cli_image' not in facts['common']: @@ -1910,14 +1916,16 @@ class OpenShiftFacts(object): ) self.role = role + # Collect system facts and preface each fact with 'ansible_'. 
try: - # ansible-2.1 # pylint: disable=too-many-function-args,invalid-name self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter']) # noqa: F405 + additional_facts = {} for (k, v) in self.system_facts.items(): - self.system_facts["ansible_%s" % k.replace('-', '_')] = v + additional_facts["ansible_%s" % k.replace('-', '_')] = v + self.system_facts.update(additional_facts) except UnboundLocalError: - # ansible-2.2 + # ansible-2.2,2.3 self.system_facts = get_all_facts(module)['ansible_facts'] # noqa: F405 self.facts = self.generate_facts(local_facts, @@ -2071,6 +2079,7 @@ class OpenShiftFacts(object): hosted_registry_insecure = get_hosted_registry_insecure() if hosted_registry_insecure is not None: docker['hosted_registry_insecure'] = hosted_registry_insecure + docker['service_name'] = 'docker' defaults['docker'] = docker if 'clock' in roles: @@ -2158,7 +2167,9 @@ class OpenShiftFacts(object): glusterfs=dict( endpoints='glusterfs-registry-endpoints', path='glusterfs-registry-volume', - readOnly=False), + readOnly=False, + swap=False, + swapcopy=True), host=None, access=dict( modes=['ReadWriteMany'] diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py index 208e81048..7bce7f107 100644 --- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py +++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py @@ -1,4 +1,3 @@ -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Ansible callback plugin. ''' diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py index a46589443..4460ec324 100755 --- a/roles/openshift_health_checker/library/aos_version.py +++ b/roles/openshift_health_checker/library/aos_version.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Ansible module for yum-based systems determining if multiple releases of an OpenShift package are available, and if the release requested diff --git a/roles/openshift_health_checker/library/check_yum_update.py b/roles/openshift_health_checker/library/check_yum_update.py index 630ebc848..433795b67 100755 --- a/roles/openshift_health_checker/library/check_yum_update.py +++ b/roles/openshift_health_checker/library/check_yum_update.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Ansible module to test whether a yum update or install will succeed, without actually performing it or running yum. diff --git a/roles/openshift_hosted/README.md b/roles/openshift_hosted/README.md index 6d576df71..3e5d7f860 100644 --- a/roles/openshift_hosted/README.md +++ b/roles/openshift_hosted/README.md @@ -28,6 +28,14 @@ From this role: | openshift_hosted_registry_selector | region=infra | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. | | openshift_hosted_registry_cert_expire_days | `730` (2 years) | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later. 
| +If you specify `openshift_hosted_registry_kind=glusterfs`, the following +variables also control configuration behavior: + +| Name | Default value | Description | +|----------------------------------------------|---------------|------------------------------------------------------------------------------| +| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume | +| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, also copy the current contents of the registry volume | + Dependencies ------------ diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml index 596b36239..e7e62e5e4 100644 --- a/roles/openshift_hosted/defaults/main.yml +++ b/roles/openshift_hosted/defaults/main.yml @@ -24,9 +24,9 @@ openshift_hosted_routers: ports: - 80:80 - 443:443 - certificates: "{{ openshift_hosted_router_certificates | default({}) }}" + certificate: "{{ openshift_hosted_router_certificate | default({}) }}" -openshift_hosted_router_certificates: {} +openshift_hosted_router_certificate: {} openshift_hosted_registry_cert_expire_days: 730 openshift_hosted_router_create_certificate: False diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml index 6e691c26f..751489958 100644 --- a/roles/openshift_hosted/tasks/registry/registry.yml +++ b/roles/openshift_hosted/tasks/registry/registry.yml @@ -61,7 +61,7 @@ name: "{{ openshift_hosted_registry_serviceaccount }}" namespace: "{{ openshift_hosted_registry_namespace }}" -- name: Grant the registry serivce account access to the appropriate scc +- name: Grant the registry service account access to the appropriate scc oc_adm_policy_user: user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}" namespace: "{{ openshift_hosted_registry_namespace }}" @@ -126,4 +126,4 @@ - include: storage/glusterfs.yml when: - - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' + - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap diff --git a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml index b18b24266..e6bb196b8 100644 --- a/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/registry/storage/glusterfs.yml @@ -1,10 +1,18 @@ --- +- name: Get registry DeploymentConfig + oc_obj: + namespace: "{{ openshift_hosted_registry_namespace }}" + state: list + kind: dc + name: "{{ openshift_hosted_registry_name }}" + register: registry_dc + - name: Wait for registry pods oc_obj: namespace: "{{ openshift_hosted_registry_namespace }}" state: list kind: pod - selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}" + selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}" register: registry_pods until: - "registry_pods.results.results[0]['items'] | count > 0" @@ -38,6 +46,39 @@ mode: "2775" recurse: True +- block: + - name: Activate registry maintenance mode + oc_env: + namespace: "{{ openshift_hosted_registry_namespace }}" + name: "{{ openshift_hosted_registry_name }}" + env_vars: + - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true' + + - name: Get first registry pod name + set_fact: + 
registry_pod_name: "{{ registry_pods.results.results[0]['items'][0].metadata.name }}" + + - name: Copy current registry contents to new GlusterFS volume + command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/" + when: openshift.hosted.registry.storage.glusterfs.swapcopy + + - name: Swap new GlusterFS registry volume + oc_volume: + namespace: "{{ openshift_hosted_registry_namespace }}" + name: "{{ openshift_hosted_registry_name }}" + vol_name: registry-storage + mount_type: pvc + claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + + - name: Deactivate registry maintenance mode + oc_env: + namespace: "{{ openshift_hosted_registry_namespace }}" + name: "{{ openshift_hosted_registry_name }}" + state: absent + env_vars: + - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true' + when: openshift.hosted.registry.storage.glusterfs.swap + - name: Unmount registry volume mount: state: unmounted diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml index c71d0a34f..e75e3b16f 100644 --- a/roles/openshift_hosted/tasks/router/router.yml +++ b/roles/openshift_hosted/tasks/router/router.yml @@ -25,13 +25,13 @@ hostnames: - "{{ openshift_master_default_subdomain }}" - "*.{{ openshift_master_default_subdomain }}" - cert: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}" - key: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else ((openshift_master_config_dir) ~ '/openshift-router.key') }}" + cert: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.crt') }}" + key: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else ((openshift_master_config_dir) ~ '/openshift-router.key') }}" with_items: "{{ openshift_hosted_routers }}" - - name: set the openshift_hosted_router_certificates + - name: set the openshift_hosted_router_certificate set_fact: - openshift_hosted_router_certificates: + openshift_hosted_router_certificate: certfile: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}" keyfile: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}" cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}" @@ -44,7 +44,7 @@ backup: True dest: "/etc/origin/master/{{ item | basename }}" src: "{{ item }}" - with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificates') | + with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" when: not openshift_hosted_router_create_certificate @@ -82,9 +82,9 @@ service_account: "{{ item.serviceaccount | default('router') }}" selector: "{{ item.selector | default(none) }}" images: "{{ item.images | default(omit) }}" - cert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.certfile | basename)) if 'certfile' in item.certificates else omit }}" - key_file: "{{ ('/etc/origin/master/' ~ (item.certificates.keyfile | basename)) if 'keyfile' in item.certificates else omit }}" - cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificates.cafile | basename)) if 'cafile' in item.certificates else omit }}" + cert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.certfile | basename)) if 'certfile' in item.certificate else omit }}" + 
key_file: "{{ ('/etc/origin/master/' ~ (item.certificate.keyfile | basename)) if 'keyfile' in item.certificate else omit }}" + cacert_file: "{{ ('/etc/origin/master/' ~ (item.certificate.cafile | basename)) if 'cafile' in item.certificate else omit }}" edits: "{{ openshift_hosted_router_edits | union(item.edits) }}" ports: "{{ item.ports }}" stats_port: "{{ item.stats_port }}" diff --git a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml index afd82766f..78b624109 100644 --- a/roles/openshift_hosted_logging/tasks/deploy_logging.yaml +++ b/roles/openshift_hosted_logging/tasks/deploy_logging.yaml @@ -36,7 +36,7 @@ command: > {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }} register: secret_output - failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr" + failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr - name: "Create templates for logging accounts and the deployer" command: > @@ -60,21 +60,21 @@ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer register: permiss_output - failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr" + failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr - name: "Set permissions for fluentd" command: > {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd_output - failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" + failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr - name: "Set additional permissions for fluentd" command: > {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd register: fluentd2_output - failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" + failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr - name: "Add rolebinding-reader to aggregated-logging-elasticsearch" command: > @@ -82,13 +82,13 @@ policy add-cluster-role-to-user rolebinding-reader \ system:serviceaccount:logging:aggregated-logging-elasticsearch register: rolebinding_reader_output - failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr" + failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr - name: "Create ConfigMap for deployer parameters" command: > {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }} register: deployer_configmap_output - failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr" + failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr - name: "Process the deployer template" shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}" diff --git a/roles/openshift_hosted_metrics/tasks/install.yml 
b/roles/openshift_hosted_metrics/tasks/install.yml index 6a442cefc..15dd1bd54 100644 --- a/roles/openshift_hosted_metrics/tasks/install.yml +++ b/roles/openshift_hosted_metrics/tasks/install.yml @@ -81,7 +81,7 @@ secrets new metrics-deployer nothing=/dev/null register: metrics_deployer_secret changed_when: metrics_deployer_secret.rc == 0 - failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr" + failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr # TODO: extend this to allow user passed in certs or generating cert with # OpenShift CA diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml index c67058696..5abb2ef83 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml @@ -223,7 +223,7 @@ items: - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"' name: IMAGE_VERSION - value: "3.4.0" + value: "v3.4" - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." name: IMAGE_PULL_SECRET diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml index 6ead122c5..1d319eab8 100644 --- a/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml +++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/metrics-deployer.yaml @@ -105,7 +105,7 @@ parameters: - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' name: IMAGE_VERSION - value: "3.4.0" + value: "v3.4" - description: "Internal URL for the master, for authentication retrieval" name: MASTER_URL @@ -118,7 +118,7 @@ parameters: description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" name: MODE value: "deploy" -- +- description: "Set to true to continue even if the deployer runs into an error." name: CONTINUE_ON_ERROR value: "false" diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml deleted file mode 100644 index fdfc285ca..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml +++ /dev/null @@ -1,345 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-account-template - annotations: - description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
- tags: "infrastructure" - objects: - - - apiVersion: v1 - kind: ServiceAccount - name: logging-deployer - metadata: - name: logging-deployer - labels: - logging-infra: deployer - provider: openshift - component: deployer - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-kibana - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-elasticsearch - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-fluentd - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-curator - - apiVersion: v1 - kind: ClusterRole - metadata: - name: oauth-editor - rules: - - resources: - - oauthclients - verbs: - - create - - delete - - apiVersion: v1 - kind: ClusterRole - metadata: - name: daemonset-admin - rules: - - resources: - - daemonsets - apiGroups: - - extensions - verbs: - - create - - get - - list - - watch - - delete - - update - - apiVersion: v1 - kind: ClusterRole - metadata: - name: rolebinding-reader - rules: - - resources: - - clusterrolebindings - verbs: - - get - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-edit-role - roleRef: - kind: ClusterRole - name: edit - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-dsadmin-role - roleRef: - kind: ClusterRole - name: daemonset-admin - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-elasticsearch-view-role - roleRef: - kind: ClusterRole - name: view - subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-template - annotations: - description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
- tags: "infrastructure" - labels: - logging-infra: deployer - provider: openshift - objects: - - - apiVersion: v1 - kind: Pod - metadata: - generateName: logging-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION} - imagePullPolicy: Always - name: deployer - volumeMounts: - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: IMAGE_PULL_SECRET - value: ${IMAGE_PULL_SECRET} - - name: INSECURE_REGISTRY - value: ${INSECURE_REGISTRY} - - name: ENABLE_OPS_CLUSTER - value: ${ENABLE_OPS_CLUSTER} - - name: KIBANA_HOSTNAME - value: ${KIBANA_HOSTNAME} - - name: KIBANA_OPS_HOSTNAME - value: ${KIBANA_OPS_HOSTNAME} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: ES_INSTANCE_RAM - value: ${ES_INSTANCE_RAM} - - name: ES_PVC_SIZE - value: ${ES_PVC_SIZE} - - name: ES_PVC_PREFIX - value: ${ES_PVC_PREFIX} - - name: ES_PVC_DYNAMIC - value: ${ES_PVC_DYNAMIC} - - name: ES_CLUSTER_SIZE - value: ${ES_CLUSTER_SIZE} - - name: ES_NODE_QUORUM - value: ${ES_NODE_QUORUM} - - name: ES_RECOVER_AFTER_NODES - value: ${ES_RECOVER_AFTER_NODES} - - name: ES_RECOVER_EXPECTED_NODES - value: ${ES_RECOVER_EXPECTED_NODES} - - name: ES_RECOVER_AFTER_TIME - value: ${ES_RECOVER_AFTER_TIME} - - name: ES_OPS_INSTANCE_RAM - value: ${ES_OPS_INSTANCE_RAM} - - name: ES_OPS_PVC_SIZE - value: ${ES_OPS_PVC_SIZE} - - name: ES_OPS_PVC_PREFIX - value: ${ES_OPS_PVC_PREFIX} - - name: ES_OPS_PVC_DYNAMIC - value: ${ES_OPS_PVC_DYNAMIC} - - name: ES_OPS_CLUSTER_SIZE - value: ${ES_OPS_CLUSTER_SIZE} - - name: ES_OPS_NODE_QUORUM - value: ${ES_OPS_NODE_QUORUM} - - name: ES_OPS_RECOVER_AFTER_NODES - value: ${ES_OPS_RECOVER_AFTER_NODES} - - name: ES_OPS_RECOVER_EXPECTED_NODES - value: ${ES_OPS_RECOVER_EXPECTED_NODES} - - name: ES_OPS_RECOVER_AFTER_TIME - value: ${ES_OPS_RECOVER_AFTER_TIME} - - name: FLUENTD_NODESELECTOR - value: ${FLUENTD_NODESELECTOR} - - name: ES_NODESELECTOR - value: ${ES_NODESELECTOR} - - name: ES_OPS_NODESELECTOR - value: ${ES_OPS_NODESELECTOR} - - name: KIBANA_NODESELECTOR - value: ${KIBANA_NODESELECTOR} - - name: KIBANA_OPS_NODESELECTOR - value: ${KIBANA_OPS_NODESELECTOR} - - name: CURATOR_NODESELECTOR - value: ${CURATOR_NODESELECTOR} - - name: CURATOR_OPS_NODESELECTOR - value: ${CURATOR_OPS_NODESELECTOR} - - name: MODE - value: ${MODE} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: logging-deployer - volumes: - - name: empty - emptyDir: {} - parameters: - - - description: "The mode that the deployer runs in." - name: MODE - value: "install" - - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' - name: IMAGE_PREFIX - value: "registry.access.redhat.com/openshift3/" - - - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' - name: IMAGE_VERSION - value: "3.4.0" - - - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." 
- name: IMAGE_PULL_SECRET - - - description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" - name: INSECURE_REGISTRY - value: "false" - - - description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." - name: ENABLE_OPS_CLUSTER - value: "false" - - - description: "(Deprecated) External hostname where clients will reach kibana" - name: KIBANA_HOSTNAME - value: "kibana.example.com" - - - description: "(Deprecated) External hostname at which admins will visit the ops Kibana." - name: KIBANA_OPS_HOSTNAME - value: kibana-ops.example.com - - - description: "(Deprecated) External URL for the master, for OAuth purposes" - name: PUBLIC_MASTER_URL - value: "https://localhost:8443" - - - description: "(Deprecated) Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc.cluster.local" - - - description: "(Deprecated) How many instances of ElasticSearch to deploy." - name: ES_CLUSTER_SIZE - value: "1" - - - description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." - name: ES_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." - name: ES_PVC_PREFIX - value: "logging-es-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' - name: ES_PVC_DYNAMIC - - - description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_NODE_QUORUM - - - description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." - name: ES_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." - name: ES_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." - name: ES_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." - name: ES_OPS_CLUSTER_SIZE - - - description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." - name: ES_OPS_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_OPS_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." - name: ES_OPS_PVC_PREFIX - value: "logging-es-ops-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. 
' - name: ES_OPS_PVC_DYNAMIC - - - description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_OPS_NODE_QUORUM - - - description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." - name: ES_OPS_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." - name: FLUENTD_NODESELECTOR - value: "logging-infra-fluentd=true" - - - description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." - name: ES_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." - name: ES_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana cluster (label=value)." - name: KIBANA_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana operations cluster (label=value)." - name: KIBANA_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Curator (label=value)." - name: CURATOR_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector operations Curator (label=value)." - name: CURATOR_OPS_NODESELECTOR - value: "" diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml deleted file mode 100644 index c4ab794ae..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
- tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - securityContext: {} - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - securityContext: {} - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: MASTER_URL - value: ${MASTER_URL} - - name: MODE - value: ${MODE} - - name: CONTINUE_ON_ERROR - value: ${CONTINUE_ON_ERROR} - - name: REDEPLOY - value: ${REDEPLOY} - - name: IGNORE_PREFLIGHT - value: ${IGNORE_PREFLIGHT} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: DYNAMICALLY_PROVISION_STORAGE - value: ${DYNAMICALLY_PROVISION_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: ${METRIC_DURATION} - - name: USER_WRITE_ACCESS - value: ${USER_WRITE_ACCESS} - - name: HEAPSTER_NODE_ID - value: ${HEAPSTER_NODE_ID} - - name: METRIC_RESOLUTION - value: ${METRIC_RESOLUTION} - - name: STARTUP_TIMEOUT - value: ${STARTUP_TIMEOUT} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "registry.access.redhat.com/openshift3/" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' - name: IMAGE_VERSION - value: "v3.5" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" - name: MODE - value: "deploy" -- - description: "Set to true to continue even if the deployer runs into an error." - name: CONTINUE_ON_ERROR - value: "false" -- - description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" - name: REDEPLOY - value: "false" -- - description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." 
- name: IGNORE_PREFLIGHT - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" - name: DYNAMICALLY_PROVISION_STORAGE - value: "false" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "10Gi" -- - description: "How many days metrics should be stored for." - name: METRIC_DURATION - value: "7" -- - description: "If a user accounts should be allowed to write metrics." - name: USER_WRITE_ACCESS - value: "false" -- - description: "The identifier used when generating metric ids in Hawkular" - name: HEAPSTER_NODE_ID - value: "nodename" -- - description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" - name: METRIC_RESOLUTION - value: "30s" -- - description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" - name: STARTUP_TIMEOUT - value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml deleted file mode 100644 index 5b5503500..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml +++ /dev/null @@ -1,342 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-account-template - annotations: - description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
- tags: "infrastructure" - objects: - - - apiVersion: v1 - kind: ServiceAccount - name: logging-deployer - metadata: - name: logging-deployer - labels: - logging-infra: deployer - provider: openshift - component: deployer - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-kibana - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-elasticsearch - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-fluentd - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-curator - - apiVersion: v1 - kind: ClusterRole - metadata: - name: oauth-editor - rules: - - resources: - - oauthclients - verbs: - - create - - delete - - apiVersion: v1 - kind: ClusterRole - metadata: - name: daemonset-admin - rules: - - resources: - - daemonsets - apiGroups: - - extensions - verbs: - - create - - get - - list - - watch - - delete - - update - - apiVersion: v1 - kind: ClusterRole - metadata: - name: rolebinding-reader - rules: - - resources: - - clusterrolebindings - verbs: - - get - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-edit-role - roleRef: - name: edit - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-dsadmin-role - roleRef: - name: daemonset-admin - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-elasticsearch-view-role - roleRef: - name: view - subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-template - annotations: - description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
- tags: "infrastructure" - labels: - logging-infra: deployer - provider: openshift - objects: - - - apiVersion: v1 - kind: Pod - metadata: - generateName: logging-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION} - imagePullPolicy: Always - name: deployer - volumeMounts: - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: IMAGE_PULL_SECRET - value: ${IMAGE_PULL_SECRET} - - name: INSECURE_REGISTRY - value: ${INSECURE_REGISTRY} - - name: ENABLE_OPS_CLUSTER - value: ${ENABLE_OPS_CLUSTER} - - name: KIBANA_HOSTNAME - value: ${KIBANA_HOSTNAME} - - name: KIBANA_OPS_HOSTNAME - value: ${KIBANA_OPS_HOSTNAME} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: ES_INSTANCE_RAM - value: ${ES_INSTANCE_RAM} - - name: ES_PVC_SIZE - value: ${ES_PVC_SIZE} - - name: ES_PVC_PREFIX - value: ${ES_PVC_PREFIX} - - name: ES_PVC_DYNAMIC - value: ${ES_PVC_DYNAMIC} - - name: ES_CLUSTER_SIZE - value: ${ES_CLUSTER_SIZE} - - name: ES_NODE_QUORUM - value: ${ES_NODE_QUORUM} - - name: ES_RECOVER_AFTER_NODES - value: ${ES_RECOVER_AFTER_NODES} - - name: ES_RECOVER_EXPECTED_NODES - value: ${ES_RECOVER_EXPECTED_NODES} - - name: ES_RECOVER_AFTER_TIME - value: ${ES_RECOVER_AFTER_TIME} - - name: ES_OPS_INSTANCE_RAM - value: ${ES_OPS_INSTANCE_RAM} - - name: ES_OPS_PVC_SIZE - value: ${ES_OPS_PVC_SIZE} - - name: ES_OPS_PVC_PREFIX - value: ${ES_OPS_PVC_PREFIX} - - name: ES_OPS_PVC_DYNAMIC - value: ${ES_OPS_PVC_DYNAMIC} - - name: ES_OPS_CLUSTER_SIZE - value: ${ES_OPS_CLUSTER_SIZE} - - name: ES_OPS_NODE_QUORUM - value: ${ES_OPS_NODE_QUORUM} - - name: ES_OPS_RECOVER_AFTER_NODES - value: ${ES_OPS_RECOVER_AFTER_NODES} - - name: ES_OPS_RECOVER_EXPECTED_NODES - value: ${ES_OPS_RECOVER_EXPECTED_NODES} - - name: ES_OPS_RECOVER_AFTER_TIME - value: ${ES_OPS_RECOVER_AFTER_TIME} - - name: FLUENTD_NODESELECTOR - value: ${FLUENTD_NODESELECTOR} - - name: ES_NODESELECTOR - value: ${ES_NODESELECTOR} - - name: ES_OPS_NODESELECTOR - value: ${ES_OPS_NODESELECTOR} - - name: KIBANA_NODESELECTOR - value: ${KIBANA_NODESELECTOR} - - name: KIBANA_OPS_NODESELECTOR - value: ${KIBANA_OPS_NODESELECTOR} - - name: CURATOR_NODESELECTOR - value: ${CURATOR_NODESELECTOR} - - name: CURATOR_OPS_NODESELECTOR - value: ${CURATOR_OPS_NODESELECTOR} - - name: MODE - value: ${MODE} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: logging-deployer - volumes: - - name: empty - emptyDir: {} - parameters: - - - description: "The mode that the deployer runs in." - name: MODE - value: "install" - - - description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "docker.io/openshift/origin-" - - - description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' - name: IMAGE_VERSION - value: "latest" - - - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." 
- name: IMAGE_PULL_SECRET - - - description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" - name: INSECURE_REGISTRY - value: "false" - - - description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." - name: ENABLE_OPS_CLUSTER - value: "false" - - - description: "(Deprecated) External hostname where clients will reach kibana" - name: KIBANA_HOSTNAME - value: "kibana.example.com" - - - description: "(Deprecated) External hostname at which admins will visit the ops Kibana." - name: KIBANA_OPS_HOSTNAME - value: kibana-ops.example.com - - - description: "(Deprecated) External URL for the master, for OAuth purposes" - name: PUBLIC_MASTER_URL - value: "https://localhost:8443" - - - description: "(Deprecated) Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc.cluster.local" - - - description: "(Deprecated) How many instances of ElasticSearch to deploy." - name: ES_CLUSTER_SIZE - value: "1" - - - description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." - name: ES_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." - name: ES_PVC_PREFIX - value: "logging-es-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' - name: ES_PVC_DYNAMIC - - - description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_NODE_QUORUM - - - description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." - name: ES_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." - name: ES_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." - name: ES_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." - name: ES_OPS_CLUSTER_SIZE - - - description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." - name: ES_OPS_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_OPS_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." - name: ES_OPS_PVC_PREFIX - value: "logging-es-ops-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. 
' - name: ES_OPS_PVC_DYNAMIC - - - description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_OPS_NODE_QUORUM - - - description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." - name: ES_OPS_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." - name: FLUENTD_NODESELECTOR - value: "logging-infra-fluentd=true" - - - description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." - name: ES_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." - name: ES_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana cluster (label=value)." - name: KIBANA_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana operations cluster (label=value)." - name: KIBANA_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Curator (label=value)." - name: CURATOR_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector operations Curator (label=value)." - name: CURATOR_OPS_NODESELECTOR - value: "" diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml deleted file mode 100644 index d191c0439..000000000 --- a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
- tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - securityContext: {} - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - securityContext: {} - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: MASTER_URL - value: ${MASTER_URL} - - name: MODE - value: ${MODE} - - name: CONTINUE_ON_ERROR - value: ${CONTINUE_ON_ERROR} - - name: REDEPLOY - value: ${REDEPLOY} - - name: IGNORE_PREFLIGHT - value: ${IGNORE_PREFLIGHT} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: DYNAMICALLY_PROVISION_STORAGE - value: ${DYNAMICALLY_PROVISION_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: ${METRIC_DURATION} - - name: USER_WRITE_ACCESS - value: ${USER_WRITE_ACCESS} - - name: HEAPSTER_NODE_ID - value: ${HEAPSTER_NODE_ID} - - name: METRIC_RESOLUTION - value: ${METRIC_RESOLUTION} - - name: STARTUP_TIMEOUT - value: ${STARTUP_TIMEOUT} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "openshift/origin-" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' - name: IMAGE_VERSION - value: "latest" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" - name: MODE - value: "deploy" -- - description: "Set to true to continue even if the deployer runs into an error." - name: CONTINUE_ON_ERROR - value: "false" -- - description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" - name: REDEPLOY - value: "false" -- - description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." 
- name: IGNORE_PREFLIGHT - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" - name: DYNAMICALLY_PROVISION_STORAGE - value: "false" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "10Gi" -- - description: "How many days metrics should be stored for." - name: METRIC_DURATION - value: "7" -- - description: "If a user accounts should be allowed to write metrics." - name: USER_WRITE_ACCESS - value: "false" -- - description: "The identifier used when generating metric ids in Hawkular" - name: HEAPSTER_NODE_ID - value: "nodename" -- - description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" - name: METRIC_RESOLUTION - value: "30s" -- - description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" - name: STARTUP_TIMEOUT - value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml deleted file mode 100644 index fdfc285ca..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/enterprise/logging-deployer.yaml +++ /dev/null @@ -1,345 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-account-template - annotations: - description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
- tags: "infrastructure" - objects: - - - apiVersion: v1 - kind: ServiceAccount - name: logging-deployer - metadata: - name: logging-deployer - labels: - logging-infra: deployer - provider: openshift - component: deployer - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-kibana - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-elasticsearch - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-fluentd - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-curator - - apiVersion: v1 - kind: ClusterRole - metadata: - name: oauth-editor - rules: - - resources: - - oauthclients - verbs: - - create - - delete - - apiVersion: v1 - kind: ClusterRole - metadata: - name: daemonset-admin - rules: - - resources: - - daemonsets - apiGroups: - - extensions - verbs: - - create - - get - - list - - watch - - delete - - update - - apiVersion: v1 - kind: ClusterRole - metadata: - name: rolebinding-reader - rules: - - resources: - - clusterrolebindings - verbs: - - get - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-edit-role - roleRef: - kind: ClusterRole - name: edit - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-dsadmin-role - roleRef: - kind: ClusterRole - name: daemonset-admin - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-elasticsearch-view-role - roleRef: - kind: ClusterRole - name: view - subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-template - annotations: - description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
- tags: "infrastructure" - labels: - logging-infra: deployer - provider: openshift - objects: - - - apiVersion: v1 - kind: Pod - metadata: - generateName: logging-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION} - imagePullPolicy: Always - name: deployer - volumeMounts: - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: IMAGE_PULL_SECRET - value: ${IMAGE_PULL_SECRET} - - name: INSECURE_REGISTRY - value: ${INSECURE_REGISTRY} - - name: ENABLE_OPS_CLUSTER - value: ${ENABLE_OPS_CLUSTER} - - name: KIBANA_HOSTNAME - value: ${KIBANA_HOSTNAME} - - name: KIBANA_OPS_HOSTNAME - value: ${KIBANA_OPS_HOSTNAME} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: ES_INSTANCE_RAM - value: ${ES_INSTANCE_RAM} - - name: ES_PVC_SIZE - value: ${ES_PVC_SIZE} - - name: ES_PVC_PREFIX - value: ${ES_PVC_PREFIX} - - name: ES_PVC_DYNAMIC - value: ${ES_PVC_DYNAMIC} - - name: ES_CLUSTER_SIZE - value: ${ES_CLUSTER_SIZE} - - name: ES_NODE_QUORUM - value: ${ES_NODE_QUORUM} - - name: ES_RECOVER_AFTER_NODES - value: ${ES_RECOVER_AFTER_NODES} - - name: ES_RECOVER_EXPECTED_NODES - value: ${ES_RECOVER_EXPECTED_NODES} - - name: ES_RECOVER_AFTER_TIME - value: ${ES_RECOVER_AFTER_TIME} - - name: ES_OPS_INSTANCE_RAM - value: ${ES_OPS_INSTANCE_RAM} - - name: ES_OPS_PVC_SIZE - value: ${ES_OPS_PVC_SIZE} - - name: ES_OPS_PVC_PREFIX - value: ${ES_OPS_PVC_PREFIX} - - name: ES_OPS_PVC_DYNAMIC - value: ${ES_OPS_PVC_DYNAMIC} - - name: ES_OPS_CLUSTER_SIZE - value: ${ES_OPS_CLUSTER_SIZE} - - name: ES_OPS_NODE_QUORUM - value: ${ES_OPS_NODE_QUORUM} - - name: ES_OPS_RECOVER_AFTER_NODES - value: ${ES_OPS_RECOVER_AFTER_NODES} - - name: ES_OPS_RECOVER_EXPECTED_NODES - value: ${ES_OPS_RECOVER_EXPECTED_NODES} - - name: ES_OPS_RECOVER_AFTER_TIME - value: ${ES_OPS_RECOVER_AFTER_TIME} - - name: FLUENTD_NODESELECTOR - value: ${FLUENTD_NODESELECTOR} - - name: ES_NODESELECTOR - value: ${ES_NODESELECTOR} - - name: ES_OPS_NODESELECTOR - value: ${ES_OPS_NODESELECTOR} - - name: KIBANA_NODESELECTOR - value: ${KIBANA_NODESELECTOR} - - name: KIBANA_OPS_NODESELECTOR - value: ${KIBANA_OPS_NODESELECTOR} - - name: CURATOR_NODESELECTOR - value: ${CURATOR_NODESELECTOR} - - name: CURATOR_OPS_NODESELECTOR - value: ${CURATOR_OPS_NODESELECTOR} - - name: MODE - value: ${MODE} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: logging-deployer - volumes: - - name: empty - emptyDir: {} - parameters: - - - description: "The mode that the deployer runs in." - name: MODE - value: "install" - - - description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set prefix "registry.access.redhat.com/openshift3/"' - name: IMAGE_PREFIX - value: "registry.access.redhat.com/openshift3/" - - - description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.3.0", set version "3.3.0"' - name: IMAGE_VERSION - value: "3.4.0" - - - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." 
- name: IMAGE_PULL_SECRET - - - description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" - name: INSECURE_REGISTRY - value: "false" - - - description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." - name: ENABLE_OPS_CLUSTER - value: "false" - - - description: "(Deprecated) External hostname where clients will reach kibana" - name: KIBANA_HOSTNAME - value: "kibana.example.com" - - - description: "(Deprecated) External hostname at which admins will visit the ops Kibana." - name: KIBANA_OPS_HOSTNAME - value: kibana-ops.example.com - - - description: "(Deprecated) External URL for the master, for OAuth purposes" - name: PUBLIC_MASTER_URL - value: "https://localhost:8443" - - - description: "(Deprecated) Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc.cluster.local" - - - description: "(Deprecated) How many instances of ElasticSearch to deploy." - name: ES_CLUSTER_SIZE - value: "1" - - - description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." - name: ES_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." - name: ES_PVC_PREFIX - value: "logging-es-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' - name: ES_PVC_DYNAMIC - - - description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_NODE_QUORUM - - - description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." - name: ES_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." - name: ES_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." - name: ES_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." - name: ES_OPS_CLUSTER_SIZE - - - description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." - name: ES_OPS_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_OPS_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." - name: ES_OPS_PVC_PREFIX - value: "logging-es-ops-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. 
' - name: ES_OPS_PVC_DYNAMIC - - - description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_OPS_NODE_QUORUM - - - description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." - name: ES_OPS_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." - name: FLUENTD_NODESELECTOR - value: "logging-infra-fluentd=true" - - - description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." - name: ES_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." - name: ES_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana cluster (label=value)." - name: KIBANA_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana operations cluster (label=value)." - name: KIBANA_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Curator (label=value)." - name: CURATOR_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector operations Curator (label=value)." - name: CURATOR_OPS_NODESELECTOR - value: "" diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml deleted file mode 100644 index c4ab794ae..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/enterprise/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
- tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - securityContext: {} - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - securityContext: {} - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: MASTER_URL - value: ${MASTER_URL} - - name: MODE - value: ${MODE} - - name: CONTINUE_ON_ERROR - value: ${CONTINUE_ON_ERROR} - - name: REDEPLOY - value: ${REDEPLOY} - - name: IGNORE_PREFLIGHT - value: ${IGNORE_PREFLIGHT} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: DYNAMICALLY_PROVISION_STORAGE - value: ${DYNAMICALLY_PROVISION_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: ${METRIC_DURATION} - - name: USER_WRITE_ACCESS - value: ${USER_WRITE_ACCESS} - - name: HEAPSTER_NODE_ID - value: ${HEAPSTER_NODE_ID} - - name: METRIC_RESOLUTION - value: ${METRIC_RESOLUTION} - - name: STARTUP_TIMEOUT - value: ${STARTUP_TIMEOUT} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "registry.access.redhat.com/openshift3/" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' - name: IMAGE_VERSION - value: "v3.5" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" - name: MODE - value: "deploy" -- - description: "Set to true to continue even if the deployer runs into an error." - name: CONTINUE_ON_ERROR - value: "false" -- - description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" - name: REDEPLOY - value: "false" -- - description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." 
- name: IGNORE_PREFLIGHT - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" - name: DYNAMICALLY_PROVISION_STORAGE - value: "false" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "10Gi" -- - description: "How many days metrics should be stored for." - name: METRIC_DURATION - value: "7" -- - description: "If a user accounts should be allowed to write metrics." - name: USER_WRITE_ACCESS - value: "false" -- - description: "The identifier used when generating metric ids in Hawkular" - name: HEAPSTER_NODE_ID - value: "nodename" -- - description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" - name: METRIC_RESOLUTION - value: "30s" -- - description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" - name: STARTUP_TIMEOUT - value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml deleted file mode 100644 index 5b5503500..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/origin/logging-deployer.yaml +++ /dev/null @@ -1,342 +0,0 @@ -apiVersion: "v1" -kind: "List" -items: -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-account-template - annotations: - description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin." 
- tags: "infrastructure" - objects: - - - apiVersion: v1 - kind: ServiceAccount - name: logging-deployer - metadata: - name: logging-deployer - labels: - logging-infra: deployer - provider: openshift - component: deployer - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-kibana - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-elasticsearch - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-fluentd - - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: aggregated-logging-curator - - apiVersion: v1 - kind: ClusterRole - metadata: - name: oauth-editor - rules: - - resources: - - oauthclients - verbs: - - create - - delete - - apiVersion: v1 - kind: ClusterRole - metadata: - name: daemonset-admin - rules: - - resources: - - daemonsets - apiGroups: - - extensions - verbs: - - create - - get - - list - - watch - - delete - - update - - apiVersion: v1 - kind: ClusterRole - metadata: - name: rolebinding-reader - rules: - - resources: - - clusterrolebindings - verbs: - - get - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-edit-role - roleRef: - name: edit - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-deployer-dsadmin-role - roleRef: - name: daemonset-admin - subjects: - - kind: ServiceAccount - name: logging-deployer - - - apiVersion: v1 - kind: RoleBinding - metadata: - name: logging-elasticsearch-view-role - roleRef: - name: view - subjects: - - kind: ServiceAccount - name: aggregated-logging-elasticsearch -- - apiVersion: "v1" - kind: "Template" - metadata: - name: logging-deployer-template - annotations: - description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account." 
- tags: "infrastructure" - labels: - logging-infra: deployer - provider: openshift - objects: - - - apiVersion: v1 - kind: Pod - metadata: - generateName: logging-deployer- - spec: - containers: - - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION} - imagePullPolicy: Always - name: deployer - volumeMounts: - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: IMAGE_PULL_SECRET - value: ${IMAGE_PULL_SECRET} - - name: INSECURE_REGISTRY - value: ${INSECURE_REGISTRY} - - name: ENABLE_OPS_CLUSTER - value: ${ENABLE_OPS_CLUSTER} - - name: KIBANA_HOSTNAME - value: ${KIBANA_HOSTNAME} - - name: KIBANA_OPS_HOSTNAME - value: ${KIBANA_OPS_HOSTNAME} - - name: PUBLIC_MASTER_URL - value: ${PUBLIC_MASTER_URL} - - name: MASTER_URL - value: ${MASTER_URL} - - name: ES_INSTANCE_RAM - value: ${ES_INSTANCE_RAM} - - name: ES_PVC_SIZE - value: ${ES_PVC_SIZE} - - name: ES_PVC_PREFIX - value: ${ES_PVC_PREFIX} - - name: ES_PVC_DYNAMIC - value: ${ES_PVC_DYNAMIC} - - name: ES_CLUSTER_SIZE - value: ${ES_CLUSTER_SIZE} - - name: ES_NODE_QUORUM - value: ${ES_NODE_QUORUM} - - name: ES_RECOVER_AFTER_NODES - value: ${ES_RECOVER_AFTER_NODES} - - name: ES_RECOVER_EXPECTED_NODES - value: ${ES_RECOVER_EXPECTED_NODES} - - name: ES_RECOVER_AFTER_TIME - value: ${ES_RECOVER_AFTER_TIME} - - name: ES_OPS_INSTANCE_RAM - value: ${ES_OPS_INSTANCE_RAM} - - name: ES_OPS_PVC_SIZE - value: ${ES_OPS_PVC_SIZE} - - name: ES_OPS_PVC_PREFIX - value: ${ES_OPS_PVC_PREFIX} - - name: ES_OPS_PVC_DYNAMIC - value: ${ES_OPS_PVC_DYNAMIC} - - name: ES_OPS_CLUSTER_SIZE - value: ${ES_OPS_CLUSTER_SIZE} - - name: ES_OPS_NODE_QUORUM - value: ${ES_OPS_NODE_QUORUM} - - name: ES_OPS_RECOVER_AFTER_NODES - value: ${ES_OPS_RECOVER_AFTER_NODES} - - name: ES_OPS_RECOVER_EXPECTED_NODES - value: ${ES_OPS_RECOVER_EXPECTED_NODES} - - name: ES_OPS_RECOVER_AFTER_TIME - value: ${ES_OPS_RECOVER_AFTER_TIME} - - name: FLUENTD_NODESELECTOR - value: ${FLUENTD_NODESELECTOR} - - name: ES_NODESELECTOR - value: ${ES_NODESELECTOR} - - name: ES_OPS_NODESELECTOR - value: ${ES_OPS_NODESELECTOR} - - name: KIBANA_NODESELECTOR - value: ${KIBANA_NODESELECTOR} - - name: KIBANA_OPS_NODESELECTOR - value: ${KIBANA_OPS_NODESELECTOR} - - name: CURATOR_NODESELECTOR - value: ${CURATOR_NODESELECTOR} - - name: CURATOR_OPS_NODESELECTOR - value: ${CURATOR_OPS_NODESELECTOR} - - name: MODE - value: ${MODE} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: logging-deployer - volumes: - - name: empty - emptyDir: {} - parameters: - - - description: "The mode that the deployer runs in." - name: MODE - value: "install" - - - description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "docker.io/openshift/origin-" - - - description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"' - name: IMAGE_VERSION - value: "latest" - - - description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry." 
- name: IMAGE_PULL_SECRET - - - description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)" - name: INSECURE_REGISTRY - value: "false" - - - description: "(Deprecated) If true, set up to use a second ES cluster for ops logs." - name: ENABLE_OPS_CLUSTER - value: "false" - - - description: "(Deprecated) External hostname where clients will reach kibana" - name: KIBANA_HOSTNAME - value: "kibana.example.com" - - - description: "(Deprecated) External hostname at which admins will visit the ops Kibana." - name: KIBANA_OPS_HOSTNAME - value: kibana-ops.example.com - - - description: "(Deprecated) External URL for the master, for OAuth purposes" - name: PUBLIC_MASTER_URL - value: "https://localhost:8443" - - - description: "(Deprecated) Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc.cluster.local" - - - description: "(Deprecated) How many instances of ElasticSearch to deploy." - name: ES_CLUSTER_SIZE - value: "1" - - - description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance." - name: ES_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE." - name: ES_PVC_PREFIX - value: "logging-es-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. ' - name: ES_PVC_DYNAMIC - - - description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_NODE_QUORUM - - - description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE." - name: ES_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE." - name: ES_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart." - name: ES_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE." - name: ES_OPS_CLUSTER_SIZE - - - description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance." - name: ES_OPS_INSTANCE_RAM - value: "8G" - - - description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead." - name: ES_OPS_PVC_SIZE - - - description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE." - name: ES_OPS_PVC_PREFIX - value: "logging-es-ops-" - - - description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. 
' - name: ES_OPS_PVC_DYNAMIC - - - description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1." - name: ES_OPS_NODE_QUORUM - - - description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_AFTER_NODES - - - description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE." - name: ES_OPS_RECOVER_EXPECTED_NODES - - - description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart." - name: ES_OPS_RECOVER_AFTER_TIME - value: "5m" - - - description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet." - name: FLUENTD_NODESELECTOR - value: "logging-infra-fluentd=true" - - - description: "(Deprecated) Node selector Elasticsearch cluster (label=value)." - name: ES_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)." - name: ES_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana cluster (label=value)." - name: KIBANA_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Kibana operations cluster (label=value)." - name: KIBANA_OPS_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector Curator (label=value)." - name: CURATOR_NODESELECTOR - value: "" - - - description: "(Deprecated) Node selector operations Curator (label=value)." - name: CURATOR_OPS_NODESELECTOR - value: "" diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml deleted file mode 100644 index d191c0439..000000000 --- a/roles/openshift_hosted_templates/files/v1.6/origin/metrics-deployer.yaml +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# -# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates -# and other contributors as indicated by the @author tags. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: "v1" -kind: "Template" -metadata: - name: metrics-deployer-template - annotations: - description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret." 
- tags: "infrastructure" -labels: - metrics-infra: deployer - provider: openshift - component: deployer -objects: -- - apiVersion: v1 - kind: Pod - metadata: - generateName: metrics-deployer- - spec: - securityContext: {} - containers: - - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION} - name: deployer - securityContext: {} - volumeMounts: - - name: secret - mountPath: /secret - readOnly: true - - name: empty - mountPath: /etc/deploy - env: - - name: PROJECT - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_PREFIX - value: ${IMAGE_PREFIX} - - name: IMAGE_VERSION - value: ${IMAGE_VERSION} - - name: MASTER_URL - value: ${MASTER_URL} - - name: MODE - value: ${MODE} - - name: CONTINUE_ON_ERROR - value: ${CONTINUE_ON_ERROR} - - name: REDEPLOY - value: ${REDEPLOY} - - name: IGNORE_PREFLIGHT - value: ${IGNORE_PREFLIGHT} - - name: USE_PERSISTENT_STORAGE - value: ${USE_PERSISTENT_STORAGE} - - name: DYNAMICALLY_PROVISION_STORAGE - value: ${DYNAMICALLY_PROVISION_STORAGE} - - name: HAWKULAR_METRICS_HOSTNAME - value: ${HAWKULAR_METRICS_HOSTNAME} - - name: CASSANDRA_NODES - value: ${CASSANDRA_NODES} - - name: CASSANDRA_PV_SIZE - value: ${CASSANDRA_PV_SIZE} - - name: METRIC_DURATION - value: ${METRIC_DURATION} - - name: USER_WRITE_ACCESS - value: ${USER_WRITE_ACCESS} - - name: HEAPSTER_NODE_ID - value: ${HEAPSTER_NODE_ID} - - name: METRIC_RESOLUTION - value: ${METRIC_RESOLUTION} - - name: STARTUP_TIMEOUT - value: ${STARTUP_TIMEOUT} - dnsPolicy: ClusterFirst - restartPolicy: Never - serviceAccount: metrics-deployer - volumes: - - name: empty - emptyDir: {} - - name: secret - secret: - secretName: metrics-deployer -parameters: -- - description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"' - name: IMAGE_PREFIX - value: "openshift/origin-" -- - description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"' - name: IMAGE_VERSION - value: "latest" -- - description: "Internal URL for the master, for authentication retrieval" - name: MASTER_URL - value: "https://kubernetes.default.svc:443" -- - description: "External hostname where clients will reach Hawkular Metrics" - name: HAWKULAR_METRICS_HOSTNAME - required: true -- - description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment" - name: MODE - value: "deploy" -- - description: "Set to true to continue even if the deployer runs into an error." - name: CONTINUE_ON_ERROR - value: "false" -- - description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)" - name: REDEPLOY - value: "false" -- - description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy." 
- name: IGNORE_PREFLIGHT - value: "false" -- - description: "Set to true for persistent storage, set to false to use non persistent storage" - name: USE_PERSISTENT_STORAGE - value: "true" -- - description: "Set to true to dynamically provision storage, set to false to use use pre-created persistent volumes" - name: DYNAMICALLY_PROVISION_STORAGE - value: "false" -- - description: "The number of Cassandra Nodes to deploy for the initial cluster" - name: CASSANDRA_NODES - value: "1" -- - description: "The persistent volume size for each of the Cassandra nodes" - name: CASSANDRA_PV_SIZE - value: "10Gi" -- - description: "How many days metrics should be stored for." - name: METRIC_DURATION - value: "7" -- - description: "If a user accounts should be allowed to write metrics." - name: USER_WRITE_ACCESS - value: "false" -- - description: "The identifier used when generating metric ids in Hawkular" - name: HEAPSTER_NODE_ID - value: "nodename" -- - description: "How often metrics should be gathered. Defaults value of '30s' for 30 seconds" - name: METRIC_RESOLUTION - value: "30s" -- - description: "How long in seconds we should wait until Hawkular Metrics and Heapster starts up before attempting a restart" - name: STARTUP_TIMEOUT - value: "500" diff --git a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml index 28feac4e6..28feac4e6 100644 --- a/roles/openshift_hosted_templates/files/v1.6/enterprise/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml diff --git a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml index 80cc4233b..80cc4233b 100644 --- a/roles/openshift_hosted_templates/files/v1.6/origin/registry-console.yaml +++ b/roles/openshift_hosted_templates/files/v3.6/origin/registry-console.yaml diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index 42f4fc72e..cba0f2de8 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -91,8 +91,6 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta - `openshift_logging_es_ops_pvc_prefix`: logging-es-ops - `openshift_logging_es_ops_recover_after_time`: 5m - `openshift_logging_es_ops_storage_group`: 65534 -- `openshift_logging_es_ops_number_of_shards`: The number of primary shards for every new index created in ES. Defaults to '1'. -- `openshift_logging_es_ops_number_of_replicas`: The number of replica shards per primary shard for every new index. Defaults to '0'. - `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'. - `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified. - `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified. 
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml index 96ed44011..76dfe518e 100644 --- a/roles/openshift_logging/defaults/main.yml +++ b/roles/openshift_logging/defaults/main.yml @@ -3,6 +3,10 @@ openshift_logging_use_ops: "{{ openshift_hosted_logging_enable_ops_cluster | def openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}" openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}" openshift_logging_namespace: logging +openshift_logging_nodeselector: null +openshift_logging_labels: {} +openshift_logging_label_key: "" +openshift_logging_label_value: "" openshift_logging_install_logging: True openshift_logging_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}" @@ -68,7 +72,7 @@ openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nod openshift_logging_fluentd_cpu_limit: 100m openshift_logging_fluentd_memory_limit: 512Mi openshift_logging_fluentd_es_copy: false -openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}" +openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal if openshift_hosted_logging_use_journal is defined else (docker_log_driver == 'journald') | ternary(True, False) if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') | ternary(True, False) if openshift.docker.log_driver is defined else openshift.docker.options | search('--log-driver=journald') if openshift.docker.options is defined else default(omit) }}" openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}" openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}" openshift_logging_fluentd_hosts: ['--all'] @@ -113,12 +117,19 @@ openshift_logging_es_ops_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_ openshift_logging_es_ops_recover_after_time: 5m openshift_logging_es_ops_storage_group: "{{ openshift_hosted_logging_elasticsearch_storage_group | default('65534') }}" openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}" -openshift_logging_es_ops_number_of_shards: 1 -openshift_logging_es_ops_number_of_replicas: 0 # storage related defaults openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_access_modes | default(['ReadWriteOnce']) }}" +# mux - secure_forward listener service +openshift_logging_mux_allow_external: False +openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}" +# this tells the fluentd node agent to use mux instead of sending directly to Elasticsearch +openshift_logging_use_mux_client: False +openshift_logging_mux_hostname: "{{ 'mux.' 
~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}" +openshift_logging_mux_port: 24284 +openshift_logging_mux_cpu_limit: 100m +openshift_logging_mux_memory_limit: 512Mi # following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly #es_logging_contents: @@ -127,3 +138,5 @@ openshift_logging_storage_access_modes: "{{ openshift_hosted_logging_storage_acc #fluentd_config_contents: #fluentd_throttle_contents: #fluentd_secureforward_contents: +#fluentd_mux_config_contents: +#fluentd_mux_secureforward_contents: diff --git a/roles/openshift_logging/handlers/main.yml b/roles/openshift_logging/handlers/main.yml index ffb812271..69c5a1663 100644 --- a/roles/openshift_logging/handlers/main.yml +++ b/roles/openshift_logging/handlers/main.yml @@ -4,6 +4,15 @@ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) notify: Verify API Server +- name: restart master api + systemd: name={{ openshift.common.service_type }}-master-api state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + notify: Verify API Server + +- name: restart master controllers + systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and # wait_for port doesn't provide health information. 
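Aside: the defaults hunk above introduces the mux (secure_forward aggregator) settings. A short sketch of how an inventory might opt in, using only the variables defined in that hunk; this is illustrative rather than a recommended configuration, and note that `openshift_logging_use_mux` itself defaults to the value of `openshift_logging_mux_allow_external`:

    # hypothetical inventory fragment (values illustrative)
    openshift_logging_use_mux: True                # deploy the logging-mux DeploymentConfig and service
    openshift_logging_use_mux_client: True         # node fluentd forwards to mux instead of Elasticsearch
    openshift_logging_mux_allow_external: False    # keep the forward listener cluster-internal
    openshift_logging_mux_port: 24284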
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index 188ea246c..2f5b68b4d 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -44,6 +44,7 @@ - logging-kibana - logging-kibana-proxy - logging-curator + - logging-mux ignore_errors: yes register: delete_result changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 @@ -109,5 +110,6 @@ - logging-curator - logging-elasticsearch - logging-fluentd + - logging-mux register: delete_result changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0 diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 740e490e1..b34df018d 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -45,6 +45,21 @@ - procure_component: kibana-internal hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}" +- include: procure_server_certs.yaml + loop_control: + loop_var: cert_info + with_items: + - procure_component: mux + hostnames: "logging-mux, {{openshift_logging_mux_hostname}}" + when: openshift_logging_use_mux + +- include: procure_shared_key.yaml + loop_control: + loop_var: shared_key_info + with_items: + - procure_component: mux + when: openshift_logging_use_mux + - name: Copy proxy TLS configuration file copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json when: server_tls_json is undefined @@ -85,6 +100,14 @@ loop_control: loop_var: node_name +- name: Generate PEM cert for mux + include: generate_pems.yaml component={{node_name}} + with_items: + - system.logging.mux + loop_control: + loop_var: node_name + when: openshift_logging_use_mux + - name: Creating necessary JKS certs include: generate_jks.yaml diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml index 253543f54..b047eb35a 100644 --- a/roles/openshift_logging/tasks/generate_configmaps.yaml +++ b/roles/openshift_logging/tasks/generate_configmaps.yaml @@ -21,6 +21,8 @@ dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml" vars: - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}" + - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}" + - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}" when: es_config_contents is undefined changed_when: no @@ -134,3 +136,43 @@ when: fluentd_configmap.stdout is defined changed_when: no check_mode: no + +- block: + - copy: + src: fluent.conf + dest: "{{mktemp.stdout}}/fluent-mux.conf" + when: fluentd_mux_config_contents is undefined + changed_when: no + + - copy: + src: secure-forward.conf + dest: "{{mktemp.stdout}}/secure-forward-mux.conf" + when: fluentd_mux_securefoward_contents is undefined + changed_when: no + + - copy: + content: "{{fluentd_mux_config_contents}}" + dest: "{{mktemp.stdout}}/fluent-mux.conf" + when: fluentd_mux_config_contents is defined + changed_when: no + + - copy: + content: "{{fluentd_mux_secureforward_contents}}" + dest: "{{mktemp.stdout}}/secure-forward-mux.conf" + when: fluentd_mux_secureforward_contents is defined + changed_when: no + + - command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux + 
--from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf + --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run + register: mux_configmap + changed_when: no + + - copy: + content: "{{mux_configmap.stdout}}" + dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml" + when: mux_configmap.stdout is defined + changed_when: no + check_mode: no + when: openshift_logging_use_mux diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml index e77da7a24..f76bb3a0a 100644 --- a/roles/openshift_logging/tasks/generate_routes.yaml +++ b/roles/openshift_logging/tasks/generate_routes.yaml @@ -1,14 +1,14 @@ --- - set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }} - when: "{{ openshift_logging_kibana_key | trim | length > 0 }}" + when: openshift_logging_kibana_key | trim | length > 0 changed_when: false - set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode }} - when: "{{openshift_logging_kibana_cert | trim | length > 0}}" + when: openshift_logging_kibana_cert | trim | length > 0 changed_when: false - set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode }} - when: "{{openshift_logging_kibana_ca | trim | length > 0}}" + when: openshift_logging_kibana_ca | trim | length > 0 changed_when: false - set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }} diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml index f396bcc6d..c1da49fd8 100644 --- a/roles/openshift_logging/tasks/generate_secrets.yaml +++ b/roles/openshift_logging/tasks/generate_secrets.yaml @@ -34,6 +34,36 @@ check_mode: no changed_when: no +- name: Retrieving the cert to use when generating secrets for mux + slurp: src="{{generated_certs_dir}}/{{item.file}}" + register: mux_key_pairs + with_items: + - { name: "ca_file", file: "ca.crt" } + - { name: "mux_key", file: "system.logging.mux.key"} + - { name: "mux_cert", file: "system.logging.mux.crt"} + - { name: "mux_shared_key", file: "mux_shared_key"} + when: openshift_logging_use_mux + +- name: Generating secrets for mux + template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml + vars: + secret_name: "logging-{{component}}" + secret_key_file: "{{component}}_key" + secret_cert_file: "{{component}}_cert" + secrets: + - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"} + - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"} + - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"} + - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"} + secret_keys: ["ca", "cert", "key", "shared_key"] + with_items: + - mux + loop_control: + loop_var: component + check_mode: no + changed_when: no + when: openshift_logging_use_mux + - name: Generating secrets for kibana proxy template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml vars: @@ -43,7 +73,7 @@ - {key: session-secret, value: "{{session_secret}}"} - {key: server-key, value: "{{kibana_key_file}}"} - {key: server-cert, value: "{{kibana_cert_file}}"} - - {key: server-tls, value: "{{server_tls_file}}"} + - {key: server-tls.json, value: "{{server_tls_file}}"} secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"] kibana_key_file: "{{key_pairs 
| entry_from_named_pair('kibana_internal_key')| b64decode }}" kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}" diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml index 5091c1209..e3a5c5eb3 100644 --- a/roles/openshift_logging/tasks/generate_services.yaml +++ b/roles/openshift_logging/tasks/generate_services.yaml @@ -85,3 +85,35 @@ when: openshift_logging_use_ops | bool check_mode: no changed_when: no + +- name: Generating logging-mux service for external connections + template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml + vars: + obj_name: logging-mux + ports: + - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward} + labels: + logging-infra: support + selector: + provider: openshift + component: mux + externalIPs: + - "{{ ansible_eth0.ipv4.address }}" + check_mode: no + changed_when: no + when: openshift_logging_mux_allow_external + +- name: Generating logging-mux service for intra-cluster connections + template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml + vars: + obj_name: logging-mux + ports: + - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward} + labels: + logging-infra: support + selector: + provider: openshift + component: mux + check_mode: no + changed_when: no + when: openshift_logging_use_mux and not openshift_logging_mux_allow_external diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml index 28fad420b..a981e7f7f 100644 --- a/roles/openshift_logging/tasks/install_elasticsearch.yaml +++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml @@ -3,62 +3,51 @@ set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }} - set_fact: openshift_logging_es_pvc_prefix="logging-es" - when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''" + when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == '' -- set_fact: es_pvc_pool={{[]}} +- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }} + with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }} -- set_fact: openshift_logging_es_pvc_prefix="{{ openshift_logging_es_pvc_prefix | default('logging-es') }}" - -- name: Generate PersistentVolumeClaims - include: "{{ role_path}}/tasks/generate_pvcs.yaml" +### evaluate if the PVC attached to the dc currently matches the provided vars +## if it does then we reuse that pvc in the DC +- include: set_es_storage.yaml vars: - es_pv_selector: "{{openshift_logging_es_pv_selector}}" - es_pvc_dynamic: "{{openshift_logging_es_pvc_dynamic | bool}}" - es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}" - es_pvc_prefix: "{{openshift_logging_es_pvc_prefix}}" - es_pvc_size: "{{openshift_logging_es_pvc_size}}" - es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}" - es_cluster_size: "{{openshift_logging_es_cluster_size}}" - es_access_modes: "{{ openshift_logging_storage_access_modes }}" - -# we should initialize the es_dc_pool with the current keys -- name: Init pool of DeploymentConfig names for Elasticsearch - set_fact: es_dc_pool={{ es_dc_pool | default([]) + [deploy_name] }} - with_items: "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}" + es_component: es + es_name: 
"{{ deployment.0 }}" + es_spec: "{{ deployment.1 }}" + es_pvc_count: "{{ deployment.2 | int }}" + es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}" + es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() | count }}" + es_pvc_size: "{{ openshift_logging_es_pvc_size }}" + es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}" + es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}" + es_pv_selector: "{{ openshift_logging_es_pv_selector }}" + es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}" + es_memory_limit: "{{ openshift_logging_es_memory_limit }}" + with_together: + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}" + - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}" + - "{{ es_indices | default([]) }}" loop_control: - loop_var: deploy_name - -# This should be used to generate new DC names if necessary -- name: Create new DeploymentConfig names for Elasticsearch - set_fact: es_dc_pool={{es_dc_pool|default([]) + [deploy_name]}} - vars: - component: es - es_cluster_name: "{{component}}" - deploy_name_prefix: "logging-{{component}}" - deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" - with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_current_es_size | int }} - check_mode: no + loop_var: deployment +## if it does not then we should create one that does and attach it -- name: Generate Elasticsearch DeploymentConfig - template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml +## create new dc/pvc is needed +- include: set_es_storage.yaml vars: - component: es - logging_component: elasticsearch - deploy_name_prefix: "logging-{{component}}" - image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" - es_cluster_name: "{{component}}" - es_cpu_limit: "{{openshift_logging_es_cpu_limit }}" - es_memory_limit: "{{openshift_logging_es_memory_limit}}" - pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}" - deploy_name: "{{item.1}}" - es_node_selector: "{{openshift_logging_es_nodeselector | default({}) }}" - es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim)}}" - es_number_of_shards: "{{ openshift_logging_es_number_of_shards }}" - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas }}" - with_indexed_items: - - "{{ es_dc_pool }}" - check_mode: no - changed_when: no + es_component: es + es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" + es_spec: "{}" + es_pvc_count: "{{ item | int - 1 }}" + es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}" + es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch.pvcs.keys() | count, openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count] | max }}" + es_pvc_size: "{{ openshift_logging_es_pvc_size }}" + es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}" + es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}" + es_pv_selector: "{{ openshift_logging_es_pv_selector }}" + es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}" + es_memory_limit: "{{ openshift_logging_es_memory_limit }}" + with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }} # --------- Tasks for Operation clusters --------- @@ -73,74 +62,57 @@ es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}" 
cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}" when: - - openshift_logging_use_ops | bool - - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}" + - openshift_logging_use_ops | bool + - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}" check_mode: no - set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops" - when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''" - -- set_fact: es_pvc_pool={{[]}} - -- name: Generate PersistentVolumeClaims for Ops - include: "{{ role_path}}/tasks/generate_pvcs.yaml" - vars: - es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}" - es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}" - es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}" - es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}" - es_cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}" - es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic | bool}}" - es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}" - es_access_modes: "{{ openshift_logging_storage_access_modes }}" - when: - - openshift_logging_use_ops | bool - check_mode: no + when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == '' -- name: Init pool of DeploymentConfig names for Elasticsearch Ops - set_fact: es_ops_dc_pool={{ es_ops_dc_pool | default([]) + [deploy_name] }} - with_items: "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}" - loop_control: - loop_var: deploy_name +- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }} + with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }} when: - - openshift_logging_use_ops | bool + - openshift_logging_use_ops | bool -- name: Create new DeploymentConfig names for Elasticsearch Ops - set_fact: es_ops_dc_pool={{es_ops_dc_pool | default([]) + [deploy_name]}} +- include: set_es_storage.yaml vars: - component: es-ops - es_cluster_name: "{{component}}" - deploy_name_prefix: "logging-{{component}}" - deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" - cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}" - with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_current_es_ops_size | int }} + es_component: es-ops + es_name: "{{ deployment.0 }}" + es_spec: "{{ deployment.1 }}" + es_pvc_count: "{{ deployment.2 | int }}" + es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}" + es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count }}" + es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" + es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}" + es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}" + es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" + es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" + es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" + with_together: + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}" + - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}" + - "{{ es_ops_indices | default([]) }}" + loop_control: + loop_var: deployment when: - - openshift_logging_use_ops | bool - check_mode: no + - openshift_logging_use_ops | bool +## if it does not then we should create one that does and attach it -- name: Generate Elasticsearch 
DeploymentConfig for Ops - template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml +## create new dc/pvc is needed +- include: set_es_storage.yaml vars: - component: es-ops - logging_component: elasticsearch - deploy_name_prefix: "logging-{{component}}" - image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" - pvc_claim: "{{(es_pvc_pool | length > item.0) | ternary(es_pvc_pool[item.0], None)}}" - deploy_name: "{{item.1}}" - es_cluster_name: "{{component}}" - es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}" - es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}" - es_node_quorum: "{{es_ops_node_quorum}}" - es_recover_after_nodes: "{{es_ops_recover_after_nodes}}" - es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}" - openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}" - es_node_selector: "{{openshift_logging_es_ops_nodeselector | default({}) }}" - es_storage: "{{openshift_logging_facts|es_storage(deploy_name, pvc_claim,root='elasticsearch_ops')}}" - es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards }}" - es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas }}" - with_indexed_items: - - "{{ es_ops_dc_pool | default([]) }}" + es_component: es-ops + es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}" + es_spec: "{}" + es_pvc_count: "{{ item | int - 1 }}" + es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}" + es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count, openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count] | max }}" + es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}" + es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}" + es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}" + es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" + es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}" + es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}" + with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }} when: - - openshift_logging_use_ops | bool - check_mode: no - changed_when: no + - openshift_logging_use_ops | bool diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml index 35273829c..6bc405819 100644 --- a/roles/openshift_logging/tasks/install_fluentd.yaml +++ b/roles/openshift_logging/tasks/install_fluentd.yaml @@ -32,7 +32,7 @@ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd register: fluentd_output - failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr" + failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr check_mode: no when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 @@ -49,6 +49,6 @@ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd register: fluentd2_output - failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr" + failed_when: fluentd2_output.rc == 1 
and 'exists' not in fluentd2_output.stderr check_mode: no when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index 83b68fa77..aec455c22 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -27,6 +27,10 @@ loop_control: loop_var: install_component +- name: Install logging mux + include: "{{ role_path }}/tasks/install_mux.yaml" + when: openshift_logging_use_mux + - find: paths={{ mktemp.stdout }}/templates patterns=*.yaml register: object_def_files changed_when: no diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml new file mode 100644 index 000000000..91eeb95a1 --- /dev/null +++ b/roles/openshift_logging/tasks/install_mux.yaml @@ -0,0 +1,67 @@ +--- +- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }} + check_mode: no + +- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }} + check_mode: no + +- name: Check mux current replica count + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux + -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}} + register: mux_replica_count + when: not ansible_check_mode + ignore_errors: yes + changed_when: no + +- name: Generating mux deploymentconfig + template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml + vars: + component: mux + logging_component: mux + deploy_name: "logging-{{component}}" + image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}" + es_host: logging-es + es_port: "{{openshift_logging_es_port}}" + ops_host: "{{ mux_ops_host }}" + ops_port: "{{ mux_ops_port }}" + mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}" + mux_memory_limit: "{{openshift_logging_mux_memory_limit}}" + replicas: "{{mux_replica_count.stdout | default (0)}}" + mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}" + check_mode: no + changed_when: no + +- name: "Check mux hostmount-anyuid permissions" + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + get scc/hostmount-anyuid -o jsonpath='{.users}' + register: mux_hostmount_anyuid + check_mode: no + changed_when: no + +- name: "Set hostmount-anyuid permissions for mux" + command: > + {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy + add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd + register: mux_output + failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr + check_mode: no + when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 + +- name: "Check mux cluster-reader permissions" + command: > + {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig + get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}' + register: mux_cluster_reader + check_mode: no + changed_when: no + +- name: "Set cluster-reader permissions for mux" + command: > + {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig 
policy + add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd + register: mux2_output + failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr + check_mode: no + when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1 diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml index da0bbb627..877ce3149 100644 --- a/roles/openshift_logging/tasks/install_support.yaml +++ b/roles/openshift_logging/tasks/install_support.yaml @@ -1,17 +1,36 @@ --- # This is the base configuration for installing the other components -- name: Check for logging project already exists - command: > - {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers - register: logging_project_result - ignore_errors: yes - when: not ansible_check_mode - changed_when: no +- name: Set logging project + oc_project: + state: present + name: "{{ openshift_logging_namespace }}" + node_selector: "{{ openshift_logging_nodeselector | default(null) }}" + +- name: Labelling logging project + oc_label: + state: present + kind: namespace + name: "{{ openshift_logging_namespace }}" + labels: + - key: "{{ item.key }}" + value: "{{ item.value }}" + with_dict: "{{ openshift_logging_labels | default({}) }}" + when: + - openshift_logging_labels is defined + - openshift_logging_labels is dict -- name: "Create logging project" - command: > - {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}} - when: not ansible_check_mode and "not found" in logging_project_result.stderr +- name: Labelling logging project + oc_label: + state: present + kind: namespace + name: "{{ openshift_logging_namespace }}" + labels: + - key: "{{ openshift_logging_label_key }}" + value: "{{ openshift_logging_label_value }}" + when: + - openshift_logging_label_key is defined + - openshift_logging_label_key != "" + - openshift_logging_label_value is defined - name: Create logging cert directory file: path={{openshift.common.config_base}}/logging state=directory mode=0755 diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml index c7f4a2f93..387da618d 100644 --- a/roles/openshift_logging/tasks/main.yaml +++ b/roles/openshift_logging/tasks/main.yaml @@ -1,7 +1,7 @@ --- - fail: msg: Only one Fluentd nodeselector key pair should be provided - when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1" + when: openshift_logging_fluentd_nodeselector.keys() | count > 1 - name: Set default image variables based on deployment_type include_vars: "{{ item }}" diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml index cb9509de1..a0ed56ebd 100644 --- a/roles/openshift_logging/tasks/oc_apply.yaml +++ b/roles/openshift_logging/tasks/oc_apply.yaml @@ -1,52 +1,52 @@ --- -- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} - command: > - {{ openshift.common.client_binary }} - --config={{ kubeconfig }} - get {{file_content.kind}} {{file_content.metadata.name}} - -o jsonpath='{.metadata.resourceVersion}' - -n {{namespace}} - register: generation_init - failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''" - changed_when: no +- oc_obj: + kind: "{{ file_content.kind 
}}" + name: "{{ file_content.metadata.name }}" + state: present + namespace: "{{ namespace }}" + files: + - "{{ file_name }}" + when: file_content.kind not in ["Service", "Route"] -- name: Applying {{file_name}} - command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} - apply -f {{ file_name }} - -n {{ namespace }} - register: generation_apply - failed_when: "'error' in generation_apply.stderr" - changed_when: no +## still need to do this for services until the template logic is replaced by oc_* +- block: + - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}} + command: > + {{ openshift.common.client_binary }} + --config={{ kubeconfig }} + get {{file_content.kind}} {{file_content.metadata.name}} + -o jsonpath='{.metadata.resourceVersion}' + -n {{namespace}} + register: generation_init + failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''" + changed_when: no -- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}} - command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} - get {{file_content.kind}} {{file_content.metadata.name}} - -o jsonpath='{.metadata.resourceVersion}' - -n {{namespace}} - register: generation_changed - failed_when: "'not found' not in generation_changed.stderr and generation_changed.stdout == ''" - changed_when: generation_changed.stdout | default (0) | int > generation_init.stdout | default(0) | int - when: - - "'field is immutable' not in generation_apply.stderr" + - name: Applying {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + apply -f {{ file_name }} + -n {{ namespace }} + register: generation_apply + failed_when: "'error' in generation_apply.stderr" + changed_when: no -- name: Removing previous {{file_name}} - command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} - delete -f {{ file_name }} - -n {{ namespace }} - register: generation_delete - failed_when: "'error' in generation_delete.stderr" - changed_when: generation_delete.rc == 0 - when: "'field is immutable' in generation_apply.stderr" + - name: Removing previous {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + delete -f {{ file_name }} + -n {{ namespace }} + register: generation_delete + failed_when: "'error' in generation_delete.stderr" + changed_when: generation_delete.rc == 0 + when: "'field is immutable' in generation_apply.stderr" -- name: Recreating {{file_name}} - command: > - {{ openshift.common.client_binary }} --config={{ kubeconfig }} - apply -f {{ file_name }} - -n {{ namespace }} - register: generation_apply - failed_when: "'error' in generation_apply.stderr" - changed_when: generation_apply.rc == 0 - when: "'field is immutable' in generation_apply.stderr" + - name: Recreating {{file_name}} + command: > + {{ openshift.common.client_binary }} --config={{ kubeconfig }} + apply -f {{ file_name }} + -n {{ namespace }} + register: generation_apply + failed_when: "'error' in generation_apply.stderr" + changed_when: generation_apply.rc == 0 + when: "'field is immutable' in generation_apply.stderr" + when: file_content.kind in ["Service", "Route"] diff --git a/roles/openshift_logging/tasks/procure_shared_key.yaml b/roles/openshift_logging/tasks/procure_shared_key.yaml new file mode 100644 index 000000000..056ff6b98 --- /dev/null +++ b/roles/openshift_logging/tasks/procure_shared_key.yaml @@ -0,0 +1,25 @@ +--- +- name: Checking for {{ 
shared_key_info.procure_component }}_shared_key + stat: path="{{generated_certs_dir}}/{{ shared_key_info.procure_component }}_shared_key" + register: component_shared_key_file + check_mode: no + +- name: Trying to discover shared key variable name for {{ shared_key_info.procure_component }} + set_fact: procure_component_shared_key={{ lookup('env', '{{shared_key_info.procure_component}}' + '_shared_key') }} + when: + - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined + check_mode: no + +- name: Creating shared_key for {{ shared_key_info.procure_component }} + copy: content="{{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}" + dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key" + check_mode: no + when: + - not component_shared_key_file.stat.exists + +- name: Copying shared key for {{ shared_key_info.procure_component }} to generated certs directory + copy: content="{{procure_component_shared_key}}" dest="{{generated_certs_dir}}/{{shared_key_info.procure_component}}_shared_key" + check_mode: no + when: + - shared_key_info[ shared_key_info.procure_component + '_shared_key' ] is defined + - not component_shared_key_file.stat.exists diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml new file mode 100644 index 000000000..4afe4e641 --- /dev/null +++ b/roles/openshift_logging/tasks/set_es_storage.yaml @@ -0,0 +1,80 @@ +--- +- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}" + when: es_spec.volumes is defined + +- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}" + when: + - es_spec.volumes is defined + - es_storage_type.persistentVolumeClaim is defined + +- set_fact: es_storage_claim="" + when: + - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined + +## take an ES dc and evaluate its storage option +# if it is a hostmount or emptydir we don't do anything with it +# if its a pvc we see if the corresponding pvc matches the provided specs (if they exist) +- oc_obj: + state: list + kind: pvc + name: "{{ es_storage_claim }}" + namespace: "{{ openshift_logging_namespace }}" + register: pvc_spec + failed_when: pvc_spec.results.stderr is defined + when: + - es_spec.volumes is defined + - es_storage_type.persistentVolumeClaim is defined + +- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}" + when: + - pvc_spec.results is defined + - pvc_spec.results.results[0].spec is defined + +# if not create the pvc and use it +- block: + + - name: Generating PersistentVolumeClaims + template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml + vars: + obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}" + size: "{{ es_pvc_size }}" + access_modes: "{{ openshift_logging_storage_access_modes }}" + pv_selector: "{{ es_pv_selector }}" + when: not es_pvc_dynamic | bool + check_mode: no + changed_when: no + + - name: Generating PersistentVolumeClaims - Dynamic + template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml + vars: + obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}" + annotations: + volume.alpha.kubernetes.io/storage-class: "dynamic" + size: "{{ es_pvc_size }}" + access_modes: "{{ openshift_logging_storage_access_modes }}" + pv_selector: "{{ es_pv_selector }}" + when: es_pvc_dynamic | bool + 
check_mode: no + changed_when: no + + - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}" + + when: + - es_pvc_size | search('^\d.*') + - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) ) + +- name: Generate Elasticsearch DeploymentConfig + template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml + vars: + component: "{{ es_component }}" + deploy_name: "{{ es_name }}" + logging_component: elasticsearch + deploy_name_prefix: "logging-{{ es_component }}" + image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}" + es_cluster_name: "{{component}}" + es_cpu_limit: "{{ es_cpu_limit }}" + es_memory_limit: "{{ es_memory_limit }}" + es_node_selector: "{{ es_node_selector }}" + es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}" + check_mode: no + changed_when: no diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml index edbb62c3e..c1592b830 100644 --- a/roles/openshift_logging/tasks/start_cluster.yaml +++ b/roles/openshift_logging/tasks/start_cluster.yaml @@ -21,6 +21,29 @@ loop_control: loop_var: fluentd_host +- name: Retrieve mux + oc_obj: + state: list + kind: dc + selector: "component=mux" + namespace: "{{openshift_logging_namespace}}" + register: mux_dc + when: openshift_logging_use_mux + +- name: start mux + oc_scale: + kind: dc + name: "{{ object }}" + namespace: "{{openshift_logging_namespace}}" + replicas: "{{ openshift_logging_mux_replica_count | default (1) }}" + with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}" + loop_control: + loop_var: object + when: + - mux_dc.results is defined + - mux_dc.results.results is defined + - openshift_logging_use_mux + - name: Retrieve elasticsearch oc_obj: state: list diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml index 4b3722e29..f4b419d84 100644 --- a/roles/openshift_logging/tasks/stop_cluster.yaml +++ b/roles/openshift_logging/tasks/stop_cluster.yaml @@ -21,6 +21,26 @@ loop_control: loop_var: fluentd_host +- name: Retrieve mux + oc_obj: + state: list + kind: dc + selector: "component=mux" + namespace: "{{openshift_logging_namespace}}" + register: mux_dc + when: openshift_logging_use_mux + +- name: stop mux + oc_scale: + kind: dc + name: "{{ object }}" + namespace: "{{openshift_logging_namespace}}" + replicas: 0 + with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}" + loop_control: + loop_var: object + when: openshift_logging_use_mux + - name: Retrieve elasticsearch oc_obj: state: list diff --git a/roles/openshift_logging/tasks/update_master_config.yaml b/roles/openshift_logging/tasks/update_master_config.yaml index cef835668..10f522b61 100644 --- a/roles/openshift_logging/tasks/update_master_config.yaml +++ b/roles/openshift_logging/tasks/update_master_config.yaml @@ -4,6 +4,9 @@ dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: assetConfig.loggingPublicURL yaml_value: "https://{{ openshift_logging_kibana_hostname }}" - notify: restart master + notify: + - restart master + - restart master api + - restart master controllers tags: - - update_master_config + - update_master_config diff 
--git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2 index a0fefd882..c6284166b 100644 --- a/roles/openshift_logging/templates/curator.j2 +++ b/roles/openshift_logging/templates/curator.j2 @@ -89,9 +89,6 @@ spec: - name: config mountPath: /etc/curator/settings readOnly: true - - name: elasticsearch-storage - mountPath: /elasticsearch/persistent - readOnly: true volumes: - name: certs secret: @@ -99,5 +96,3 @@ spec: - name: config configMap: name: logging-curator - - name: elasticsearch-storage - emptyDir: {} diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2 index 0bf1686ad..5c93d823e 100644 --- a/roles/openshift_logging/templates/fluentd.j2 +++ b/roles/openshift_logging/templates/fluentd.j2 @@ -59,6 +59,14 @@ spec: - name: dockercfg mountPath: /etc/sysconfig/docker readOnly: true + - name: dockerdaemoncfg + mountPath: /etc/docker + readOnly: true +{% if openshift_logging_use_mux_client | bool %} + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true +{% endif %} env: - name: "K8S_HOST_URL" value: "{{openshift_logging_master_url}}" @@ -122,6 +130,8 @@ spec: value: "{{openshift_logging_fluentd_journal_source | default('')}}" - name: "JOURNAL_READ_FROM_HEAD" value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}" + - name: "USE_MUX_CLIENT" + value: "{{openshift_logging_use_mux_client| default('false')}}" volumes: - name: runlogjournal hostPath: @@ -147,3 +157,11 @@ spec: - name: dockercfg hostPath: path: /etc/sysconfig/docker + - name: dockerdaemoncfg + hostPath: + path: /etc/docker +{% if openshift_logging_use_mux_client | bool %} + - name: muxcerts + secret: + secretName: logging-mux +{% endif %} diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging/templates/mux.j2 new file mode 100644 index 000000000..41e6abd52 --- /dev/null +++ b/roles/openshift_logging/templates/mux.j2 @@ -0,0 +1,121 @@ +apiVersion: "v1" +kind: "DeploymentConfig" +metadata: + name: "{{deploy_name}}" + labels: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" +spec: + replicas: {{replicas|default(0)}} + selector: + provider: openshift + component: "{{component}}" + logging-infra: "{{logging_component}}" + strategy: + rollingParams: + intervalSeconds: 1 + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + name: "{{deploy_name}}" + labels: + logging-infra: "{{logging_component}}" + provider: openshift + component: "{{component}}" + spec: + serviceAccountName: aggregated-logging-fluentd +{% if mux_node_selector is iterable and mux_node_selector | length > 0 %} + nodeSelector: +{% for key, value in mux_node_selector.iteritems() %} + {{key}}: "{{value}}" +{% endfor %} +{% endif %} + containers: + - name: "mux" + image: {{image}} + imagePullPolicy: Always +{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %} + resources: + limits: +{% if mux_cpu_limit is not none %} + cpu: "{{mux_cpu_limit}}" +{% endif %} +{% if mux_memory_limit is not none %} + memory: "{{mux_memory_limit}}" +{% endif %} +{% endif %} + ports: + - containerPort: "{{ openshift_logging_mux_port }}" + name: mux-forward + volumeMounts: + - name: config + mountPath: /etc/fluent/configs.d/user + readOnly: true + - name: certs + mountPath: /etc/fluent/keys + readOnly: true + - name: dockerhostname + mountPath: /etc/docker-hostname + readOnly: true + - name: 
localtime + mountPath: /etc/localtime + readOnly: true + - name: muxcerts + mountPath: /etc/fluent/muxkeys + readOnly: true + env: + - name: "K8S_HOST_URL" + value: "{{openshift_logging_master_url}}" + - name: "ES_HOST" + value: "{{openshift_logging_es_host}}" + - name: "ES_PORT" + value: "{{openshift_logging_es_port}}" + - name: "ES_CLIENT_CERT" + value: "{{openshift_logging_es_client_cert}}" + - name: "ES_CLIENT_KEY" + value: "{{openshift_logging_es_client_key}}" + - name: "ES_CA" + value: "{{openshift_logging_es_ca}}" + - name: "OPS_HOST" + value: "{{ops_host}}" + - name: "OPS_PORT" + value: "{{ops_port}}" + - name: "OPS_CLIENT_CERT" + value: "{{openshift_logging_es_ops_client_cert}}" + - name: "OPS_CLIENT_KEY" + value: "{{openshift_logging_es_ops_client_key}}" + - name: "OPS_CA" + value: "{{openshift_logging_es_ops_ca}}" + - name: "USE_JOURNAL" + value: "false" + - name: "JOURNAL_SOURCE" + value: "{{openshift_logging_fluentd_journal_source | default('')}}" + - name: "JOURNAL_READ_FROM_HEAD" + value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}" + - name: FORWARD_LISTEN_HOST + value: "{{ openshift_logging_mux_hostname }}" + - name: FORWARD_LISTEN_PORT + value: "{{ openshift_logging_mux_port }}" + - name: USE_MUX + value: "true" + - name: MUX_ALLOW_EXTERNAL + value: "{{ openshift_logging_mux_allow_external| default('false') }}" + volumes: + - name: config + configMap: + name: logging-mux + - name: certs + secret: + secretName: logging-fluentd + - name: dockerhostname + hostPath: + path: /etc/hostname + - name: localtime + hostPath: + path: /etc/localtime + - name: muxcerts + secret: + secretName: logging-mux diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2 index 6c4ec0c76..70644a39c 100644 --- a/roles/openshift_logging/templates/service.j2 +++ b/roles/openshift_logging/templates/service.j2 @@ -26,3 +26,9 @@ spec: {% for key, value in selector.iteritems() %} {{key}}: {{value}} {% endfor %} +{% if externalIPs is defined -%} + externalIPs: +{% for ip in externalIPs %} + - {{ ip }} +{% endfor %} +{% endif %} diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml index f202486a5..cfc4e2722 100644 --- a/roles/openshift_manageiq/tasks/main.yaml +++ b/roles/openshift_manageiq/tasks/main.yaml @@ -3,24 +3,13 @@ msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1." 
when: not openshift.common.version_gte_3_1_or_1_1 | bool -- name: Copy Configuration to temporary conf - command: > - cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}} - changed_when: false - - name: Add Management Infrastructure project - command: > - {{ openshift.common.client_binary }} adm new-project - management-infra - --description="Management Infrastructure" - --config={{manage_iq_tmp_conf}} - register: osmiq_create_mi_project - failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0" - changed_when: osmiq_create_mi_project.rc == 0 + oc_project: + name: management-infra + description: Management Infrastructure - name: Create Admin and Image Inspector Service Account oc_serviceaccount: - kubeconfig: "{{ openshift_master_config_dir }}/admin.kubeconfig" name: "{{ item }}" namespace: management-infra state: present @@ -28,51 +17,42 @@ - management-admin - inspector-admin -- name: Create Cluster Role - shell: > - echo {{ manageiq_cluster_role | to_json | quote }} | - {{ openshift.common.client_binary }} create - --config={{manage_iq_tmp_conf}} - -f - - register: osmiq_create_cluster_role - failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0" - changed_when: osmiq_create_cluster_role.rc == 0 +- name: Create manageiq cluster role + oc_clusterrole: + name: management-infra-admin + rules: + - apiGroups: + - "" + resources: + - pods/proxy + verbs: + - "*" - name: Create Hawkular Metrics Admin Cluster Role - shell: > - echo {{ manageiq_metrics_admin_clusterrole | to_json | quote }} | - {{ openshift.common.client_binary }} - --config={{manage_iq_tmp_conf}} - create -f - - register: oshawkular_create_cluster_role - failed_when: "'already exists' not in oshawkular_create_cluster_role.stderr and oshawkular_create_cluster_role.rc != 0" - changed_when: oshawkular_create_cluster_role.rc == 0 - # AUDIT:changed_when_note: Checking the return code is insufficient - # here. We really need to verify the if the role even exists before - # we run this task. + oc_clusterrole: + name: hawkular-metrics-admin + rules: + - apiGroups: + - "" + resources: + - hawkular-alerts + - hawkular-metrics + verbs: + - "*" - name: Configure role/user permissions - command: > - {{ openshift.common.client_binary }} adm {{item}} - --config={{manage_iq_tmp_conf}} - with_items: "{{manage_iq_tasks}}" - register: osmiq_perm_task - failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0" - changed_when: osmiq_perm_task.rc == 0 - # AUDIT:changed_when_note: Checking the return code is insufficient - # here. We really need to compare the current role/user permissions - # with their expected state. I think we may have a module for this? 
- + oc_adm_policy_user: + namespace: management-infra + resource_name: "{{ item.resource_name }}" + resource_kind: "{{ item.resource_kind }}" + user: "{{ item.user }}" + with_items: "{{ manage_iq_tasks }}" - name: Configure 3_2 role/user permissions - command: > - {{ openshift.common.client_binary }} adm {{item}} - --config={{manage_iq_tmp_conf}} + oc_adm_policy_user: + namespace: management-infra + resource_name: "{{ item.resource_name }}" + resource_kind: "{{ item.resource_kind }}" + user: "{{ item.user }}" with_items: "{{manage_iq_openshift_3_2_tasks}}" - register: osmiq_perm_3_2_task - failed_when: osmiq_perm_3_2_task.rc != 0 - changed_when: osmiq_perm_3_2_task.rc == 0 when: openshift.common.version_gte_3_2_or_1_2 | bool - -- name: Clean temporary configuration file - file: path={{manage_iq_tmp_conf}} state=absent diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml index 9936bb126..15d667628 100644 --- a/roles/openshift_manageiq/vars/main.yml +++ b/roles/openshift_manageiq/vars/main.yml @@ -1,41 +1,31 @@ --- -openshift_master_config_dir: "{{ openshift.common.config_base }}/master" -manageiq_cluster_role: - apiVersion: v1 - kind: ClusterRole - metadata: - name: management-infra-admin - rules: - - resources: - - pods/proxy - verbs: - - '*' - -manageiq_metrics_admin_clusterrole: - apiVersion: v1 - kind: ClusterRole - metadata: - name: hawkular-metrics-admin - rules: - - apiGroups: - - "" - resources: - - hawkular-metrics - - hawkular-alerts - verbs: - - '*' - -manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig - manage_iq_tasks: -- policy add-role-to-user -n management-infra admin -z management-admin -- policy add-role-to-user -n management-infra management-infra-admin -z management-admin -- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin -- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin -- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin -- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin -- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin -- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin +- resource_kind: role + resource_name: admin + user: management-admin +- resource_kind: role + resource_name: management-infra-admin + user: management-admin +- resource_kind: cluster-role + resource_name: cluster-reader + user: system:serviceaccount:management-infra:management-admin +- resource_kind: scc + resource_name: privileged + user: system:serviceaccount:management-infra:management-admin +- resource_kind: cluster-role + resource_name: system:image-puller + user: system:serviceaccount:management-infra:inspector-admin +- resource_kind: scc + resource_name: privileged + user: system:serviceaccount:management-infra:inspector-admin +- resource_kind: cluster-role + resource_name: self-provisioner + user: system:serviceaccount:management-infra:management-admin +- resource_kind: cluster-role + resource_name: hawkular-metrics-admin + user: system:serviceaccount:management-infra:management-admin manage_iq_openshift_3_2_tasks: -- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin +- resource_kind: cluster-role + resource_name: system:image-auditor + user: 
system:serviceaccount:management-infra:management-admin diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index 98e0da1a2..5522fef26 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -194,7 +194,7 @@ state: stopped when: openshift_master_ha | bool register: task_result - failed_when: "task_result|failed and 'could not' not in task_result.msg|lower" + failed_when: task_result|failed and 'could not' not in task_result.msg|lower - set_fact: master_service_status_changed: "{{ start_result | changed }}" diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 506c8b129..58fabddeb 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -90,6 +90,7 @@ dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api line: "{{ item }}" with_items: "{{ master_api_aws.stdout_lines | default([]) }}" + no_log: True - name: Preserve Master Controllers Proxy Config options command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index d4c9a96ca..2617efaf1 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -64,10 +64,10 @@ --signer-key={{ openshift_ca_key }} --signer-serial={{ openshift_ca_serial }} --overwrite=false + when: item != openshift_ca_host with_items: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) - | difference([openshift_ca_host])}}" + | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" delegate_to: "{{ openshift_ca_host }}" run_once: true @@ -94,8 +94,8 @@ creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig" with_items: "{{ hostvars | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) - | difference([openshift_ca_host])}}" + | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" + when: item != openshift_ca_host delegate_to: "{{ openshift_ca_host }}" run_once: true diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py index e570392ff..65f85066e 100644 --- a/roles/openshift_master_facts/filter_plugins/openshift_master.py +++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Custom filters for use in openshift-master ''' diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py index 7f7bc4316..b50d6d9db 100644 --- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py +++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py @@ -40,7 +40,7 @@ class LookupModule(LookupBase): # pylint: disable=line-too-long raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") if deployment_type == 
'origin': - if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']: + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']: raise AnsibleError("Unknown short_version %s" % short_version) elif deployment_type == 'openshift-enterprise': if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']: @@ -49,7 +49,7 @@ class LookupModule(LookupBase): raise AnsibleError("Unknown deployment_type %s" % deployment_type) if deployment_type == 'origin': - # convert short_version to enterpise short_version + # convert short_version to enterprise short_version short_version = re.sub('^1.', '3.', short_version) if short_version == 'latest': diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py index 66e6ecea3..a66cb3c88 100644 --- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py +++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py @@ -41,7 +41,7 @@ class LookupModule(LookupBase): raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified") if deployment_type == 'origin': - if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', '3.6', 'latest']: + if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', 'latest']: raise AnsibleError("Unknown short_version %s" % short_version) elif deployment_type == 'openshift-enterprise': if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']: diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 6f8f09b22..f048e0aef 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -128,10 +128,10 @@ - name: Test if scheduler config is readable fail: msg: "Unknown scheduler config apiVersion {{ openshift_master_scheduler_config.apiVersion }}" - when: "{{ openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1' }}" + when: openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1' - name: Set current scheduler predicates and priorities set_fact: openshift_master_scheduler_current_predicates: "{{ openshift_master_scheduler_current_config.predicates }}" openshift_master_scheduler_current_priorities: "{{ openshift_master_scheduler_current_config.priorities }}" - when: "{{ scheduler_config_stat.stat.exists }}" + when: scheduler_config_stat.stat.exists diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py index 1fab84c71..4a28fb8f8 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py +++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py @@ -55,6 +55,8 @@ DEFAULT_PREDICATES_1_5 = [ {'name': 'CheckNodeDiskPressure'}, ] +DEFAULT_PREDICATES_3_6 = DEFAULT_PREDICATES_1_5 + REGION_PREDICATE = { 'name': 'Region', 'argument': { @@ -75,9 +77,8 @@ TEST_VARS = [ ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4), ('1.5', 'origin', DEFAULT_PREDICATES_1_5), ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5), - ('1.6', 'origin', DEFAULT_PREDICATES_1_5), - ('3.6', 'origin', DEFAULT_PREDICATES_1_5), - ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5), + ('3.6', 
'origin', DEFAULT_PREDICATES_3_6), + ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_3_6), ] diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py index 1098f9391..97ef2387e 100644 --- a/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py +++ b/roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py @@ -42,6 +42,8 @@ DEFAULT_PRIORITIES_1_5 = [ {'name': 'TaintTolerationPriority', 'weight': 1} ] +DEFAULT_PRIORITIES_3_6 = DEFAULT_PRIORITIES_1_5 + ZONE_PRIORITY = { 'name': 'Zone', 'argument': { @@ -63,9 +65,8 @@ TEST_VARS = [ ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4), ('1.5', 'origin', DEFAULT_PRIORITIES_1_5), ('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5), - ('1.6', 'origin', DEFAULT_PRIORITIES_1_5), - ('3.6', 'origin', DEFAULT_PRIORITIES_1_5), - ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5), + ('3.6', 'origin', DEFAULT_PRIORITIES_3_6), + ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_3_6), ] diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_metrics/handlers/main.yml index ffb812271..69c5a1663 100644 --- a/roles/openshift_metrics/handlers/main.yml +++ b/roles/openshift_metrics/handlers/main.yml @@ -4,6 +4,15 @@ when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool)) notify: Verify API Server +- name: restart master api + systemd: name={{ openshift.common.service_type }}-master-api state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + notify: Verify API Server + +- name: restart master controllers + systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted + when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native' + - name: Verify API Server # Using curl here since the uri module requires python-httplib2 and # wait_for port doesn't provide health information. 
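Note: many hunks in this patch make the same mechanical cleanup: `when`, `changed_when` and `failed_when` conditionals are rewritten from quoted `"{{ ... }}"` strings to bare Jinja2 expressions, which is the form newer Ansible releases expect (templating the whole conditional triggers a deprecation warning). A minimal sketch of the pattern, using a hypothetical task rather than any task from these roles:

```yaml
# Older style: the entire conditional is a templated string (now deprecated).
- name: Example task (hypothetical)
  command: /bin/true
  register: my_result
  changed_when: "{{ my_result.stdout_lines | length > 0 }}"

# Preferred style: a bare expression that Ansible evaluates itself.
- name: Example task (hypothetical)
  command: /bin/true
  register: my_result
  changed_when: my_result.stdout_lines | length > 0
```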
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml index 07b7eca33..fb4fe2f03 100644 --- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml @@ -14,20 +14,22 @@ changed_when: no - name: generate password for hawkular metrics - local_action: copy dest="{{ local_tmp.stdout}}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}" + local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}" with_items: - hawkular-metrics +- local_action: slurp src="{{ local_tmp.stdout }}/hawkular-metrics.pwd" + register: hawkular_metrics_pwd + no_log: true + - name: generate htpasswd file for hawkular metrics - local_action: > - shell htpasswd -ci - '{{ local_tmp.stdout }}/hawkular-metrics.htpasswd' hawkular - < '{{ local_tmp.stdout }}/hawkular-metrics.pwd' + local_action: htpasswd path="{{ local_tmp.stdout }}/hawkular-metrics.htpasswd" name=hawkular password="{{ hawkular_metrics_pwd.content | b64decode }}" + no_log: true - name: copy local generated passwords to target copy: - src: "{{local_tmp.stdout}}/{{item}}" - dest: "{{mktemp.stdout}}/{{item}}" + src: "{{ local_tmp.stdout }}/{{ item }}" + dest: "{{ mktemp.stdout }}/{{ item }}" with_items: - hawkular-metrics.pwd - hawkular-metrics.htpasswd diff --git a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml deleted file mode 100644 index ced2df1d0..000000000 --- a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- name: generate heapster key/cert - command: > - {{ openshift.common.admin_binary }} ca create-server-cert - --config={{ mktemp.stdout }}/admin.kubeconfig - --key='{{ mktemp.stdout }}/heapster.key' - --cert='{{ mktemp.stdout }}/heapster.cert' - --hostnames=heapster - --signer-cert='{{ mktemp.stdout }}/ca.crt' - --signer-key='{{ mktemp.stdout }}/ca.key' - --signer-serial='{{ mktemp.stdout }}/ca.serial.txt' - -- when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines" - block: - - name: read files for the heapster secret - slurp: src={{ item }} - register: heapster_secret - with_items: - - "{{ mktemp.stdout }}/heapster.cert" - - "{{ mktemp.stdout }}/heapster.key" - - "{{ client_ca }}" - vars: - custom_ca: "{{ mktemp.stdout }}/heapster_client_ca.crt" - default_ca: "{{ openshift.common.config_base }}/master/ca-bundle.crt" - client_ca: "{{ custom_ca|exists|ternary(custom_ca, default_ca) }}" - - name: generate heapster secret template - template: - src: secret.j2 - dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml" - force: no - vars: - name: heapster-secrets - labels: - metrics-infra: heapster - data: - heapster.cert: "{{ heapster_secret.results[0].content }}" - heapster.key: "{{ heapster_secret.results[1].content }}" - heapster.client-ca: "{{ heapster_secret.results[2].content }}" - heapster.allowed-users: > - {{ openshift_metrics_heapster_allowed_users|b64encode }} diff --git a/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml new file mode 100644 index 000000000..e81d90ae7 --- /dev/null +++ b/roles/openshift_metrics/tasks/generate_heapster_secrets.yaml @@ -0,0 +1,14 @@ +--- +- name: generate heapster secret template + template: + src: secret.j2 + dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml" + 
force: no + vars: + name: heapster-secrets + labels: + metrics-infra: heapster + data: + heapster.allowed-users: > + {{ openshift_metrics_heapster_allowed_users|b64encode }} + when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines" diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index a467c1a51..3b4e8560f 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -23,7 +23,7 @@ changed_when: false - set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics" - when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''" + when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == '' - name: generate hawkular-cassandra persistent volume claims template: diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml index 8d27c4930..0eb852d91 100644 --- a/roles/openshift_metrics/tasks/install_heapster.yaml +++ b/roles/openshift_metrics/tasks/install_heapster.yaml @@ -22,7 +22,7 @@ with_items: - hawkular-metrics-certs - hawkular-metrics-account - when: "not {{ openshift_metrics_heapster_standalone | bool }}" + when: not openshift_metrics_heapster_standalone | bool - name: Generating serviceaccount for heapster template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-sa.yaml @@ -41,6 +41,8 @@ - {port: 80, targetPort: http-endpoint} selector: name: "{{obj_name}}" + annotations: + service.alpha.openshift.io/serving-cert-secret-name: heapster-certs labels: metrics-infra: "{{obj_name}}" name: "{{obj_name}}" @@ -64,4 +66,4 @@ namespace: "{{ openshift_metrics_project }}" changed_when: no -- include: generate_heapster_certificates.yaml +- include: generate_heapster_secrets.yaml diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index ffe6f63a2..74eb56713 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -10,11 +10,11 @@ - cassandra loop_control: loop_var: include_file - when: "not {{ openshift_metrics_heapster_standalone | bool }}" + when: not openshift_metrics_heapster_standalone | bool - name: Install Heapster Standalone include: install_heapster.yaml - when: "{{ openshift_metrics_heapster_standalone | bool }}" + when: openshift_metrics_heapster_standalone | bool - find: paths={{ mktemp.stdout }}/templates patterns=*.yaml register: object_def_files @@ -48,7 +48,7 @@ - name: Scaling down cluster to recognize changes include: stop_metrics.yaml - when: "{{ existing_metrics_rc.stdout_lines | length > 0 }}" + when: existing_metrics_rc.stdout_lines | length > 0 - name: Scaling up cluster include: start_metrics.yaml diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index c8d222c60..e8b7bea5c 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -19,7 +19,7 @@ - name: Create temp directory for all our templates file: path={{mktemp.stdout}}/templates state=directory mode=0755 changed_when: False - when: "{{ openshift_metrics_install_metrics | bool }}" + when: openshift_metrics_install_metrics | bool - name: Create temp directory local on control node local_action: command mktemp -d diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml 
b/roles/openshift_metrics/tasks/start_metrics.yaml index b5a1c8f06..2037e8dc3 100644 --- a/roles/openshift_metrics/tasks/start_metrics.yaml +++ b/roles/openshift_metrics/tasks/start_metrics.yaml @@ -20,7 +20,7 @@ loop_control: loop_var: object when: metrics_cassandra_rc is defined - changed_when: "{{metrics_cassandra_rc | length > 0 }}" + changed_when: metrics_cassandra_rc | length > 0 - command: > {{openshift.common.client_binary}} @@ -42,7 +42,7 @@ with_items: "{{metrics_metrics_rc.stdout_lines}}" loop_control: loop_var: object - changed_when: "{{metrics_metrics_rc | length > 0 }}" + changed_when: metrics_metrics_rc | length > 0 - command: > {{openshift.common.client_binary}} diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml index f69bb0f11..9a2ce9267 100644 --- a/roles/openshift_metrics/tasks/stop_metrics.yaml +++ b/roles/openshift_metrics/tasks/stop_metrics.yaml @@ -41,7 +41,7 @@ with_items: "{{metrics_hawkular_rc.stdout_lines}}" loop_control: loop_var: object - changed_when: "{{metrics_hawkular_rc | length > 0 }}" + changed_when: metrics_hawkular_rc | length > 0 - command: > {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig @@ -63,4 +63,4 @@ loop_control: loop_var: object when: metrics_cassandra_rc is defined - changed_when: "{{metrics_cassandra_rc | length > 0 }}" + changed_when: metrics_cassandra_rc | length > 0 diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 8a6be6237..9a5d52eb6 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -8,7 +8,7 @@ delete --ignore-not-found --selector=metrics-infra all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings register: delete_metrics - changed_when: "delete_metrics.stdout != 'No resources found'" + changed_when: delete_metrics.stdout != 'No resources found' - name: remove rolebindings command: > @@ -16,4 +16,4 @@ delete --ignore-not-found rolebinding/hawkular-view clusterrolebinding/heapster-cluster-reader - changed_when: "delete_metrics.stdout != 'No resources found'" + changed_when: delete_metrics.stdout != 'No resources found' diff --git a/roles/openshift_metrics/tasks/update_master_config.yaml b/roles/openshift_metrics/tasks/update_master_config.yaml index 20fc45fd4..be1e3c3a0 100644 --- a/roles/openshift_metrics/tasks/update_master_config.yaml +++ b/roles/openshift_metrics/tasks/update_master_config.yaml @@ -4,6 +4,9 @@ dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: assetConfig.metricsPublicURL yaml_value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics" - notify: restart master + notify: + - restart master + - restart master api + - restart master controllers tags: - - update_master_config + - update_master_config diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2 index f01ccfd58..ab998c2fb 100644 --- a/roles/openshift_metrics/templates/heapster.j2 +++ b/roles/openshift_metrics/templates/heapster.j2 @@ -34,9 +34,9 @@ spec: - "heapster-wrapper.sh" - "--wrapper.allowed_users_file=/secrets/heapster.allowed-users" - "--source=kubernetes.summary_api:${MASTER_URL}?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250" - - "--tls_cert=/secrets/heapster.cert" - - "--tls_key=/secrets/heapster.key" - - "--tls_client_ca=/secrets/heapster.client-ca" + - 
"--tls_cert=/heapster-certs/tls.crt" + - "--tls_key=/heapster-certs/tls.key" + - "--tls_client_ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" - "--allowed_users=%allowed_users%" - "--metric_resolution={{openshift_metrics_resolution}}" {% if not openshift_metrics_heapster_standalone %} @@ -80,6 +80,8 @@ spec: volumeMounts: - name: heapster-secrets mountPath: "/secrets" + - name: heapster-certs + mountPath: "/heapster-certs" {% if not openshift_metrics_heapster_standalone %} - name: hawkular-metrics-certs mountPath: "/hawkular-metrics-certs" @@ -94,6 +96,9 @@ spec: - name: heapster-secrets secret: secretName: heapster-secrets + - name: heapster-certs + secret: + secretName: heapster-certs {% if not openshift_metrics_heapster_standalone %} - name: hawkular-metrics-certs secret: diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2 index 8df89127b..ce0bc2eec 100644 --- a/roles/openshift_metrics/templates/service.j2 +++ b/roles/openshift_metrics/templates/service.j2 @@ -2,6 +2,12 @@ apiVersion: "v1" kind: "Service" metadata: name: "{{obj_name}}" +{% if annotations is defined%} + annotations: +{% for key, value in annotations.iteritems() %} + {{key}}: {{value}} +{% endfor %} +{% endif %} {% if labels is defined%} labels: {% for key, value in labels.iteritems() %} diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 59003bbf9..656874f56 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -38,28 +38,33 @@ - name: Check for swap usage command: grep "^[^#].*swap" /etc/fstab # grep: match any lines which don't begin with '#' and contain 'swap' - # command: swapon --summary - # Alternate option, however if swap entries are in fstab, swap will be - # enabled at boot. Grepping fstab should catch a condition when swap was - # disabled, but the fstab entries were not removed. changed_when: false failed_when: false register: swap_result - # Disable Swap Block +# Disable Swap Block - block: - name: Disable swap command: swapoff --all - name: Remove swap entries from /etc/fstab + replace: + dest: /etc/fstab + regexp: '(^[^#].*swap.*)' + replace: '# \1' + backup: yes + + - name: Add notice about disabling swap lineinfile: dest: /etc/fstab - regexp: 'swap' - state: absent + line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' + state: present - when: swap_result.stdout_lines | length > 0 - # End Disable Swap Block + when: + - swap_result.stdout_lines | length > 0 + - openshift_disable_swap | default(true) | bool +# End Disable Swap Block # We have to add tuned-profiles in the same transaction otherwise we run into depsolving # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging. 
@@ -142,7 +147,7 @@ - regex: '^AWS_SECRET_ACCESS_KEY=' line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}" no_log: True - when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined" + when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined notify: - restart node diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml index 1aa826c09..502f80434 100644 --- a/roles/openshift_node_certificates/handlers/main.yml +++ b/roles/openshift_node_certificates/handlers/main.yml @@ -6,6 +6,6 @@ - name: restart docker after updating ca trust systemd: - name: docker + name: "{{ openshift.docker.service_name }}" state: restarted when: not openshift_certificates_redeploy | default(false) | bool diff --git a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml index e91891ca9..416cf605a 100644 --- a/roles/openshift_node_upgrade/tasks/docker/upgrade.yml +++ b/roles/openshift_node_upgrade/tasks/docker/upgrade.yml @@ -6,20 +6,6 @@ # - docker_version # - skip_docker_restart -# We need docker service up to remove all the images, but these services will keep -# trying to re-start and thus re-pull the images we're trying to delete. -- name: Stop containerized services - service: name={{ item }} state=stopped - with_items: - - "{{ openshift.common.service_type }}-master" - - "{{ openshift.common.service_type }}-master-api" - - "{{ openshift.common.service_type }}-master-controllers" - - "{{ openshift.common.service_type }}-node" - - etcd_container - - openvswitch - failed_when: false - when: openshift.common.is_containerized | bool - - name: Check Docker image count shell: "docker images -aq | wc -l" register: docker_image_count @@ -45,5 +31,4 @@ - name: Upgrade Docker package: name=docker{{ '-' + docker_version }} state=present -- include: restart.yml - when: not skip_docker_restart | default(False) | bool +# starting docker happens back in ../main.yml where it calls ../restart.yml diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml index 01bd3bf38..94c97d0a5 100644 --- a/roles/openshift_node_upgrade/tasks/main.yml +++ b/roles/openshift_node_upgrade/tasks/main.yml @@ -9,6 +9,28 @@ # - openshift_release # tasks file for openshift_node_upgrade + +- name: Stop node and openvswitch services + service: + name: "{{ item }}" + state: stopped + with_items: + - "{{ openshift.common.service_type }}-node" + - openvswitch + failed_when: false + +- name: Stop additional containerized services + service: + name: "{{ item }}" + state: stopped + with_items: + - "{{ openshift.common.service_type }}-master" + - "{{ openshift.common.service_type }}-master-controllers" + - "{{ openshift.common.service_type }}-master-api" + - etcd_container + failed_when: false + when: openshift.common.is_containerized | bool + - include: docker/upgrade.yml vars: # We will restart Docker ourselves after everything is ready: @@ -16,7 +38,6 @@ when: - l_docker_upgrade is defined - l_docker_upgrade | bool - - not openshift.common.is_containerized | bool - include: "{{ node_config_hook }}" when: node_config_hook is defined @@ -67,16 +88,6 @@ state: latest when: not 
openshift.common.is_containerized | bool -- name: Restart openvswitch - systemd: - name: openvswitch - state: started - when: - - not openshift.common.is_containerized | bool - -# Mandatory Docker restart, ensure all containerized services are running: -- include: docker/restart.yml - - name: Update oreg value yedit: src: "{{ openshift.common.config_base }}/node/node-config.yaml" @@ -88,10 +99,6 @@ - name: Check for swap usage command: grep "^[^#].*swap" /etc/fstab # grep: match any lines which don't begin with '#' and contain 'swap' - # command: swapon --summary - # Alternate option, however if swap entries are in fstab, swap will be - # enabled at boot. Grepping fstab should catch a condition when swap was - # disabled, but the fstab entries were not removed. changed_when: false failed_when: false register: swap_result @@ -103,19 +110,25 @@ command: swapoff --all - name: Remove swap entries from /etc/fstab + replace: + dest: /etc/fstab + regexp: '(^[^#].*swap.*)' + replace: '# \1' + backup: yes + + - name: Add notice about disabling swap lineinfile: dest: /etc/fstab - regexp: 'swap' - state: absent + line: '# OpenShift-Ansible Installer disabled swap per overcommit guidelines' + state: present - when: swap_result.stdout_lines | length > 0 + when: + - swap_result.stdout_lines | length > 0 + - openshift_disable_swap | default(true) | bool # End Disable Swap Block -- name: Restart rpm node service - service: - name: "{{ openshift.common.service_type }}-node" - state: restarted - when: not openshift.common.is_containerized | bool +# Restart all services +- include: restart.yml - name: Wait for node to be ready oc_obj: diff --git a/roles/openshift_node_upgrade/tasks/docker/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml index 176fc3c0b..e576228ba 100644 --- a/roles/openshift_node_upgrade/tasks/docker/restart.yml +++ b/roles/openshift_node_upgrade/tasks/restart.yml @@ -6,13 +6,15 @@ # - openshift.master.api_port - name: Restart docker - service: name=docker state=restarted + service: + name: "{{ openshift.docker.service_name }}" + state: restarted - name: Update docker facts openshift_facts: role: docker -- name: Restart containerized services +- name: Start services service: name={{ item }} state=started with_items: - etcd_container @@ -22,7 +24,6 @@ - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" failed_when: false - when: openshift.common.is_containerized | bool - name: Wait for master API to come back online wait_for: diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml index 57279c665..b53b6afa1 100644 --- a/roles/openshift_provisioners/tasks/install_efs.yaml +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -65,6 +65,6 @@ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs register: efs_output - failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr" + failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr check_mode: no when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1 diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 84a0905cc..9a9436fcb 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -40,4 +40,21 @@ - 
openshift_deployment_type == 'origin' - openshift_enable_origin_repo | default(true) | bool + # Singleton block + - when: r_osr_first_run | default(true) + block: + - name: Ensure clean repo cache in the event repos have been changed manually + debug: + msg: "First run of openshift_repos" + changed_when: true + notify: refresh cache + + - name: Set fact r_osr_first_run false + set_fact: + r_osr_first_run: false + + # Force running ALL handlers now, because we expect repo cache to be cleared + # if changes have been made. + - meta: flush_handlers + when: not ostree_booted.stat.exists diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index cf0fb94c9..7b310dbf8 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -8,10 +8,24 @@ Requirements * Ansible 2.2 +Host Groups +----------- + +The following group is expected to be populated for this role to run: + +* `[glusterfs]` + +Additionally, the following group may be specified either in addition to or +instead of the above group to deploy a GlusterFS cluster for use by a natively +hosted Docker registry: + +* `[glusterfs_registry]` + Role Variables -------------- -From this role: +This role has the following variables that control the integration of a +GlusterFS cluster into a new or existing OpenShift cluster: | Name | Default value | | |--------------------------------------------------|-------------------------|-----------------------------------------| @@ -31,6 +45,25 @@ From this role: | openshift_storage_glusterfs_heketi_url | Undefined | URL for the heketi REST API, dynamically determined in native mode | openshift_storage_glusterfs_heketi_wipe | False | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` +Each role variable also has a corresponding variable to optionally configure a +separate GlusterFS cluster for use as storage for an integrated Docker +registry. These variables start with the prefix +`openshift_storage_glusterfs_registry_` and, for the most part, default to the +values in their corresponding non-registry variables. 
The following variables +are an exception: + +| Name | Default value | | +|---------------------------------------------------|-----------------------|-----------------------------------------| +| openshift_storage_glusterfs_registry_namespace | registry namespace | Default is to use the hosted registry's namespace, otherwise 'default' +| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters + +Additionally, this role's behavior responds to the following registry-specific +variable: + +| Name | Default value | Description | +|----------------------------------------------|---------------|------------------------------------------------------------------------------| +| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume | + Dependencies ------------ @@ -47,6 +80,7 @@ Example Playbook hosts: oo_first_master roles: - role: openshift_storage_glusterfs + when: groups.oo_glusterfs_to_config | default([]) | count > 0 ``` License diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index ade850747..ebe9ca30b 100644 --- a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -2,7 +2,7 @@ openshift_storage_glusterfs_timeout: 300 openshift_storage_glusterfs_namespace: 'default' openshift_storage_glusterfs_is_native: True -openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}" +openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs' openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" openshift_storage_glusterfs_version: 'latest' openshift_storage_glusterfs_wipe: False @@ -15,3 +15,22 @@ openshift_storage_glusterfs_heketi_admin_key: '' openshift_storage_glusterfs_heketi_user_key: '' openshift_storage_glusterfs_heketi_topology_load: True openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}" +openshift_storage_glusterfs_heketi_url: "{{ omit }}" + +openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}" +openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" +openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}" +openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry' +openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}" +openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}" +openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}" +openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}" +openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}" +openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}" +openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}" +openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}" 
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" +openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" +openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" +openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" +openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}" diff --git a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml index c9945be13..c9945be13 100644 --- a/roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/deploy-heketi-template.yml diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml index 3f8d8f507..3f8d8f507 100644 --- a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-registry-service.yml diff --git a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml index c66705752..c66705752 100644 --- a/roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/glusterfs-template.yml diff --git a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml index df045c170..df045c170 100644 --- a/roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml +++ b/roles/openshift_storage_glusterfs/files/v3.6/heketi-template.yml diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml new file mode 100644 index 000000000..fa5fa2cb0 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -0,0 +1,166 @@ +--- +- name: Verify target namespace exists + oc_project: + state: present + name: "{{ glusterfs_namespace }}" + when: glusterfs_is_native or glusterfs_heketi_is_native + +- include: glusterfs_deploy.yml + when: glusterfs_is_native + +- name: Make sure heketi-client is installed + package: name=heketi-client state=present + +- name: Delete pre-existing heketi resources + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: "{{ item.kind }}" + name: "{{ item.name | default(omit) }}" + selector: "{{ item.selector | default(omit) }}" + state: absent + with_items: + - kind: "template,route,service,dc,jobs,secret" + selector: "deploy-heketi" + - kind: "template,route,service,dc" + name: "heketi" + - kind: "svc,ep" + name: "heketi-storage-endpoints" + - kind: "sa" + name: "heketi-service-account" + failed_when: False + when: glusterfs_heketi_wipe + +- name: Wait for deploy-heketi pods to terminate + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: "glusterfs=deploy-heketi-pod" + register: heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: glusterfs_heketi_wipe + +- name: Wait for heketi pods to terminate + oc_obj: + namespace: "{{ glusterfs_namespace }}" + kind: pod + state: list + selector: 
"glusterfs=heketi-pod" + register: heketi_pod + until: "heketi_pod.results.results[0]['items'] | count == 0" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: glusterfs_heketi_wipe + +- name: Create heketi service account + oc_serviceaccount: + namespace: "{{ glusterfs_namespace }}" + name: heketi-service-account + state: present + when: glusterfs_heketi_is_native + +- name: Add heketi service account to privileged SCC + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + resource_kind: scc + resource_name: privileged + state: present + when: glusterfs_heketi_is_native + +- name: Allow heketi service account to view/edit pods + oc_adm_policy_user: + user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account" + resource_kind: role + resource_name: edit + state: present + when: glusterfs_heketi_is_native + +- name: Check for existing deploy-heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" + register: heketi_pod + when: glusterfs_heketi_is_native + +- name: Check if need to deploy deploy-heketi + set_fact: + glusterfs_heketi_deploy_is_missing: False + when: + - "glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- name: Check for existing heketi pod + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: pod + selector: "glusterfs=heketi-pod" + register: heketi_pod + when: glusterfs_heketi_is_native + +- name: Check if need to deploy heketi + set_fact: + glusterfs_heketi_is_missing: False + when: + - "glusterfs_heketi_is_native" + - "heketi_pod.results.results[0]['items'] | count > 0" + # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True + - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" + +- include: heketi_deploy_part1.yml + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_deploy_is_missing + - glusterfs_heketi_is_missing + +- name: Determine heketi URL + oc_obj: + namespace: "{{ glusterfs_namespace }}" + state: list + kind: ep + selector: "glusterfs in (deploy-heketi-service, heketi-service)" + register: heketi_url + until: + - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" + - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" + delay: 10 + retries: "{{ (glusterfs_timeout / 10) | int }}" + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_url is undefined + +- name: Set heketi URL + set_fact: + glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}" + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_url is undefined + +- name: Verify heketi service + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list" + changed_when: False + +- name: Generate topology file + template: + src: "{{ 
openshift.common.examples_content_version }}/topology.json.j2" + dest: "{{ mktemp.stdout }}/topology.json" + when: + - glusterfs_heketi_topology_load + +- name: Load heketi topology + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1" + register: topology_load + failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout" + when: + - glusterfs_heketi_topology_load + +- include: heketi_deploy_part2.yml + when: + - glusterfs_heketi_is_native + - glusterfs_heketi_is_missing diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml new file mode 100644 index 000000000..451990240 --- /dev/null +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml @@ -0,0 +1,22 @@ +--- +- set_fact: + glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}" + glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}" + glusterfs_image: "{{ openshift_storage_glusterfs_image }}" + glusterfs_version: "{{ openshift_storage_glusterfs_version }}" + glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}" + glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}" + glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}" + glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}" + glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}" + glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}" + glusterfs_nodes: "{{ g_glusterfs_hosts }}" + +- include: glusterfs_common.yml diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 26ca5eebf..579112349 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -1,44 +1,44 @@ --- - assert: - that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1" + that: "glusterfs_nodeselector.keys() | count == 1" msg: Only one GlusterFS nodeselector key pair should be provided - assert: - that: "groups.oo_glusterfs_to_config | count >= 3" + that: "glusterfs_nodes | count >= 3" msg: There must be at least three GlusterFS nodes specified - name: Delete pre-existing GlusterFS resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "template,daemonset" name: glusterfs state: absent - when: openshift_storage_glusterfs_wipe + when: glusterfs_wipe - name: Unlabel any existing GlusterFS nodes oc_label: name: "{{ item }}" kind: node state: absent - labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" with_items: "{{ groups.all }}" - when: 
openshift_storage_glusterfs_wipe + when: glusterfs_wipe - name: Delete pre-existing GlusterFS config file: path: /var/lib/glusterd state: absent delegate_to: "{{ item }}" - with_items: "{{ groups.oo_glusterfs_to_config }}" - when: openshift_storage_glusterfs_wipe + with_items: "{{ glusterfs_nodes | default([]) }}" + when: glusterfs_wipe - name: Get GlusterFS storage devices state command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}" register: devices_info delegate_to: "{{ item }}" - with_items: "{{ groups.oo_glusterfs_to_config }}" + with_items: "{{ glusterfs_nodes | default([]) }}" failed_when: False - when: openshift_storage_glusterfs_wipe + when: glusterfs_wipe # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume. - name: Clear GlusterFS storage device contents @@ -46,12 +46,12 @@ delegate_to: "{{ item.item }}" with_items: "{{ devices_info.results }}" when: - - openshift_storage_glusterfs_wipe + - glusterfs_wipe - item.stdout_lines | count > 0 - name: Add service accounts to privileged SCC oc_adm_policy_user: - user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}" + user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}" resource_kind: scc resource_name: privileged state: present @@ -64,8 +64,8 @@ name: "{{ glusterfs_host }}" kind: node state: add - labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}" - with_items: "{{ groups.oo_glusterfs_to_config }}" + labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" + with_items: "{{ glusterfs_nodes | default([]) }}" loop_control: loop_var: glusterfs_host @@ -76,7 +76,7 @@ - name: Create GlusterFS template oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: glusterfs state: present @@ -85,16 +85,16 @@ - name: Deploy GlusterFS pods oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "glusterfs" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}" + IMAGE_NAME: "{{ glusterfs_image }}" + IMAGE_VERSION: "{{ glusterfs_version }}" - name: Wait for GlusterFS pods oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs-node=pod" @@ -102,6 +102,6 @@ until: - "glusterfs_pods.results.results[0]['items'] | count > 0" # There must be as many pods with 'Ready' staus True as there are nodes expecting those pods - - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count" + - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml index 9f092d5d5..392f4b65b 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml +++ 
b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml @@ -1,7 +1,30 @@ --- +- set_fact: + glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}" + glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}" + glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}" + glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}" + glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}" + glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}" + glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}" + glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}" + glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}" + glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}" + glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}" + glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}" + glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}" + glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}" + glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}" + glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}" + glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}" + glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}" + +- include: glusterfs_common.yml + when: g_glusterfs_registry_hosts != g_glusterfs_hosts + - name: Delete pre-existing GlusterFS registry resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "{{ item.kind }}" name: "{{ item.name | default(omit) }}" selector: "{{ item.selector | default(omit) }}" @@ -23,7 +46,7 @@ - name: Create GlusterFS registry endpoints oc_obj: - namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + namespace: "{{ glusterfs_namespace }}" state: present kind: endpoints name: glusterfs-registry-endpoints @@ -32,7 +55,7 @@ - name: Create GlusterFS registry service oc_obj: - namespace: "{{ openshift.hosted.registry.namespace | default('default') }}" + namespace: "{{ glusterfs_namespace }}" state: present kind: service name: glusterfs-registry-endpoints @@ -40,9 +63,9 @@ - "{{ mktemp.stdout }}/glusterfs-registry-service.yml" - name: Check if GlusterFS registry volume exists - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list" register: registry_volume - name: Create GlusterFS registry volume - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" - when: "'{{ openshift.hosted.registry.storage.glusterfs.path }}' not in registry_volume.stdout" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ 
openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}" + when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml index 76ae1db75..c14fcfb15 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml @@ -8,7 +8,7 @@ - name: Create deploy-heketi resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: deploy-heketi state: present @@ -17,18 +17,18 @@ - name: Deploy deploy-heketi pod oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "deploy-heketi" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" - HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" - HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + IMAGE_NAME: "{{ glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" - name: Wait for deploy-heketi pod oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support" @@ -38,4 +38,4 @@ # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index 84b85e95d..64410a9ab 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -1,6 +1,6 @@ --- - name: Create heketi DB volume - command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" + command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json" register: setup_storage failed_when: False @@ -13,12 +13,12 @@ # Need `command` here because heketi-storage.json contains multiple objects. 
- name: Copy heketi DB to GlusterFS volume - command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}" - when: "setup_storage.rc == 0" + command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}" + when: setup_storage.rc == 0 - name: Wait for copy job to finish oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: job state: list name: "heketi-storage-copy-job" @@ -28,17 +28,17 @@ # Pod's 'Complete' status must be True - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" failed_when: - "'results' in heketi_job.results" - "heketi_job.results.results | count > 0" # Fail when pod's 'Failed' status is True - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1" - when: "setup_storage.rc == 0" + when: setup_storage.rc == 0 - name: Delete deploy resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: "{{ item.kind }}" name: "{{ item.name | default(omit) }}" selector: "{{ item.selector | default(omit) }}" @@ -55,7 +55,7 @@ - name: Create heketi resources oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: template name: heketi state: present @@ -64,18 +64,18 @@ - name: Deploy heketi pod oc_process: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" template_name: "heketi" create: True params: - IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}" - IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}" - HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}" - HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}" + IMAGE_NAME: "{{ glusterfs_heketi_image }}" + IMAGE_VERSION: "{{ glusterfs_heketi_version }}" + HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}" + HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}" - name: Wait for heketi pod oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" kind: pod state: list selector: "glusterfs=heketi-pod" @@ -85,11 +85,11 @@ # Pod's 'Ready' status must be True - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}" + retries: "{{ (glusterfs_timeout / 10) | int }}" - name: Determine heketi URL oc_obj: - namespace: "{{ openshift_storage_glusterfs_namespace }}" + namespace: "{{ glusterfs_namespace }}" state: list kind: ep selector: "glusterfs=heketi-service" @@ -98,12 +98,12 @@ - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''" - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''" delay: 10 - retries: "{{ (openshift_storage_glusterfs_timeout / 10) | 
int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
 
 - name: Set heketi URL
   set_fact:
-    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+    glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
 
 - name: Verify heketi service
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
   changed_when: False
diff --git a/roles/openshift_storage_glusterfs/tasks/main.yml b/roles/openshift_storage_glusterfs/tasks/main.yml
index 265a3cc6e..ebd8db453 100644
--- a/roles/openshift_storage_glusterfs/tasks/main.yml
+++ b/roles/openshift_storage_glusterfs/tasks/main.yml
@@ -5,174 +5,14 @@
   changed_when: False
   check_mode: no
 
-- name: Verify target namespace exists
-  oc_project:
-    state: present
-    name: "{{ openshift_storage_glusterfs_namespace }}"
-  when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
-
-- include: glusterfs_deploy.yml
-  when: openshift_storage_glusterfs_is_native
-
-- name: Make sure heketi-client is installed
-  package: name=heketi-client state=present
-
-- name: Delete pre-existing heketi resources
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    kind: "{{ item.kind }}"
-    name: "{{ item.name | default(omit) }}"
-    selector: "{{ item.selector | default(omit) }}"
-    state: absent
-  with_items:
-  - kind: "template,route,service,jobs,dc,secret"
-    selector: "deploy-heketi"
-  - kind: "template,route,dc,service"
-    name: "heketi"
-  - kind: "svc,ep"
-    name: "heketi-storage-endpoints"
-  - kind: "sa"
-    name: "heketi-service-account"
-  failed_when: False
-  when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Wait for deploy-heketi pods to terminate
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=deploy-heketi-pod"
-  register: heketi_pod
-  until: "heketi_pod.results.results[0]['items'] | count == 0"
-  delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
-  when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Wait for heketi pods to terminate
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=heketi-pod"
-  register: heketi_pod
-  until: "heketi_pod.results.results[0]['items'] | count == 0"
-  delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
-  when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Create heketi service account
-  oc_serviceaccount:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    name: heketi-service-account
-    state: present
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Add heketi service account to privileged SCC
-  oc_adm_policy_user:
-    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
-    resource_kind: scc
-    resource_name: privileged
-    state: present
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Allow heketi service account to view/edit pods
-  oc_adm_policy_user:
-    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
-    resource_kind: role
-    resource_name: edit
-    state: present
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check for existing deploy-heketi pod
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    state: list
-    kind: pod
-    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
-  register: heketi_pod
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check if need to deploy deploy-heketi
-  set_fact:
-    openshift_storage_glusterfs_heketi_deploy_is_missing: False
-  when:
-  - "openshift_storage_glusterfs_heketi_is_native"
-  - "heketi_pod.results.results[0]['items'] | count > 0"
-  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
-
-- name: Check for existing heketi pod
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    state: list
-    kind: pod
-    selector: "glusterfs=heketi-pod"
-  register: heketi_pod
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check if need to deploy heketi
-  set_fact:
-    openshift_storage_glusterfs_heketi_is_missing: False
+- include: glusterfs_config.yml
   when:
-  - "openshift_storage_glusterfs_heketi_is_native"
-  - "heketi_pod.results.results[0]['items'] | count > 0"
-  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
-
-- include: heketi_deploy_part1.yml
-  when:
-  - openshift_storage_glusterfs_heketi_is_native
-  - openshift_storage_glusterfs_heketi_deploy_is_missing
-  - openshift_storage_glusterfs_heketi_is_missing
-
-- name: Determine heketi URL
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    state: list
-    kind: ep
-    selector: "glusterfs in (deploy-heketi-service, heketi-service)"
-  register: heketi_url
-  until:
-  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
-  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
-  delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
-  when:
-  - openshift_storage_glusterfs_heketi_is_native
-  - openshift_storage_glusterfs_heketi_url is undefined
-
-- name: Set heketi URL
-  set_fact:
-    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
-  when:
-  - openshift_storage_glusterfs_heketi_is_native
-  - openshift_storage_glusterfs_heketi_url is undefined
-
-- name: Verify heketi service
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
-  changed_when: False
-
-- name: Generate topology file
-  template:
-    src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
-    dest: "{{ mktemp.stdout }}/topology.json"
-  when:
-  - openshift_storage_glusterfs_is_native
-  - openshift_storage_glusterfs_heketi_topology_load
-
-- name: Load heketi topology
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
-  register: topology_load
-  failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
-  when:
-  - openshift_storage_glusterfs_is_native
-  - openshift_storage_glusterfs_heketi_topology_load
-
-- include: heketi_deploy_part2.yml
-  when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
+  - g_glusterfs_hosts | default([]) | count > 0
 
 - include: glusterfs_registry.yml
-  when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+  when:
+  - g_glusterfs_registry_hosts | default([]) | count > 0
+  - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
 
 - name: Delete temp directory
   file:
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
index d72d085c9..605627ab5 100644
--- a/roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
@@ -4,7 +4,7 @@ metadata:
   name: glusterfs-registry-endpoints
 subsets:
 - addresses:
-{% for node in groups.oo_glusterfs_to_config %}
+{% for node in glusterfs_nodes %}
   - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
 {% endfor %}
   ports:
diff --git a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
index eb5b4544f..33d8f9b36 100644
--- a/roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2
@@ -1,7 +1,7 @@
 {
   "clusters": [
 {%- set clusters = {} -%}
-{%- for node in groups.oo_glusterfs_to_config -%}
+{%- for node in glusterfs_nodes -%}
   {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%}
   {%- if cluster in clusters -%}
     {%- set _dummy = clusters[cluster].append(node) -%}
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index fa9b20e92..d8b1158a6 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -86,8 +86,16 @@
   include: set_version_rpm.yml
   when: not is_containerized | bool
 
-- name: Set openshift_version for containerized installation
-  include: set_version_containerized.yml
+- block:
+  - name: Set openshift_version for containerized installation
+    include: set_version_containerized.yml
+  - name: Determine openshift rpm version
+    include: rpm_version.yml
+  - name: Fail if rpm version and docker image version are different
+    fail:
+      msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
+    # Both versions have the same string representation
+    when: openshift_rpm_version != openshift_version
   when: is_containerized | bool
 
 # Warn if the user has provided an openshift_image_tag but is not doing a containerized install
diff --git a/roles/openshift_version/tasks/rpm_version.yml b/roles/openshift_version/tasks/rpm_version.yml
new file mode 100644
index 000000000..bd5e94b43
--- /dev/null
+++ b/roles/openshift_version/tasks/rpm_version.yml
@@ -0,0 +1,44 @@
+---
+# input_variables:
+# - repoquery_cmd
+# - openshift.common.service_type
+# output_variables:
+# - openshift_rpm_version
+
+# if {{ openshift.common.service_type}}-excluder is enabled,
+# the repoquery for {{ openshift.common.service_type}} will not work.
+# Thus, create a temporary yum.conf file where exclude= is set to an empty list
+- name: Create temporary yum.conf file
+  command: mktemp -d /tmp/yum.conf.XXXXXX
+  register: yum_conf_temp_file_result
+
+- set_fact:
+    yum_conf_temp_file: "{{yum_conf_temp_file_result.stdout}}/yum.conf"
+
+- name: Copy yum.conf into the temporary file
+  copy:
+    src: /etc/yum.conf
+    dest: "{{ yum_conf_temp_file }}"
+    remote_src: True
+
+- name: Clear the exclude= list in the temporary yum.conf
+  lineinfile:
+    # since ansible 2.3 s/dest/path
+    dest: "{{ yum_conf_temp_file }}"
+    regexp: '^exclude='
+    line: 'exclude='
+
+- name: Gather common package version
+  command: >
+    {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type}}"
+  register: common_version
+  failed_when: false
+  changed_when: false
+
+- name: Delete the temporary yum.conf
+  file:
+    path: "{{ yum_conf_temp_file_result.stdout }}"
+    state: absent
+
+- set_fact:
+    openshift_rpm_version: "{{ common_version.stdout | default('0.0', True) }}"
diff --git a/roles/openshift_version/tasks/set_version_rpm.yml b/roles/openshift_version/tasks/set_version_rpm.yml
index c7604af1a..3cf78068b 100644
--- a/roles/openshift_version/tasks/set_version_rpm.yml
+++ b/roles/openshift_version/tasks/set_version_rpm.yml
@@ -7,42 +7,8 @@
   - openshift_pkg_version is defined
   - openshift_version is not defined
 
-# if {{ openshift.common.service_type}}-excluder is enabled,
-# the repoquery for {{ openshift.common.service_type}} will not work.
-# Thus, create a temporary yum,conf file where exclude= is set to an empty list
-- name: Create temporary yum.conf file
-  command: mktemp -d /tmp/yum.conf.XXXXXX
-  register: yum_conf_temp_file_result
-
-- set_fact:
-    yum_conf_temp_file: "{{yum_conf_temp_file_result.stdout}}/yum.conf"
-
-- name: Copy yum.conf into the temporary file
-  copy:
-    src: /etc/yum.conf
-    dest: "{{ yum_conf_temp_file }}"
-    remote_src: True
-
-- name: Clear the exclude= list in the temporary yum.conf
-  lineinfile:
-    # since ansible 2.3 s/dest/path
-    dest: "{{ yum_conf_temp_file }}"
-    regexp: '^exclude='
-    line: 'exclude='
-
-- name: Gather common package version
-  command: >
-    {{ repoquery_cmd }} --config "{{ yum_conf_temp_file }}" --qf '%{version}' "{{ openshift.common.service_type}}"
-  register: common_version
-  failed_when: false
-  changed_when: false
-  when: openshift_version is not defined
-
-- name: Delete the temporary yum.conf
-  file:
-    path: "{{ yum_conf_temp_file_result.stdout }}"
-    state: absent
-
-- set_fact:
-    openshift_version: "{{ common_version.stdout | default('0.0', True) }}"
+- block:
+  - include: rpm_version.yml
+  - set_fact:
+      openshift_version: "{{ openshift_rpm_version }}"
   when: openshift_version is not defined
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index 8d4878fa7..aeee3ede8 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -1,6 +1,5 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
 # pylint: disable=fixme, missing-docstring
 
 import subprocess
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 4b2979887..509655b0c 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -14,7 +14,7 @@
   - iptables
   - ip6tables
   register: task_result
-  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+  failed_when: task_result|failed and 'could not' not in task_result.msg|lower
 
 - name: Wait 10 seconds after disabling iptables
   pause:
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 38ea2477c..55f2fc471 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -7,7 +7,7 @@
     enabled: no
     masked: yes
   register: task_result
-  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+  failed_when: task_result|failed and 'could not' not in task_result.msg|lower
 
 - name: Wait 10 seconds after disabling firewalld
   pause:
diff --git a/roles/rhel_subscribe/tasks/enterprise.yml b/roles/rhel_subscribe/tasks/enterprise.yml
index 41673ee40..ea0c42150 100644
--- a/roles/rhel_subscribe/tasks/enterprise.yml
+++ b/roles/rhel_subscribe/tasks/enterprise.yml
@@ -7,7 +7,7 @@
   when: deployment_type == 'enterprise'
 
 - set_fact:
-    default_ose_version: '3.4'
+    default_ose_version: '3.5'
   when: deployment_type in ['atomic-enterprise', 'openshift-enterprise']
 
 - set_fact:
@@ -16,10 +16,13 @@
 - fail:
     msg: "{{ ose_version }} is not a valid version for {{ deployment_type }} deployment type"
   when: ( deployment_type == 'enterprise' and ose_version not in ['3.0'] ) or
-        ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4'] )
+        ( deployment_type in ['atomic-enterprise', 'openshift-enterprise'] and ose_version not in ['3.1', '3.2', '3.3', '3.4', '3.5'] )
 
 - name: Enable RHEL repositories
   command: subscription-manager repos \
               --enable="rhel-7-server-rpms" \
               --enable="rhel-7-server-extras-rpms" \
-              --enable="rhel-7-server-ose-{{ ose_version }}-rpms"
+              --enable="rhel-7-server-ose-{{ ose_version }}-rpms" \
+              --enable="rhel-7-fast-datapath-rpms"
+  register: subscribe_repos
+  until: subscribe_repos | succeeded