Diffstat (limited to 'playbooks/common/openshift-cluster')
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml  23
-rw-r--r--  playbooks/common/openshift-cluster/config.yml  64
-rw-r--r--  playbooks/common/openshift-cluster/disable_excluder.yml  11
-rw-r--r--  playbooks/common/openshift-cluster/enable_dnsmasq.yml  4
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml  127
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml  155
-rw-r--r--  playbooks/common/openshift-cluster/initialize_oo_option_facts.yml  27
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_repos.yml  8
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml  22
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml  11
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml  2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml  9
-rw-r--r--  playbooks/common/openshift-cluster/openshift_provisioners.yml  5
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml  158
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml  8
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/masters.yml  10
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/ca.yml)  156
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/registry.yml  10
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/router.yml  64
-rw-r--r--  playbooks/common/openshift-cluster/reset_excluder.yml  8
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml  20
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml  30
-rw-r--r--  playbooks/common/openshift-cluster/update_repos_and_packages.yml  20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml  14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml  12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml  12
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml  75
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh (renamed from playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh)  0
l---------  playbooks/common/openshift-cluster/upgrades/docker/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/restart.yml)  6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml)  9
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/backup.yml  96
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml  46
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml  23
l---------  playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/main.yml  35
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml  20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml  179
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml  17
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml  18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml  60
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml  45
-rwxr-xr-x  playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py  2
l---------  playbooks/common/openshift-cluster/upgrades/master_docker  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml  71
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml  20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml  11
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml  23
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml  13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml  13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml  66
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml  43
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml  101
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml  20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml  51
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml  2
l---------  playbooks/common/openshift-cluster/upgrades/v3_3/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml  118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml  118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml  113
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml  2
l---------  playbooks/common/openshift-cluster/upgrades/v3_4/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml  116
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml  118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml  111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml  16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml  18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml  118
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml  122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml  111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml  67
l---------  playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml  16
l---------  playbooks/common/openshift-cluster/upgrades/v3_6/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml  122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml  122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml  111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml  12
l---------  playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml  16
l---------  playbooks/common/openshift-cluster/upgrades/v3_7/roles  1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml  122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml  122
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml  111
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml  23
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml  19
86 files changed, 3110 insertions(+), 898 deletions(-)
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
deleted file mode 100644
index c0ea93d2c..000000000
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Additional master configuration
- hosts: oo_first_master
- vars:
- cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
- etcd_urls: "{{ openshift.master.etcd_urls }}"
- openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
- omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
- roles:
- - role: openshift_master_cluster
- when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- - role: openshift_examples
- registry_url: "{{ openshift.master.registry_url }}"
- when: openshift.common.install_examples | bool
- - role: openshift_hosted_templates
- registry_url: "{{ openshift.master.registry_url }}"
- - role: openshift_manageiq
- when: openshift.common.use_manageiq | bool
- - role: cockpit
- when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
- (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
- - role: flannel_register
- when: openshift.common.use_flannel | bool
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 113b401f9..5f420a76c 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,38 +1,28 @@
---
-- name: Set oo_option facts
+# TODO: refactor this into its own include
+# and pass a variable for ctx
+- name: Verify Requirements
hosts: oo_all_hosts
- tags:
- - always
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- - set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
- - set_fact:
- openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}"
- when: openshift_docker_selinux_enabled is not defined
-
-- include: disable_excluder.yml
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: install
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
+- include: initialize_oo_option_facts.yml
tags:
- always
- include: ../openshift-etcd/config.yml
- tags:
- - etcd
- include: ../openshift-nfs/config.yml
tags:
@@ -43,17 +33,21 @@
- loadbalancer
- include: ../openshift-master/config.yml
- tags:
- - master
-
-- include: additional_config.yml
- tags:
- - master
- include: ../openshift-node/config.yml
tags:
- node
+- include: ../openshift-glusterfs/config.yml
+ tags:
+ - glusterfs
+
- include: openshift_hosted.yml
tags:
- hosted
+
+- include: service_catalog.yml
+ when:
+ - openshift_enable_service_catalog | default(false) | bool
+ tags:
+ - servicecatalog
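
The new "Verify Requirements" play added to config.yml above runs the openshift_health_check action against every host before any component is configured. As a minimal sketch (the play structure mirrors the diff; the openshift_disable_check note is an assumption about the checker's skip mechanism, not something shown here), the same checks could be run on their own:

---
# Standalone sketch: re-run the pre-install health checks shown in config.yml above.
# To skip an individual check, inventories of this era typically set
# openshift_disable_check=<check1>,<check2>  (assumption, not part of this diff).
- name: Verify install requirements ad hoc
  hosts: oo_all_hosts
  roles:
    - openshift_health_checker
  vars:
    - r_openshift_health_checker_playbook_context: install
  post_tasks:
    - action: openshift_health_check
      args:
        checks:
          - disk_availability
          - memory_availability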
diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml
deleted file mode 100644
index eb146bab8..000000000
--- a/playbooks/common/openshift-cluster/disable_excluder.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Record excluder state and disable
- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_role:
- name: openshift_excluder
- tasks_from: status
- - include_role:
- name: openshift_excluder
- tasks_from: unexclude
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index ca5177852..50351588f 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -37,7 +37,7 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: dnsConfig.bindAddress
yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
- notify: restart master
+ notify: restart master api
- meta: flush_handlers
- name: Configure nodes for dnsmasq
@@ -56,8 +56,6 @@
- role: node
local_facts:
dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
roles:
- openshift_node_dnsmasq
post_tasks:
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 45a4875a3..c9f37109b 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,29 +5,49 @@
become: no
gather_facts: no
tasks:
- - fail:
- msg: This playbook requires g_etcd_hosts to be set
- when: "{{ g_etcd_hosts is not defined }}"
+ - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
+ fail:
+ msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
+ when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
+ fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
- when: "{{ g_master_hosts is not defined and g_new_master_hosts is not defined }}"
+ when: g_master_hosts is not defined and g_new_master_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
+ fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
- when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}"
+ when: g_node_hosts is not defined and g_new_node_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_lb_hosts required
+ fail:
msg: This playbook requires g_lb_hosts to be set
- when: "{{ g_lb_hosts is not defined }}"
+ when: g_lb_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts required
+ fail:
msg: This playbook requires g_nfs_hosts to be set
- when: "{{ g_nfs_hosts is not defined }}"
+ when: g_nfs_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts is single host
+ fail:
msg: The nfs group must be limited to one host
- when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}"
+ when: g_nfs_hosts | default([]) | length > 1
+
+ - name: Evaluate groups - g_glusterfs_hosts required
+ fail:
+ msg: This playbook requires g_glusterfs_hosts to be set
+ when: g_glusterfs_hosts is not defined
+
+ - name: Evaluate groups - Fail if no etcd hosts group is defined
+ fail:
+ msg: >
+ No etcd hosts defined. Running an all-in-one master is deprecated and
+ will no longer be supported in a future upgrade.
+ when:
+ - g_etcd_hosts | default([]) | length == 0
+ - not openshift_master_unsupported_all_in_one | default(False)
- name: Evaluate oo_all_hosts
add_host:
@@ -47,13 +67,22 @@
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
changed_when: no
- - name: Evaluate oo_etcd_to_config
+ - name: Evaluate oo_first_master
+ add_host:
+ name: "{{ g_master_hosts[0] }}"
+ groups: oo_first_master
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ when: g_master_hosts|length > 0
+ changed_when: no
+
+ - name: Evaluate oo_new_etcd_to_config
add_host:
name: "{{ item }}"
- groups: oo_etcd_to_config
+ groups: oo_new_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_etcd_hosts | default([]) }}"
+ with_items: "{{ g_new_etcd_hosts | default([]) }}"
changed_when: no
- name: Evaluate oo_masters_to_config
@@ -65,6 +94,41 @@
with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
changed_when: no
+ - name: Evaluate oo_etcd_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_etcd_hosts | default([]) }}"
+ changed_when: no
+
+ - name: Evaluate oo_first_etcd
+ add_host:
+ name: "{{ g_etcd_hosts[0] }}"
+ groups: oo_first_etcd
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ when: g_etcd_hosts|length > 0
+ changed_when: no
+
+  # We use two groups: one for the hosts we're upgrading, which doesn't include embedded etcd,
+  # and one for backing up, which does include the embedded etcd host. There's no need to
+  # upgrade embedded etcd separately; that happens when the master is updated.
+ - name: Evaluate oo_etcd_hosts_to_upgrade
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_upgrade
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+ changed_when: False
+
+ - name: Evaluate oo_etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
+ changed_when: False
+
- name: Evaluate oo_nodes_to_config
add_host:
name: "{{ item }}"
@@ -82,40 +146,41 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ g_master_hosts | default([]) }}"
- when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}"
+ when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
changed_when: no
- - name: Evaluate oo_first_etcd
+ - name: Evaluate oo_lb_to_config
add_host:
- name: "{{ g_etcd_hosts[0] }}"
- groups: oo_first_etcd
+ name: "{{ item }}"
+ groups: oo_lb_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- when: "{{ g_etcd_hosts|length > 0 }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_lb_hosts | default([]) }}"
changed_when: no
- - name: Evaluate oo_first_master
+ - name: Evaluate oo_nfs_to_config
add_host:
- name: "{{ g_master_hosts[0] }}"
- groups: oo_first_master
+ name: "{{ item }}"
+ groups: oo_nfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- when: "{{ g_master_hosts|length > 0 }}"
+ with_items: "{{ g_nfs_hosts | default([]) }}"
changed_when: no
- - name: Evaluate oo_lb_to_config
+ - name: Evaluate oo_glusterfs_to_config
add_host:
name: "{{ item }}"
- groups: oo_lb_to_config
+ groups: oo_glusterfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_lb_hosts | default([]) }}"
+ with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
changed_when: no
- - name: Evaluate oo_nfs_to_config
+ - name: Evaluate oo_etcd_to_migrate
add_host:
name: "{{ item }}"
- groups: oo_nfs_to_config
+ groups: oo_etcd_to_migrate
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_nfs_hosts | default([]) }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
changed_when: no
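
evaluate_groups.yml now hard-fails when g_glusterfs_hosts is undefined and builds oo_glusterfs_to_config from the union of g_glusterfs_hosts and g_glusterfs_registry_hosts. A hedged sketch of where those g_* variables usually come from (the inventory group names and defaults below are assumptions for illustration; in BYO installs the mapping lives in cluster_hosts.yml):

---
# Illustrative mapping from inventory groups to the g_* variables consumed above (assumption).
g_etcd_hosts: "{{ groups.etcd | default([]) }}"
g_new_etcd_hosts: "{{ groups.new_etcd | default([]) }}"
g_lb_hosts: "{{ groups.lb | default([]) }}"
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}"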
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 18f99728c..9eaf3bc34 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -6,14 +6,155 @@
- name: Initialize host facts
hosts: oo_all_hosts
- roles:
- - openshift_facts
tasks:
- - openshift_facts:
+ - name: load openshift_facts module
+ include_role:
+ name: openshift_facts
+
+ # TODO: Should this role be refactored into health_checks??
+ - name: Run openshift_sanitize_inventory to set variables
+ include_role:
+ name: openshift_sanitize_inventory
+
+ - name: Detecting Operating System from ostree_booted
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
+ # Locally setup containerized facts for now
+ - name: initialize_facts set fact l_is_atomic
+ set_fact:
+ l_is_atomic: "{{ ostree_booted.stat.exists }}"
+
+ - name: initialize_facts set fact for containerized and l_is_*_system_container
+ set_fact:
+ l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+ l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
+ - name: initialize_facts set facts for l_any_system_container
+ set_fact:
+ l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
+
+ - name: initialize_facts set fact for l_etcd_runtime
+ set_fact:
+ l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ - name: Validate python version - ans_dist is fedora and python is v3
+ fail:
+ msg: |
+ openshift-ansible requires Python 3 for {{ ansible_distribution }};
+ For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_python['version']['major'] != 3
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ - name: Validate python version - ans_dist not Fedora and python must be v2
+ fail:
+ msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
+ when:
+ - ansible_distribution != 'Fedora'
+ - ansible_python['version']['major'] != 2
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ # Fail as early as possible if Atomic and old version of Docker
+ - when:
+ - l_is_atomic | bool
+ block:
+
+ # See https://access.redhat.com/articles/2317361
+ # and https://github.com/ansible/ansible/issues/15892
+  # NOTE: the double quotes cannot be removed at this level or the docker command will fail
+  # NOTE: when Ansible >2.2.1.x is used this can be updated per
+ # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
+ - name: Determine Atomic Host Docker Version
+ shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+ register: l_atomic_docker_version
+
+ - name: assert atomic host docker version is 1.12 or later
+ assert:
+ that:
+ - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+ msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
+
+ - when:
+ - not l_is_atomic | bool
+ block:
+ - name: Ensure openshift-ansible installer package deps are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iproute
+ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}"
+ - PyYAML
+ - yum-utils
+
+ - name: Ensure various deps for running system containers are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - atomic
+ - ostree
+ - runc
+ when:
+ - l_any_system_container | bool
+
+    - name: Default system_images_registry to an enterprise registry
+ set_fact:
+ system_images_registry: "registry.access.redhat.com"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "openshift-enterprise"
+
+ - name: Default system_images_registry to community registry
+ set_fact:
+ system_images_registry: "docker.io"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "origin"
+
+ - name: Gather Cluster facts and set is_containerized if needed
+ openshift_facts:
role: common
local_facts:
+ debug_level: "{{ openshift_debug_level | default(2) }}"
+ deployment_type: "{{ openshift_deployment_type }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ cli_image: "{{ osm_image | default(None) }}"
+ cluster_id: "{{ openshift_cluster_id | default('default') }}"
hostname: "{{ openshift_hostname | default(None) }}"
- - set_fact:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- - set_fact:
- openshift_deployment_type: "{{ deployment_type }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ is_containerized: "{{ l_is_containerized | default(None) }}"
+ is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+ is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+ is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+ is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+ etcd_runtime: "{{ l_etcd_runtime }}"
+ system_images_registry: "{{ system_images_registry }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+ no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+ sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}"
+ use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}"
+
+ - name: initialize_facts set_fact repoquery command
+ set_fact:
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+ - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
+ set_fact:
+ openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
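
The Atomic Host Docker check in initialize_facts.yml above assembles the Go-template braces at runtime (via the CURLY shell variable) so that Jinja2 does not try to expand {{json .Server.Version}} itself. A sketch of an equivalent task that protects the braces with a raw block instead (an alternative approach, not the one this diff uses):

# What the task effectively runs on the host is: docker version --format '{{json .Server.Version}}'
- name: Determine Atomic Host Docker Version (alternative sketch using a raw block)
  shell: "docker version --format '{% raw %}{{json .Server.Version}}{% endraw %}'"
  register: l_atomic_docker_version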
diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
new file mode 100644
index 000000000..ac3c702a0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
@@ -0,0 +1,27 @@
+---
+- name: Set oo_option facts
+ hosts: oo_all_hosts
+ tags:
+ - always
+ tasks:
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
+ - set_fact:
+ openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}"
+ when: openshift_docker_selinux_enabled is not defined
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
new file mode 100644
index 000000000..a7114fc80
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup yum repositories for all hosts
+ hosts: oo_all_hosts
+ gather_facts: no
+ tasks:
+ - name: initialize openshift repos
+ include_role:
+ name: openshift_repos
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 6b40176e1..7112a6084 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,27 +1,5 @@
---
# NOTE: requires openshift_facts be run
-- name: Verify compatible yum/subscription-manager combination
- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- # See:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
- # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
- # https://github.com/openshift/openshift-ansible/issues/1138
- - name: Check for bad combinations of yum and subscription-manager
- command: >
- {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
- register: yum_ver_test
- changed_when: false
- when: not openshift.common.is_atomic | bool
- - fail:
- msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
- when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
-
-- include: disable_excluder.yml
- tags:
- - always
-
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 06cda36a5..99a634970 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -26,7 +26,10 @@
logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- role: openshift_hosted
+ r_openshift_hosted_use_calico: "{{ openshift.common.use_calico | default(false) | bool }}"
- role: openshift_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
- role: openshift_logging
@@ -53,6 +56,8 @@
pre_tasks:
- set_fact:
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ - set_fact:
+ openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
tasks:
- block:
@@ -60,3 +65,9 @@
name: openshift_logging
tasks_from: update_master_config
when: openshift_hosted_logging_deploy | default(false) | bool
+
+ - block:
+ - include_role:
+ name: openshift_metrics
+ tasks_from: update_master_config
+ when: openshift_hosted_metrics_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index d96a78c4c..c1a5d83cd 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -5,7 +5,7 @@
- openshift_logging
- name: Update Master configs
- hosts: masters:!oo_first_master
+ hosts: oo_masters:!oo_first_master
tasks:
- block:
- include_role:
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index 9f38ceea6..1dc180c26 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -3,3 +3,12 @@
hosts: oo_first_master
roles:
- openshift_metrics
+
+- name: OpenShift Metrics
+ hosts: oo_masters:!oo_first_master
+ serial: 1
+ tasks:
+ - name: Setup the non-first masters configs
+ include_role:
+ name: openshift_metrics
+ tasks_from: update_master_config.yaml
diff --git a/playbooks/common/openshift-cluster/openshift_provisioners.yml b/playbooks/common/openshift-cluster/openshift_provisioners.yml
new file mode 100644
index 000000000..b1ca6f606
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_provisioners.yml
@@ -0,0 +1,5 @@
+---
+- name: OpenShift Provisioners
+ hosts: oo_first_master
+ roles:
+ - openshift_provisioners
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
new file mode 100644
index 000000000..6964e8567
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -0,0 +1,158 @@
+---
+- name: Check cert expiries
+ hosts: oo_etcd_to_config:oo_masters_to_config
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
+
+- name: Backup existing etcd CA certificate directories
+ hosts: oo_etcd_to_config
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Determine if CA certificate directory exists
+ stat:
+ path: "{{ etcd_ca_dir }}"
+ register: etcd_ca_certs_dir_stat
+ - name: Backup generated etcd certificates
+ command: >
+ tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_ca_dir }}
+ args:
+ warn: no
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+ - name: Remove CA certificate directory
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: absent
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+
+- name: Generate new etcd CA
+ hosts: oo_first_etcd
+ roles:
+ - role: openshift_etcd_ca
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_etcd_mktemp
+ changed_when: false
+
+- name: Distribute etcd CA to etcd hosts
+ hosts: oo_etcd_to_config
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Create a tarball of the etcd ca certs
+ command: >
+ tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
+ -C {{ etcd_ca_dir }} .
+ args:
+ creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ warn: no
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Retrieve etcd ca cert tarball
+ fetch:
+ src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Ensure ca directory exists
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: directory
+ - name: Unarchive etcd ca cert tarballs
+ unarchive:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ etcd_ca_dir }}"
+ - name: Read current etcd CA
+ slurp:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ register: g_current_etcd_ca_output
+ - name: Read new etcd CA
+ slurp:
+ src: "{{ etcd_ca_dir }}/ca.crt"
+ register: g_new_etcd_ca_output
+ - copy:
+ content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
+ dest: "{{ item }}/ca.crt"
+ with_items:
+ - "{{ etcd_conf_dir }}"
+ - "{{ etcd_ca_dir }}"
+
+- include: ../../openshift-etcd/restart.yml
+ # Do not restart etcd when etcd certificates were previously expired.
+ when: ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
+
+- name: Retrieve etcd CA certificate
+ hosts: oo_first_etcd
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Retrieve etcd CA certificate
+ fetch:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+
+- name: Distribute etcd CA to masters
+ hosts: oo_masters_to_config
+ vars:
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ tasks:
+ - name: Deploy etcd CA
+ copy:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
+ dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
+ when: groups.oo_etcd_to_config | default([]) | length > 0
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file:
+ name: "{{ g_etcd_mktemp.stdout }}"
+ state: absent
+ changed_when: false
+
+- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
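
Note that the copy task in this new etcd-ca.yml playbook writes the new CA followed by the current CA into a single ca.crt bundle, so peers keep trusting certificates issued by either CA while they are rotated. A hedged sketch of a post-run sanity check (the /etc/etcd path is an assumption about the etcd_conf_dir default; this play is not part of the diff):

---
- name: Confirm the etcd CA bundle carries both CAs (illustrative check only)
  hosts: oo_etcd_to_config
  tasks:
    - name: Count certificates in the bundle
      command: grep -c 'BEGIN CERTIFICATE' /etc/etcd/ca.crt  # path assumed from etcd_conf_dir defaults
      register: l_ca_count
      changed_when: false
    - assert:
        that:
          - l_ca_count.stdout | int >= 2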
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
index 2963a5940..6b5c805e6 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
@@ -3,7 +3,8 @@
hosts: oo_first_etcd
any_errors_fatal: true
roles:
- - etcd_common
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
post_tasks:
- name: Determine if generated etcd certificates exist
stat:
@@ -27,7 +28,8 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - etcd_common
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
post_tasks:
- name: Backup etcd certificates
command: >
@@ -50,6 +52,7 @@
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Redeploy etcd client certificates for masters
hosts: oo_masters_to_config
@@ -63,4 +66,5 @@
etcd_cert_prefix: "master.etcd-"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
index c30889d64..51b196299 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
@@ -51,3 +51,13 @@
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_certificates_redeploy: true
+ - role: lib_utils
+ post_tasks:
+ - yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: servingInfo.namedCertificates
+ value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}"
+ when:
+ - ('named_certificates' in openshift.master)
+ - openshift.master.named_certificates | default([]) | length > 0
+ - openshift_master_overwrite_named_certificates | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 2af699209..089ae6bbc 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -6,131 +6,17 @@
msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
when: not openshift.common.version_gte_3_2_or_1_2 | bool
-- name: Backup existing etcd CA certificate directories
- hosts: oo_etcd_to_config
- roles:
- - etcd_common
- tasks:
- - name: Determine if CA certificate directory exists
- stat:
- path: "{{ etcd_ca_dir }}"
- register: etcd_ca_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_ca_dir }}
- args:
- warn: no
- when: etcd_ca_certs_dir_stat.stat.exists | bool
- - name: Remove CA certificate directory
- file:
- path: "{{ etcd_ca_dir }}"
- state: absent
- when: etcd_ca_certs_dir_stat.stat.exists | bool
-
-- name: Generate new etcd CA
- hosts: oo_first_etcd
- roles:
- - role: etcd_ca
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_etcd_mktemp
- changed_when: false
-
-- name: Distribute etcd CA to etcd hosts
- hosts: oo_etcd_to_config
+- name: Check cert expiries
+ hosts: oo_nodes_to_config:oo_masters_to_config
vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_certificate_expiry_show_all: yes
roles:
- - etcd_common
- tasks:
- - name: Create a tarball of the etcd ca certs
- command: >
- tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
- -C {{ etcd_ca_dir }} .
- args:
- creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- warn: no
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Retrieve etcd ca cert tarball
- fetch:
- src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Ensure ca directory exists
- file:
- path: "{{ etcd_ca_dir }}"
- state: directory
- - name: Unarchive etcd ca cert tarballs
- unarchive:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- - name: Read current etcd CA
- slurp:
- src: "{{ etcd_conf_dir }}/ca.crt"
- register: g_current_etcd_ca_output
- - name: Read new etcd CA
- slurp:
- src: "{{ etcd_ca_dir }}/ca.crt"
- register: g_new_etcd_ca_output
- - copy:
- content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
- dest: "{{ item }}/ca.crt"
- with_items:
- - "{{ etcd_conf_dir }}"
- - "{{ etcd_ca_dir }}"
-
-- name: Retrieve etcd CA certificate
- hosts: oo_first_etcd
- roles:
- - etcd_common
- tasks:
- - name: Retrieve etcd CA certificate
- fetch:
- src: "{{ etcd_conf_dir }}/ca.crt"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
-
-- name: Distribute etcd CA to masters
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Deploy CA certificate, key, bundle and serial
- copy:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
- dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
- when: groups.oo_etcd_to_config | default([]) | length > 0
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file:
- name: "{{ g_etcd_mktemp.stdout }}"
- state: absent
- changed_when: false
-
-- include: ../../../common/openshift-etcd/restart.yml
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
# Update master config when ca-bundle not referenced. Services will be
# restarted below after new CA certificate has been distributed.
@@ -322,7 +208,17 @@
group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}"
with_items: "{{ client_users }}"
-- include: ../../../common/openshift-master/restart.yml
+- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -371,4 +267,14 @@
state: absent
changed_when: false
-- include: ../../../common/openshift-node/restart.yml
+- include: ../../openshift-node/restart.yml
+ # Do not restart nodes when node certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index 6771cc98d..afd5463b2 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -48,10 +48,6 @@
# Replace dc/docker-registry certificate secret contents if set.
- block:
- - name: Load lib_openshift modules
- include_role:
- name: lib_openshift
-
- name: Retrieve registry service IP
oc_service:
namespace: default
@@ -70,9 +66,13 @@
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
- --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }}
+ {% endif %}
- name: Update registry certificates secret
oc_secret:
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index 35eedd5ee..748bbbf91 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -51,7 +51,7 @@
name: router-certs
namespace: default
state: absent
- run_once: true
+ run_once: true
- name: Remove router service annotations
command: >
@@ -67,7 +67,67 @@
service.alpha.openshift.io/serving-cert-secret-name=router-certs
--config={{ mktemp.stdout }}/admin.kubeconfig
-n default
- when: l_router_dc.rc == 0 and 'router-certs' in router_secrets
+ when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined
+
+ - block:
+ - assert:
+ that:
+ - "'certfile' in openshift_hosted_router_certificate"
+ - "'keyfile' in openshift_hosted_router_certificate"
+ - "'cafile' in openshift_hosted_router_certificate"
+ msg: |-
+ openshift_hosted_router_certificate has been set in the inventory but is
+ missing one or more required keys. Ensure that 'certfile', 'keyfile',
+ and 'cafile' keys have been specified for the openshift_hosted_router_certificate
+ inventory variable.
+
+ - name: Read router certificate and key
+ become: no
+ local_action:
+ module: slurp
+ src: "{{ item }}"
+ register: openshift_router_certificate_output
+ # Defaulting dictionary keys to none to avoid deprecation warnings
+ # (future fatal errors) during template evaluation. Dictionary keys
+ # won't be accessed unless openshift_hosted_router_certificate is
+ # defined and has all keys (certfile, keyfile, cafile) which we
+ # check above.
+ with_items:
+ - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
+
+ - name: Write temporary router certificate file
+ copy:
+ content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}"
+ dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ mode: 0600
+
+ - name: Write temporary router key file
+ copy:
+ content: "{{ (openshift_router_certificate_output.results
+ | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}"
+ dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ mode: 0600
+
+ - name: Replace router-certs secret
+ shell: >
+ {{ openshift.common.client_binary }} secrets new router-certs
+ tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ --type=kubernetes.io/tls
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ --confirm
+ -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f -
+
+ - name: Remove temporary router certificate and key files
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
+ - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
+ when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined
- name: Redeploy router
command: >
diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml
deleted file mode 100644
index fe86f4c23..000000000
--- a/playbooks/common/openshift-cluster/reset_excluder.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Re-enable excluder if it was previously enabled
- hosts: l_oo_all_hosts
- gather_facts: no
- tasks:
- - include_role:
- name: openshift_excluder
- tasks_from: reset
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
new file mode 100644
index 000000000..599350258
--- /dev/null
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -0,0 +1,20 @@
+---
+
+- name: Update Master configs
+ hosts: oo_masters
+ serial: 1
+ tasks:
+ - block:
+ - include_role:
+ name: openshift_service_catalog
+ tasks_from: wire_aggregator
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Service Catalog
+ hosts: oo_first_master
+ roles:
+ - openshift_service_catalog
+ - ansible_service_broker
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 078991b12..6cc56889a 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,30 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: no
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - set_fact:
- openshift_deployment_type: "{{ deployment_type }}"
-
- include: evaluate_groups.yml
tags:
- always
@@ -37,6 +11,10 @@
tags:
- node
+- include: initialize_openshift_repos.yml
+ tags:
+ - always
+
- include: initialize_openshift_version.yml
tags:
- always
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
deleted file mode 100644
index b83e4d821..000000000
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Subscribe hosts, update repos and update OS packages
- hosts: oo_hosts_to_update
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
- roles:
- # Explicitly calling openshift_facts because it appears that when
- # rhel_subscribe is skipped that the openshift_facts dependency for
- # openshift_repos is also skipped (this is the case at least for Ansible
- # 2.0.2)
- - openshift_facts
- - role: rhel_subscribe
- when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
- - openshift_repos
- - os_update_latest
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
deleted file mode 100644
index 9f7961614..000000000
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
- skip_node_svc_handlers: True
-
-- name: Update systemd units
- include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
new file mode 100644
index 000000000..800621857
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
new file mode 100644
index 000000000..a66301c0d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
new file mode 100644
index 000000000..7cc13137f
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -0,0 +1,75 @@
+---
+- include: ../../evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+
+- include: ../initialize_nodes_to_upgrade.yml
+
+- name: Check for appropriate Docker versions
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - set_fact:
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+ - fail:
+ msg: Cannot upgrade Docker on Atomic operating systems.
+ when: openshift.common.is_atomic | bool
+
+ - include: upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool
+
+
+# If a node fails, halt everything: the admin will need to clean up, and we
+# don't want to carry on and potentially take out every node. The playbook can
+# safely be re-run and will not take any action on a node already running the
+# requested docker version.
+- name: Drain and upgrade nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ serial: 1
+ any_errors_fatal: true
+
+ roles:
+ - lib_openshift
+
+ tasks:
+ - name: Mark node unschedulable
+ oc_adm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: False
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ retries: 10
+ delay: 5
+ register: node_unschedulable
+ until: node_unschedulable|succeeded
+ when:
+ - l_docker_upgrade is defined
+ - l_docker_upgrade | bool
+ - inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - name: Drain Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+ register: l_docker_upgrade_drain_result
+ until: not l_docker_upgrade_drain_result | failed
+ retries: 60
+ delay: 60
+
+ - include: tasks/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool
+
+ - name: Set node schedulability
+ oc_adm_manage_node:
+ node: "{{ openshift.node.nodename | lower }}"
+ schedulable: True
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ retries: 10
+ delay: 5
+ register: node_schedulable
+ until: node_schedulable|succeeded
+ when: node_unschedulable|changed
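
Note: the new play is gated end to end by the same variables it checks up front, so the whole Docker upgrade can be opted out of from inventory. A minimal sketch of the relevant extra vars (an illustrative vars file, not part of this diff; the nuke_images flag is the one referenced in tasks/upgrade.yml further down):

    # docker_upgrade controls both the version check and the drain/upgrade plays
    docker_upgrade: false
    # optionally remove local images while Docker is stopped during the upgrade
    # docker_upgrade_nuke_images: true
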
diff --git a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
index 8635eab0d..8635eab0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
+++ b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/roles b/playbooks/common/openshift-cluster/upgrades/docker/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/docker/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 1b418920f..83f16ac0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -1,6 +1,10 @@
---
- name: Restart docker
service: name=docker state=restarted
+ register: l_docker_restart_docker_in_upgrade_result
+ until: not l_docker_restart_docker_in_upgrade_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -11,7 +15,6 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -24,4 +27,5 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 17f8fc6e9..808cc562c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,7 +4,6 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -32,7 +31,13 @@
- debug: var=docker_image_count.stdout
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-- service: name=docker state=stopped
+- service:
+ name: docker
+ state: stopped
+ register: l_pb_docker_upgrade_stop_result
+ until: not l_pb_docker_upgrade_stop_result | failed
+ retries: 3
+ delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 7ef79afa9..616ba04f8 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -1,88 +1,14 @@
---
- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}"
+ hosts: oo_etcd_hosts_to_backup
roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- # For non containerized and non embedded we should have the correct version of
- # etcd installed already. So don't do anything.
- #
- # For embedded or containerized we need to use the latest because OCP 3.3 uses
- # a version of etcd that can only be backed up with etcd-3.x and if it's
- # containerized then etcd version may be newer than that on the host so
- # upgrade it.
- #
- # On atomic we have neither yum nor dnf so ansible throws a hard to debug error
- # if you use package there, like this: "Could not find a module for unknown."
- # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668
- #
- # TODO - We should refactor all containerized backups to use the containerized
- # version of etcd to perform the backup rather than relying on the host's
- # binaries. Until we do that we'll continue to have problems backing up etcd
- # when atomic host has an older version than the version that's running in the
- # container whether that's embedded or not
- - name: Install latest etcd for containerized or embedded
- package:
- name: etcd
- state: latest
- when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
-
- - name: Generate etcd backup
- command: >
- {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}"
+ - role: openshift_facts
+ - role: etcd_common
+ r_etcd_common_action: backup
+    r_etcd_common_backup_tag: "{{ etcd_backup_tag }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -91,10 +17,10 @@
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ | oo_select_keys(groups.oo_etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- fail:
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
deleted file mode 100644
index 5f8b59e17..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Verify cluster is healthy pre-upgrade
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
-
-- name: Get current image
- shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}'
- register: current_image
-
-- name: Set new_etcd_image
- set_fact:
- new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"
-
-- name: Pull new etcd image
- command: "docker pull {{ new_etcd_image }}"
-
-- name: Update to latest etcd image
- replace:
- dest: /etc/systemd/system/etcd_container.service
- regexp: "{{ current_image.stdout }}$"
- replace: "{{ new_etcd_image }}"
-
-- name: Restart etcd_container
- systemd:
- name: etcd_container
- daemon_reload: yes
- state: restarted
-
-## TODO: probably should just move this into the backup playbooks, also this
-## will fail on atomic host. We need to revisit how to do etcd backups there as
-## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
-- name: Upgrade etcd for etcdctl when not atomic
- package: name=etcd state=latest
- when: not openshift.common.is_atomic | bool
-
-- name: Verify cluster is healthy
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
- register: etcdctl
- until: etcdctl.rc == 0
- retries: 3
- delay: 10
-
-- name: Store new etcd_image
- openshift_facts:
- role: etcd
- local_facts:
- etcd_image: "{{ new_etcd_image }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
deleted file mode 100644
index 30232110e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# F23 GA'd with etcd 2.0, currently has 2.2 in updates
-# F24 GA'd with etcd-2.2, currently has 2.2 in updates
-# F25 Beta currently has etcd 3.0
-- name: Verify cluster is healthy pre-upgrade
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
-
-- name: Update etcd
- package:
- name: "etcd"
- state: "latest"
-
-- name: Restart etcd
- service:
- name: etcd
- state: restarted
-
-- name: Verify cluster is healthy
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
- register: etcdctl
- until: etcdctl.rc == 0
- retries: 3
- delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
deleted file mode 120000
index 641e04e44..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
+++ /dev/null
@@ -1 +0,0 @@
-../roles/etcd/files/etcdctl.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index fa86d29fb..64abc54e7 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -5,42 +5,19 @@
# mirrored packages on your own because only the GA and latest versions are
# available in the repos. So for Fedora we'll simply skip this, sorry.
-- include: ../../evaluate_groups.yml
- tags:
- - always
-
-# We use two groups one for hosts we're upgrading which doesn't include embedded etcd
-# The other for backing up which includes the embedded etcd host, there's no need to
-# upgrade embedded etcd that just happens when the master is updated.
-- name: Evaluate additional groups for etcd
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_upgrade
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_upgrade
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
- changed_when: False
-
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
-
- name: Backup etcd before upgrading anything
include: backup.yml
vars:
- backup_tag: "pre-upgrade-"
+ etcd_backup_tag: "pre-upgrade-"
when: openshift_etcd_backup | default(true) | bool
- name: Drop etcdctl profiles
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include: roles/etcd/tasks/etcdctl.yml
+ - include_role:
+ name: etcd_common
+ vars:
+ r_etcd_common_action: drop_etcdctl
- name: Perform etcd upgrade
include: ./upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
deleted file mode 100644
index 3a972e8ab..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Verify cluster is healthy pre-upgrade
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
-
-- name: Update etcd RPM
- package:
- name: etcd-{{ upgrade_version }}*
- state: latest
-
-- name: Restart etcd
- service:
- name: etcd
- state: restarted
-
-- name: Verify cluster is healthy
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
- register: etcdctl
- until: etcdctl.rc == 0
- retries: 3
- delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index a9b5b94e6..39e82498d 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -1,119 +1,110 @@
---
- name: Determine etcd version
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - name: Record RPM based etcd version
- command: rpm -qa --qf '%{version}' etcd\*
- args:
- warn: no
- register: etcd_rpm_version
- failed_when: false
- when: not openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
-# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
-# through hosts, then loop through tasks only when appropriate
-- name: Upgrade to 2.1
- hosts: etcd_hosts_to_upgrade
- serial: 1
+ - block:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ - debug:
+ msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
+ when:
+ - not openshift.common.is_containerized | bool
+
+ - block:
+ - name: Record containerized etcd version (docker)
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_docker
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+    # Given that a registered variable is set even when the when condition
+    # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+ - name: Record containerized etcd version (runc)
+ command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_runc
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+    # Given that a registered variable is set even when the when condition
+    # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+ - debug:
+ msg: "Etcd containerized version {{ etcd_container_version }} detected"
+ when:
+ - openshift.common.is_containerized | bool
+
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '2.1'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.1'
-- name: Upgrade RPM hosts to 2.2
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '2.2'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.2'
-- name: Upgrade containerized hosts to 2.2.5
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_image_members.yml
vars:
- upgrade_version: 2.2.5
- tasks:
- - include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.2.5'
-- name: Upgrade RPM hosts to 2.3
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '2.3'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.3'
-- name: Upgrade containerized hosts to 2.3.7
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_image_members.yml
vars:
- upgrade_version: 2.3.7
- tasks:
- - include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.3.7'
-- name: Upgrade RPM hosts to 3.0
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '3.0'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '3.0'
-- name: Upgrade containerized hosts to etcd3 image
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_image_members.yml
vars:
- upgrade_version: 3.0.15
- tasks:
- - include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+ etcd_upgrade_version: '3.0.15'
+
+- include: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '3.1'
+
+- include: upgrade_image_members.yml
+ vars:
+ etcd_upgrade_version: '3.1.3'
- name: Upgrade fedora to latest
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include: fedora_tasks.yml
- when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool
+ - include_role:
+ name: etcd_upgrade
+ when:
+ - ansible_distribution == 'Fedora'
+ - not openshift.common.is_containerized | bool
- name: Backup etcd
include: backup.yml
vars:
- backup_tag: "post-3.0-"
+ etcd_backup_tag: "post-3.0-"
when: openshift_etcd_backup | default(true) | bool
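
Note: the split between the "Record ... version" tasks and the follow-up set_fact tasks in the hunk above exists because a task skipped by its when clause still defines the variable it registers (as a skipped result with no stdout). A minimal illustration of the failure the workaround avoids, using the same task names as the diff:

    - name: Record containerized etcd version (docker)
      command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
      register: etcd_container_version_docker
      when: not openshift.common.is_etcd_system_container | bool

    # On a system-container host the task is skipped, yet
    # etcd_container_version_docker is still defined, so referencing
    # etcd_container_version_docker.stdout directly would raise an
    # undefined-attribute error; hence the guarded set_fact that copies the
    # value into etcd_container_version only on the branch that actually ran.
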
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
new file mode 100644
index 000000000..831ca8f57
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
@@ -0,0 +1,17 @@
+---
+# INPUT etcd_upgrade_version
+# INPUT etcd_container_version
+# INPUT openshift.common.is_containerized
+- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
+ hosts: oo_etcd_hosts_to_upgrade
+ serial: 1
+ roles:
+ - role: etcd_upgrade
+ r_etcd_upgrade_action: upgrade
+ r_etcd_upgrade_mechanism: image
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_peer: "{{ openshift.common.hostname }}"
+ when:
+ - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
+ - openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
new file mode 100644
index 000000000..2e79451e0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
@@ -0,0 +1,18 @@
+---
+# INPUT etcd_upgrade_version
+# INPUT etcd_rpm_version
+# INPUT openshift.common.is_containerized
+- name: Upgrade to {{ etcd_upgrade_version }}
+ hosts: oo_etcd_hosts_to_upgrade
+ serial: 1
+ roles:
+ - role: etcd_upgrade
+ r_etcd_upgrade_action: upgrade
+ r_etcd_upgrade_mechanism: rpm
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
+ when:
+ - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
+ - ansible_distribution == 'RedHat'
+ - not openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index a3b8c489e..0f421928b 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,78 +1,22 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
-
- include: ../evaluate_groups.yml
vars:
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
-- name: Set oo_options
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- - set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
+- include: ../initialize_oo_option_facts.yml
- include: ../initialize_facts.yml
-- name: Ensure clean repo cache in the event repos have been changed manually
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
- args:
- warn: no
-
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
tasks:
- name: Check if iptables is running
command: systemctl status iptables
- ignore_errors: true
changed_when: false
+ failed_when: false
register: service_iptables_status
- name: Set fact os_firewall_use_firewalld FALSE for iptables
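
Note: swapping ignore_errors for failed_when: false changes how the probe is reported, not what it does: the task can no longer show up as failed, and with changed_when: false it is purely informational. A minimal sketch of the resulting pattern for any "inspect a service, never fail, never change" task, assuming nothing beyond what the play already does:

    - name: Check if iptables is running
      command: systemctl status iptables
      register: service_iptables_status
      changed_when: false   # read-only probe, never reports a change
      failed_when: false    # non-zero exit (service stopped) is expected, not an error
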
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 046535680..72de63070 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -6,27 +6,32 @@
- lib_openshift
tasks:
- - name: Retrieve list of openshift nodes matching upgrade label
- oc_obj:
- state: list
- kind: node
- selector: "{{ openshift_upgrade_nodes_label }}"
- register: nodes_to_upgrade
- when: openshift_upgrade_nodes_label is defined
+ - when: openshift_upgrade_nodes_label is defined
+ block:
+ - name: Retrieve list of openshift nodes matching upgrade label
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift_upgrade_nodes_label }}"
+ register: nodes_to_upgrade
- # We got a list of nodes with the label, now we need to match these with inventory hosts
- # using their openshift.common.hostname fact.
- - name: Map labelled nodes to inventory hosts
- add_host:
- name: "{{ item }}"
- groups: temp_nodes_to_upgrade
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: " {{ groups['oo_nodes_to_config'] }}"
- when:
- - openshift_upgrade_nodes_label is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
- changed_when: false
+ - name: Fail if no nodes match openshift_upgrade_nodes_label
+ fail:
+ msg: "openshift_upgrade_nodes_label was specified but no nodes matched"
+ when: nodes_to_upgrade.results.results[0]['items'] | length == 0
+
+ # We got a list of nodes with the label, now we need to match these with inventory hosts
+ # using their openshift.common.hostname fact.
+ - name: Map labelled nodes to inventory hosts
+ add_host:
+ name: "{{ item }}"
+ groups: temp_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: " {{ groups['oo_nodes_to_config'] }}"
+ when:
+ - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
# present, otherwise hit all nodes:
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 673f11889..4eac8b067 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker
deleted file mode 120000
index 6aeca2842..000000000
--- a/playbooks/common/openshift-cluster/upgrades/master_docker
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles/openshift_master/templates/master_docker \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 6f096f705..d9ddf3860 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -5,10 +5,12 @@
- name: Upgrade default router and default registry
hosts: oo_first_master
vars:
- openshift_deployment_type: "{{ deployment_type }}"
- registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}"
- router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
- oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+ registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) |
+ replace ( '${version}', openshift_image_tag ) }}"
+ router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) |
+ replace ( '${version}', openshift_image_tag ) }}"
+ registry_console_image: "{{ openshift.master.registry_url | regex_replace ( '(origin|ose)-\\${component}', 'registry-console') |
+ replace ( '${version}', 'v' ~ openshift.common.short_version ) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -22,7 +24,10 @@
selector: 'router'
register: all_routers
- - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+ - set_fact:
+ haproxy_routers: "{{ all_routers.results.results[0]['items'] |
+ oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
+ oo_select_keys_from_list(['metadata']) }}"
when:
- all_routers.results.returncode == 0
@@ -31,16 +36,15 @@
- all_routers.results.returncode != 0
- name: Update router image to current version
+ oc_edit:
+ kind: dc
+ name: "{{ item['labels']['deploymentconfig'] }}"
+ namespace: "{{ item['namespace'] }}"
+ content:
+ spec.template.spec.containers[0].image: "{{ router_image }}"
+ with_items: "{{ haproxy_routers }}"
when:
- all_routers.results.returncode == 0
- command: >
- {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
- --api-version=v1
- with_items: "{{ haproxy_routers }}"
- # AUDIT:changed_when_note: `false` not being set here. What we
- # need to do is check the current router image version and see if
- # this task needs to be ran.
- name: Check for default registry
oc_obj:
@@ -50,15 +54,34 @@
register: _default_registry
- name: Update registry image to current version
+ oc_edit:
+ kind: dc
+ name: docker-registry
+ namespace: default
+ content:
+ spec.template.spec.containers[0].image: "{{ registry_image }}"
when:
- _default_registry.results.results[0] != {}
- command: >
- {{ oc_cmd }} patch dc/docker-registry -n default -p
- '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
- --api-version=v1
- # AUDIT:changed_when_note: `false` not being set here. What we
- # need to do is check the current registry image version and see
- # if this task needs to be ran.
+
+ - name: Check for registry-console
+ oc_obj:
+ state: list
+ kind: dc
+ name: registry-console
+ register: _registry_console
+ when:
+ - openshift.common.deployment_type != 'origin'
+
+ - name: Update registry-console image to current version
+ oc_edit:
+ kind: dc
+ name: registry-console
+ namespace: default
+ content:
+ spec.template.spec.containers[0].image: "{{ registry_console_image }}"
+ when:
+ - openshift.common.deployment_type != 'origin'
+ - _registry_console.results.results[0] != {}
roles:
- openshift_manageiq
@@ -96,6 +119,12 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- include: ../reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
new file mode 100644
index 000000000..9d8b73cff
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -0,0 +1,20 @@
+---
+# Only check if docker upgrade is required if docker_upgrade is not
+# already set to False.
+- include: ../docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+# Additional checks for Atomic hosts:
+
+- name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+- set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
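
Note: the Atomic-host check leans on rpm's --queryformat to emit a tiny YAML document that from_yaml turns into a dict. For a host with only an installed docker package the parsed value would look roughly like this (version number is hypothetical):

    ---
    curr_version: 1.12.6
    avail_version:

Because avail_version parses as empty, the default(l_docker_version.curr_version, true) filter falls back to the installed version, so the 1.12 comparison still has something to test against.
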
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 06eb5f936..45022cd61 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -9,23 +9,16 @@
local_facts:
ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- - name: Ensure Master is running
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
-
- name: Ensure HA Master is running
service:
name: "{{ openshift.common.service_type }}-master-api"
state: started
enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+ when: openshift.common.is_containerized | bool
- name: Ensure HA Master is running
service:
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+ when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
deleted file mode 100644
index 7646e0fa6..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- # Only check if docker upgrade is required if docker_upgrade is not
- # already set to False.
- - include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
-
- # Additional checks for Atomic hosts:
-
- - name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
- - set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
- - fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
new file mode 100644
index 000000000..497709d25
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -0,0 +1,13 @@
+---
+- name: Verify Host Requirements
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
deleted file mode 100644
index 354af3cde..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Verify node processes
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- - openshift_docker_facts
- tasks:
- - name: Ensure Node is running
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index c83923dae..9b4a8e413 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -1,41 +1,43 @@
---
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- pre_tasks:
- - fail:
- msg: Verify OpenShift is already installed
- when: openshift.common.version is not defined
+- name: Fail when OpenShift is not installed
+ fail:
+ msg: Verify OpenShift is already installed
+ when: openshift.common.version is not defined
- - fail:
- msg: Verify the correct version was found
- when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+- name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- when: not openshift.common.is_containerized | bool
+- when: not openshift.common.is_containerized | bool
+ block:
+ - name: Check latest available OpenShift RPM version
+ repoquery:
+ name: "{{ openshift.common.service_type }}"
+ ignore_excluders: true
+ register: repoquery_out
- - name: Verify containers are available for upgrade
- command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
+ - name: Fail when unable to determine available OpenShift RPM version
+ fail:
+ msg: "Unable to determine available OpenShift RPM version"
+ when:
+ - not repoquery_out.results.package_found
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
- when: not openshift.common.is_containerized | bool
+ - name: Set fact avail_openshift_version
+ set_fact:
+ avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
- name: Verify OpenShift RPMs are available for upgrade
fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+ msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ when:
+ - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<')
- - fail:
- msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
- when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
+- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
+ fail:
+ msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ when:
+ - deployment_type == 'origin'
+ - openshift.common.version | version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index df2b664d4..164baca81 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,8 +1,39 @@
---
-# We verified latest rpm available is suitable, so just yum update.
-- name: Upgrade packages
- package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
+# When we update package "a-${version}" and a requires b >= ${version}, then if
+# we don't specify the version of b, yum will choose the latest available
+# version of b and the whole set of dependencies ends up at the latest version.
+# Since the package module, unlike the yum module, doesn't flatten a list of
+# packages into one transaction, we need to do that explicitly. The ansible
+# core team tells us not to rely on yum module transaction flattening anyway.
-- name: Ensure python-yaml present for config upgrade
- package: name=PyYAML state=present
- when: not openshift.common.is_atomic | bool
+# TODO: If the sdn package isn't already installed, this will install it; we
+# should fix that
+
+- name: Upgrade master packages
+ package: name={{ master_pkgs | join(',') }} state=present
+ vars:
+ master_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "master"
+ - not openshift.common.is_atomic | bool
+
+- name: Upgrade node packages
+ package: name={{ node_pkgs | join(',') }} state=present
+ vars:
+ node_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "node"
+ - not openshift.common.is_atomic | bool
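
Note: the join(',') is what turns each list into a single yum/dnf transaction; as the header comment notes, handing the package module a list is not flattened that way, so each unpinned dependency could be resolved on its own and drift to the newest version in the repos. A minimal sketch of the two shapes (illustrative only):

    # one transaction: every package and its dependencies resolved together
    - package: name={{ master_pkgs | join(',') }} state=present

    # what the comment warns against: the list is not flattened into one
    # transaction, so dependency versions can float independently
    - package:
        name: "{{ master_pkgs }}"
        state: present
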
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index fd01a6625..18f10437d 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,17 +2,22 @@
###############################################################################
# Upgrade Masters
###############################################################################
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
+
+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade all storage
+ hosts: oo_first_master
tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
+ - name: Upgrade all storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ register: l_pb_upgrade_control_plane_pre_upgrade_storage
+ when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
+ failed_when:
+ - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool
+ - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+ - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
@@ -64,6 +69,7 @@
static: yes
roles:
- openshift_facts
+ - lib_utils
post_tasks:
# Run the pre-upgrade hook if defined:
@@ -85,7 +91,7 @@
- include_vars: ../../../../roles/openshift_master/vars/main.yml
- - name: Update systemd units
+ - name: Remove any legacy systemd units and update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
- name: Check for ca-bundle.crt
@@ -113,6 +119,13 @@
state: link
when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
+ - name: Update oreg value
+ yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: 'imageConfig.format'
+ value: "{{ oreg_url | default(oreg_url_master) }}"
+ when: oreg_url is defined or oreg_url_master is defined
+
# Run the upgrade hook prior to restarting services/system if defined:
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
when: openshift_master_upgrade_hook is defined
@@ -133,6 +146,19 @@
- include: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
+ - name: Post master upgrade - Upgrade clusterpolicies storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=clusterpolicies --confirm
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool
+ run_once: true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
- set_fact:
master_update_complete: True
@@ -173,7 +199,11 @@
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- policy reconcile-cluster-roles --additive-only=true --confirm
+ policy reconcile-cluster-roles --additive-only=true --confirm -o name
+ register: reconcile_cluster_role_result
+ changed_when:
+ - reconcile_cluster_role_result.stdout != ''
+ - reconcile_cluster_role_result.rc == 0
run_once: true
- name: Reconcile Cluster Role Bindings
@@ -184,21 +214,45 @@
--exclude-groups=system:authenticated:oauth
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
- --additive-only=true --confirm
+ --additive-only=true --confirm -o name
when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+ register: reconcile_bindings_result
+ changed_when:
+ - reconcile_bindings_result.stdout != ''
+ - reconcile_bindings_result.rc == 0
run_once: true
- name: Reconcile Jenkins Pipeline Role Bindings
command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
run_once: true
+ register: reconcile_jenkins_role_binding_result
+ changed_when:
+ - reconcile_jenkins_role_binding_result.stdout != ''
+ - reconcile_jenkins_role_binding_result.rc == 0
when: openshift.common.version_gte_3_4_or_1_4 | bool
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
+ {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
+ register: reconcile_scc_result
+ changed_when:
+ - reconcile_scc_result.stdout != ''
+ - reconcile_scc_result.rc == 0
run_once: true
+ - name: Migrate storage post policy reconciliation
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ run_once: true
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool
+
- set_fact:
reconcile_complete: True
@@ -227,15 +281,15 @@
roles:
- openshift_facts
tasks:
- - include: docker/upgrade.yml
+ - include: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
hosts: oo_masters_to_config:&oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -246,7 +300,7 @@
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -257,18 +311,23 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_control_plane_drain_result
+ until: not l_upgrade_control_plane_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
+ - openshift_node_dnsmasq
- openshift_node_upgrade
post_tasks:
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 4e1838c71..c93a5d89c 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -4,7 +4,7 @@
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -15,7 +15,7 @@
# or docker actually needs an upgrade before proceeding. Perhaps best to save this until
# we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- name: Mark node unschedulable
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: False
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -26,18 +26,26 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
+ - openshift_node_dnsmasq
- openshift_node_upgrade
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
- name: Set node schedulability
- oadm_manage_node:
+ oc_adm_manage_node:
node: "{{ openshift.node.nodename | lower }}"
schedulable: True
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -46,7 +54,3 @@
register: node_schedulable
until: node_schedulable|succeeded
when: node_unschedulable|changed
-
-- include: ../reset_excluder.yml
- tags:
- - always
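Re-enabling the excluders now happens inside the node upgrade play, via the parameterised openshift_excluder role, instead of a trailing reset_excluder.yml include; this keeps enable/disable scoped to each batch of drained nodes. A minimal standalone equivalent of the role entry added above:

- hosts: oo_nodes_to_upgrade
  roles:
    - role: openshift_excluder
      r_openshift_excluder_action: enable
      r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"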
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
index 88f2ddc78..8558bf3e9 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -63,31 +63,34 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}"
+ when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
- debug:
msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}"
+ when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
+ when: openshift_master_scheduler_predicates | default(none) is not none
# Handle cases where openshift_master_predicates is not defined
- block:
- debug:
msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
- openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+ - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
- set_fact:
openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
- openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+ - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
- set_fact:
openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
- when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
- openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != default_predicates_no_region
+ - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
- when: "{{ openshift_master_scheduler_predicates | default(none) is none }}"
+ when: openshift_master_scheduler_predicates | default(none) is none
# Upgrade priorities
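These hunks also drop the Jinja2 delimiters from when: clauses (conditionals are templated implicitly, and newer Ansible warns about the bracketed form) and split and-ed conditions into YAML lists. A small before/after sketch with an illustrative variable:

# before: bracketed template in the conditional
- debug:
    msg: "old style"
  when: "{{ some_var is defined and some_var | bool }}"

# after: bare expression; and-ed clauses become list items
- debug:
    msg: "new style"
  when:
    - some_var is defined
    - some_var | bool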
@@ -120,31 +123,34 @@
- block:
- debug:
msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}"
+ when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
- debug:
msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}"
+ when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
+ when: openshift_master_scheduler_priorities | default(none) is not none
# Handle cases where openshift_master_priorities is not defined
- block:
- debug:
msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
- openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+ - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
- set_fact:
openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
- openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+ - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
- set_fact:
openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
- when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
- openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != default_priorities_no_zone
+ - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
- when: "{{ openshift_master_scheduler_priorities | default(none) is none }}"
+ when: openshift_master_scheduler_priorities | default(none) is none
# Update scheduler
@@ -162,5 +168,6 @@
content: "{{ scheduler_config | to_nice_json }}"
dest: "{{ openshift_master_scheduler_conf }}"
backup: true
- when: "{{ openshift_upgrade_scheduler_predicates is defined or
- openshift_upgrade_scheduler_priorities is defined }}"
+ when: >
+ openshift_upgrade_scheduler_predicates is defined or
+ openshift_upgrade_scheduler_priorities is defined
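Where the combined condition is an or, a list will not do (list entries are and-ed), so a folded block scalar is used instead; a minimal illustration with placeholder variable names:

- debug:
    msg: "runs when either variable is defined"
  when: >
    example_predicates is defined or
    example_priorities is defined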
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 68c71a132..d69472fad 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -53,7 +53,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
new file mode 100644
index 000000000..a241ef039
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -0,0 +1,118 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
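The master_config_hook and node_config_hook variables point the generic control-plane and node upgrade playbooks at version-specific task files; a hook is just a flat list of tasks. A minimal sketch modelled on the v3_3 master hook shown later in this diff:

# v3_X/master_config_upgrade.yml -- tasks only, no play header
- modify_yaml:
    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
    yaml_key: 'admissionConfig.pluginConfig'
    yaml_value: "{{ openshift.master.admission_plugin_config }}"
  when: "'admission_plugin_config' in openshift.master"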
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..54c85f0fb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,118 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..cee4e9087
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,113 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index 43c2ffcd4..ed89dbe8d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -3,7 +3,7 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'admissionConfig.pluginConfig'
yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "{{ 'admission_plugin_config' in openshift.master }}"
+ when: "'admission_plugin_config' in openshift.master"
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
new file mode 100644
index 000000000..ae217ba2e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -0,0 +1,116 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
new file mode 100644
index 000000000..d7cb38d03
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -0,0 +1,118 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
new file mode 100644
index 000000000..8531e6045
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -0,0 +1,111 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
deleted file mode 100644
index 48c69eccd..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade job storage
-###############################################################################
-- name: Upgrade job storage
- hosts: oo_first_master
- roles:
- - { role: openshift_cli }
- vars:
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
- tasks:
- - name: Upgrade job storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
- run_once: true
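This jobs-only migration is removed because the post_control_plane changes earlier in this diff add a broader migrate storage --include=* run after policy reconciliation, gated by two new inventory toggles. A sketch of how those toggles might be set (names taken from the new task, values shown are the defaults):

# illustrative inventory snippet
openshift_upgrade_post_storage_migration_enabled: true    # run the post-reconciliation storage migration
openshift_upgrade_post_storage_migration_fatal: false     # log failures instead of aborting the upgrade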
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
new file mode 100644
index 000000000..a3d0d6305
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -0,0 +1,118 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
new file mode 100644
index 000000000..5fee56615
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -0,0 +1,122 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
new file mode 100644
index 000000000..e29d0f8e6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -0,0 +1,111 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
new file mode 100644
index 000000000..ae63c9ca9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml
@@ -0,0 +1,67 @@
+---
+###############################################################################
+# Pre-upgrade checks for known data problems. If this playbook fails, you should
+# contact support. If you're not supported, contact users@lists.openshift.com
+#
+# oc_objectvalidator provides these two checks
+# 1 - SDN Data issues, never seen in the wild but known possible due to code audits
+# https://github.com/openshift/origin/issues/12697
+# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934
+#
+###############################################################################
+- name: Verify 3.5 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
+
+ # What's all this PetSet business about?
+ #
+ # 'PetSets' were ALPHA resources in Kube <= 3.4. In >= 3.5 they are
+ # no longer supported. The BETA resource 'StatefulSets' replaces
+ # them. We can't migrate clients' PetSets to
+ # StatefulSets. Additionally, Red Hat has never officially supported
+ # these resource types. Sorry users, but if you were using
+ # unsupported resources from the Kube documentation then we can't
+ # help you at this time.
+ #
+ # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229
+ - name: Check if legacy PetSets exist
+ oc_obj:
+ state: list
+ all_namespaces: true
+ kind: petsets
+ register: l_do_petsets_exist
+
+ - name: Fail on unsupported resource migration 'PetSets'
+ fail:
+ msg: >
+ PetSet objects were detected in your cluster. These are an
+ Alpha feature in upstream Kubernetes 1.4 and are not supported
+ by Red Hat. In Kubernetes 1.5, they are replaced by the Beta
+ feature StatefulSets. Red Hat currently does not offer support
+ for either PetSets or StatefulSets.
+
+ Automatically migrating PetSets to StatefulSets in OpenShift
+ Container Platform (OCP) 3.5 is not supported. See the
+ Kubernetes "Upgrading from PetSets to StatefulSets"
+ documentation for additional information:
+
+ https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/
+
+ PetSets MUST be removed before upgrading to OCP 3.5. Red Hat
+ strongly recommends reading the above referenced documentation
+ in its entirety before taking any destructive actions.
+
+ If you want to simply remove all PetSets without manually
+ migrating to StatefulSets, run this command as a user with
+ cluster-admin privileges:
+
+ $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false
+ when:
+ # Search did not fail, valid resource type found
+ - l_do_petsets_exist.results.returncode == 0
+ # Items do exist in the search results
+ - l_do_petsets_exist.results.results.0['items'] | length > 0
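The same gate can be checked by hand before starting the upgrade; a hedged sketch of an ad-hoc task that fails when any legacy PetSets remain, built from the client binary used elsewhere in these playbooks:

- name: Count legacy PetSets across all namespaces
  command: >
    {{ openshift.common.client_binary }} get petsets --all-namespaces -o name
  register: l_petset_check
  changed_when: false
  failed_when: l_petset_check.stdout_lines | length > 0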
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
new file mode 120000
index 000000000..7de3c1dd7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/roles b/playbooks/common/openshift-cluster/upgrades/v3_6/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/roles
@@ -0,0 +1 @@
+../../../../../roles/ \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..51acd17da
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,122 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..9fe059ac9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,122 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process, which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
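The master_config_hook variable passed here points the shared ../upgrade_control_plane.yml at the 3.6-specific master-config.yaml migration. A hedged sketch of how such a hook variable is typically consumed (assumed wiring, not a verbatim excerpt of the shared playbook):

    # Run the version-specific master config migration when a hook was supplied.
    - include: "{{ master_config_hook }}"
      when: master_config_hook is defined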
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..1b10d4e37
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,111 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process, which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
new file mode 100644
index 000000000..78c1767b8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
@@ -0,0 +1,12 @@
+---
+###############################################################################
+# Pre-upgrade checks for known data problems. If this playbook fails, you should
+# contact support. If you're not supported, contact users@lists.openshift.com.
+###############################################################################
+- name: Verify 3.6 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
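The oc_objectvalidator module is provided by the lib_openshift role listed above and takes no arguments here; it fails the play when it finds broken namespaces or SDN objects. If you need to see what it reported, a small assumed variation is to register and print its output:

    - name: Check for invalid namespaces and SDN errors
      oc_objectvalidator:
      register: l_validator_out

    - debug:
        var: l_validator_out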
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
new file mode 120000
index 000000000..7de3c1dd7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
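These three modify_yaml tasks write the configured admission plugin settings to admissionConfig.pluginConfig and clear the two legacy keys. An illustrative master-config.yaml fragment after the hook has run (the plugin name and its settings are example values, not shipped defaults):

    admissionConfig:
      pluginConfig:
        openshift.io/ImagePolicy:
          configuration:
            apiVersion: v1
            kind: ImagePolicyConfig
      pluginOrderOverride: null
    kubernetesMasterConfig:
      admissionConfig: null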
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/roles b/playbooks/common/openshift-cluster/upgrades/v3_7/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
new file mode 100644
index 000000000..9ec40723a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -0,0 +1,122 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process, which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
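The "Set openshift_no_proxy_internal_hostnames" play in this file folds the hostnames of all nodes, masters, and etcd hosts into one comma-separated string so proxied clusters do not route internal traffic through the proxy. A hypothetical example of the resulting fact for a three-host cluster (hostnames assumed):

    openshift_no_proxy_internal_hostnames: master1.example.com,node1.example.com,etcd1.example.com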
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
new file mode 100644
index 000000000..f97f34c3b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -0,0 +1,122 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process, which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
new file mode 100644
index 000000000..e95b90cd5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -0,0 +1,111 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed; nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process, which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
new file mode 100644
index 000000000..f76fc68d1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -0,0 +1,23 @@
+---
+###############################################################################
+# Pre-upgrade checks for known data problems. If this playbook fails, you should
+# contact support. If you're not supported, contact users@lists.openshift.com.
+###############################################################################
+- name: Verify 3.7 specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
+
+ - name: Confirm OpenShift authorization objects are in sync
+ command: >
+ {{ openshift.common.client_binary }} adm migrate authorization
+ when: not openshift.common.version_gte_3_7 | bool
+ changed_when: false
+ register: l_oc_result
+ until: l_oc_result.rc == 0
+ retries: 4
+ delay: 15
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 48cc03b19..be2e6a15a 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -1,16 +1,23 @@
---
-- name: Gather and set facts for node hosts
+- name: Validate node hostnames
hosts: oo_nodes_to_config
- roles:
- - openshift_facts
tasks:
- - shell:
+ - name: Query DNS for IP address of {{ openshift.common.hostname }}
+ shell:
getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
register: lookupip
changed_when: false
failed_when: false
- name: Warn user about bad openshift_hostname values
pause:
- prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ prompt:
+ The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
+ doesn't resolve to an IP address owned by this host. Please set the
+ openshift_hostname variable to a hostname that, when resolved on the host
+ in question, resolves to an IP address matching an interface on this
+ host. This host will fail liveness checks for pods utilizing hostPorts;
+ press ENTER to continue or CTRL-C to abort.
seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
- when: lookupip.stdout not in ansible_all_ipv4_addresses
+ when:
+ - lookupip.stdout != '127.0.0.1'
+ - lookupip.stdout not in ansible_all_ipv4_addresses
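The added lookupip.stdout != '127.0.0.1' condition stops the warning from firing when the hostname merely resolves to loopback (for example via /etc/hosts). The two inventory variables this check reads can be set per host; a hypothetical host_vars sketch (hostnames are placeholders):

    # host_vars/node01.example.com.yml (assumed layout)
    openshift_hostname: node01.internal.example.com
    # Turn the blocking ENTER prompt into a 10 second pause instead.
    openshift_override_hostname_check: true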