Diffstat (limited to 'playbooks/common')
-rw-r--r-- playbooks/common/README.md | 7
-rw-r--r-- playbooks/common/openshift-cfme/config.yml | 44
l--------- playbooks/common/openshift-cfme/filter_plugins | 1
l--------- playbooks/common/openshift-cfme/library | 1
l--------- playbooks/common/openshift-cfme/roles | 1
-rw-r--r-- playbooks/common/openshift-cfme/uninstall.yml | 8
-rw-r--r-- playbooks/common/openshift-checks/adhoc.yml | 12
-rw-r--r-- playbooks/common/openshift-checks/health.yml | 11
-rw-r--r-- playbooks/common/openshift-checks/pre-install.yml | 11
l--------- playbooks/common/openshift-checks/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/config.yml | 44
-rw-r--r-- playbooks/common/openshift-cluster/disable_excluder.yml | 17
-rw-r--r-- playbooks/common/openshift-cluster/enable_dnsmasq.yml | 8
-rw-r--r-- playbooks/common/openshift-cluster/evaluate_groups.yml | 116
-rw-r--r-- playbooks/common/openshift-cluster/initialize_facts.yml | 149
-rw-r--r-- playbooks/common/openshift-cluster/initialize_openshift_repos.yml | 8
-rw-r--r-- playbooks/common/openshift-cluster/initialize_openshift_version.yml | 29
-rw-r--r-- playbooks/common/openshift-cluster/openshift_hosted.yml | 5
-rw-r--r-- playbooks/common/openshift-cluster/openshift_logging.yml | 2
-rw-r--r-- playbooks/common/openshift-cluster/openshift_metrics.yml | 11
-rw-r--r-- playbooks/common/openshift-cluster/openshift_prometheus.yml | 9
-rw-r--r-- playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml | 158
-rw-r--r-- playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml | 8
-rw-r--r-- playbooks/common/openshift-cluster/redeploy-certificates/masters.yml | 10
-rw-r--r-- playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml (renamed from playbooks/common/openshift-cluster/redeploy-certificates/ca.yml) | 152
-rw-r--r-- playbooks/common/openshift-cluster/redeploy-certificates/registry.yml | 1
-rw-r--r-- playbooks/common/openshift-cluster/redeploy-certificates/router.yml | 3
-rw-r--r-- playbooks/common/openshift-cluster/reset_excluder.yml | 8
-rw-r--r-- playbooks/common/openshift-cluster/sanity_checks.yml | 51
-rw-r--r-- playbooks/common/openshift-cluster/service_catalog.yml | 20
-rw-r--r-- playbooks/common/openshift-cluster/std_include.yml | 8
-rw-r--r-- playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml | 2
-rw-r--r-- playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml | 2
-rw-r--r-- playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml | 2
-rw-r--r-- playbooks/common/openshift-cluster/update_repos_and_packages.yml | 18
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml | 14
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/disable_excluder.yml | 22
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml | 12
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 12
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml | 9
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/restart.yml) | 6
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml (renamed from playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml) | 9
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 4
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/etcd/backup.yml | 92
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml | 46
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml | 23
l--------- playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/etcd/main.yml | 35
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml | 20
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml | 179
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml | 17
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml | 18
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/init.yml | 12
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 45
-rwxr-xr-x playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 2
l--------- playbooks/common/openshift-cluster/upgrades/master_docker | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 30
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml | 22
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml | 29
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 11
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml | 23
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 13
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml | 13
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 66
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml | 56
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 71
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 16
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml | 35
l--------- playbooks/common/openshift-cluster/upgrades/v3_3/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 118
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 118
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 113
l--------- playbooks/common/openshift-cluster/upgrades/v3_4/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 116
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 118
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 111
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 16
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml | 18
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 118
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 122
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 111
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 16
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml | 18
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 122
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 122
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 111
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml | 4
l--------- playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml | 16
l--------- playbooks/common/openshift-cluster/upgrades/v3_7/roles | 1
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 122
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 122
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 111
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 23
-rw-r--r-- playbooks/common/openshift-cluster/validate_hostnames.yml | 19
-rw-r--r-- playbooks/common/openshift-etcd/config.yml | 2
-rw-r--r-- playbooks/common/openshift-etcd/migrate.yml | 147
-rw-r--r-- playbooks/common/openshift-etcd/restart.yml | 2
-rw-r--r-- playbooks/common/openshift-etcd/scaleup.yml | 74
-rw-r--r-- playbooks/common/openshift-etcd/service.yml | 23
-rw-r--r-- playbooks/common/openshift-glusterfs/config.yml | 37
-rw-r--r-- playbooks/common/openshift-glusterfs/registry.yml | 49
-rw-r--r-- playbooks/common/openshift-loadbalancer/config.yml | 2
-rw-r--r-- playbooks/common/openshift-loadbalancer/service.yml | 23
-rw-r--r-- playbooks/common/openshift-master/additional_config.yml (renamed from playbooks/common/openshift-cluster/additional_config.yml) | 6
-rw-r--r-- playbooks/common/openshift-master/config.yml | 127
-rw-r--r-- playbooks/common/openshift-master/restart.yml | 2
-rw-r--r-- playbooks/common/openshift-master/restart_hosts.yml | 1
-rw-r--r-- playbooks/common/openshift-master/restart_services.yml | 6
-rw-r--r-- playbooks/common/openshift-master/scaleup.yml | 25
-rw-r--r-- playbooks/common/openshift-master/service.yml | 23
-rw-r--r-- playbooks/common/openshift-nfs/config.yml | 2
-rw-r--r-- playbooks/common/openshift-nfs/service.yml | 21
-rw-r--r-- playbooks/common/openshift-node/config.yml | 53
-rw-r--r-- playbooks/common/openshift-node/network_manager.yml | 4
-rw-r--r-- playbooks/common/openshift-node/restart.yml | 8
-rw-r--r-- playbooks/common/openshift-node/scaleup.yml | 38
-rw-r--r-- playbooks/common/openshift-node/service.yml | 26
118 files changed, 3411 insertions(+), 1081 deletions(-)
diff --git a/playbooks/common/README.md b/playbooks/common/README.md
index 0b5e26989..968bd99cb 100644
--- a/playbooks/common/README.md
+++ b/playbooks/common/README.md
@@ -1,9 +1,8 @@
# Common playbooks
This directory has a generic set of playbooks that are included by playbooks in
-[`byo`](../byo), as well as other playbooks related to the
-[`bin/cluster`](../../bin) tool.
+[`byo`](../byo).
Note: playbooks in this directory use generic group names that do not line up
-with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks,
-requiring an explicit remapping of groups.
+with the groups used by the `byo` playbooks, requiring an explicit remapping of
+groups.
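The remapping the README describes is done with `add_host` in `evaluate_groups.yml`, changed later in this diff. A minimal sketch of the pattern, assuming an inventory that defines `g_master_hosts`:

- name: Map generic inventory groups to oo_* groups
  hosts: localhost
  connection: local
  gather_facts: no
  tasks:
    - name: Evaluate oo_masters_to_config
      add_host:
        name: "{{ item }}"
        groups: oo_masters_to_config
        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
      with_items: "{{ g_master_hosts | default([]) }}"
      changed_when: no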
diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml
new file mode 100644
index 000000000..533a35d9e
--- /dev/null
+++ b/playbooks/common/openshift-cfme/config.yml
@@ -0,0 +1,44 @@
+---
+# TODO: Make this work. The 'name' variable below is undefined
+# presently because it's part of the cfme role. This play can't run
+# until that's re-worked.
+#
+# - name: Pre-Pull manageiq-pods docker images
+# hosts: nodes
+# tasks:
+# - name: Ensure the latest manageiq-pods docker image is pulling
+# docker_image:
+# name: "{{ openshift_cfme_container_image }}"
+# # Fire-and-forget method, never timeout
+# async: 99999999999
+# # F-a-f, never check on this. True 'background' task.
+# poll: 0
+
+- name: Configure Masters for CFME Bulk Image Imports
+ hosts: oo_masters_to_config
+ serial: 1
+ tasks:
+ - name: Run master cfme tuning playbook
+ include_role:
+ name: openshift_cfme
+ tasks_from: tune_masters
+
+- name: Setup CFME
+ hosts: oo_first_master
+ vars:
+ r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}"
+ pre_tasks:
+ - name: Create a temporary place to evaluate the PV templates
+ command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: r_openshift_cfme_mktemp
+ changed_when: false
+ - name: Ensure the server template was read from disk
+ debug:
+ msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}"
+
+ tasks:
+ - name: Run the CFME Setup Role
+ include_role:
+ name: openshift_cfme
+ vars:
+ template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}"
diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/common/openshift-cfme/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library
new file mode 120000
index 000000000..ba40d2f56
--- /dev/null
+++ b/playbooks/common/openshift-cfme/library
@@ -0,0 +1 @@
+../../../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-cfme/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-cfme/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml
new file mode 100644
index 000000000..78b8e7668
--- /dev/null
+++ b/playbooks/common/openshift-cfme/uninstall.yml
@@ -0,0 +1,8 @@
+---
+- name: Uninstall CFME
+ hosts: masters
+ tasks:
+ - name: Run the CFME Uninstall Role Tasks
+ include_role:
+ name: openshift_cfme
+ tasks_from: uninstall
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..dfcef8435
--- /dev/null
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -0,0 +1,12 @@
+---
+- name: OpenShift health checks
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: adhoc
+ post_tasks:
+ - name: Run health checks
+ action: openshift_health_check
+ args:
+ checks: '{{ openshift_checks | default([]) }}'
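The adhoc play runs whichever check names the `openshift_checks` variable carries. A hedged sketch of selecting a subset through an inventory variable (file placement assumed; the check names appear in the install play later in this diff):

# group_vars/all.yml
openshift_checks:
  - docker_storage
  - memory_availability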
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
new file mode 100644
index 000000000..21ea785ef
--- /dev/null
+++ b/playbooks/common/openshift-checks/health.yml
@@ -0,0 +1,11 @@
+---
+- name: Run OpenShift health checks
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: health
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks: ['@health']
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
new file mode 100644
index 000000000..88e6f9120
--- /dev/null
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -0,0 +1,11 @@
+---
+- name: run OpenShift pre-install checks
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: pre-install
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks: ['@preflight']
diff --git a/playbooks/common/openshift-checks/roles b/playbooks/common/openshift-checks/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-checks/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 239bb211b..bbd5a0185 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,15 +1,37 @@
---
+# TODO: refactor this into its own include
+# and pass a variable for ctx
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: install
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
- include: initialize_oo_option_facts.yml
tags:
- always
-- include: disable_excluder.yml
- tags:
- - always
+- name: Set hostname
+ hosts: oo_masters_to_config:oo_nodes_to_config
+ tasks:
+ # TODO: switch back to hostname module once we depend on ansible-2.4
+ # https://github.com/ansible/ansible/pull/25906
+ - name: Set hostname
+ command: "hostnamectl set-hostname {{ openshift.common.hostname }}"
+ when: openshift_set_hostname | default(false,true) | bool
- include: ../openshift-etcd/config.yml
- tags:
- - etcd
- include: ../openshift-nfs/config.yml
tags:
@@ -20,12 +42,8 @@
- loadbalancer
- include: ../openshift-master/config.yml
- tags:
- - master
-- include: additional_config.yml
- tags:
- - master
+- include: ../openshift-master/additional_config.yml
- include: ../openshift-node/config.yml
tags:
@@ -39,6 +57,8 @@
tags:
- hosted
-- include: reset_excluder.yml
+- include: service_catalog.yml
+ when:
+ - openshift_enable_service_catalog | default(false) | bool
tags:
- - always
+ - servicecatalog
diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml
deleted file mode 100644
index f664c51c9..000000000
--- a/playbooks/common/openshift-cluster/disable_excluder.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Disable excluders
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
-
- # During installation the excluders are installed with present state.
- # So no pre-validation check here as the excluders are either to be installed (present = latest)
- # or they are not going to be updated if already installed
-
- # disable excluders based on their status
- - include_role:
- name: openshift_excluder
- tasks_from: disable
- vars:
- openshift_excluder_package_state: present
- docker_excluder_package_state: present
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 5425f448f..be14b06f0 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -27,9 +27,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: master
local_facts:
dns_port: '8053'
@@ -37,7 +34,7 @@
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: dnsConfig.bindAddress
yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
- notify: restart master
+ notify: restart master api
- meta: flush_handlers
- name: Configure nodes for dnsmasq
@@ -50,9 +47,6 @@
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- - role: common
- local_facts:
- use_dnsmasq: True
- role: node
local_facts:
dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 4331bc88d..e55b2f964 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,34 +5,54 @@
become: no
gather_facts: no
tasks:
- - fail:
- msg: This playbook requires g_etcd_hosts to be set
- when: g_etcd_hosts is not defined
+ - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
+ fail:
+ msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
+ when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
+ fail:
msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
when: g_master_hosts is not defined and g_new_master_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
+ fail:
msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
when: g_node_hosts is not defined and g_new_node_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_lb_hosts required
+ fail:
msg: This playbook requires g_lb_hosts to be set
when: g_lb_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts required
+ fail:
msg: This playbook requires g_nfs_hosts to be set
when: g_nfs_hosts is not defined
- - fail:
+ - name: Evaluate groups - g_nfs_hosts is single host
+ fail:
msg: The nfs group must be limited to one host
- when: (groups[g_nfs_hosts] | default([])) | length > 1
+ when: g_nfs_hosts | default([]) | length > 1
- - fail:
+ - name: Evaluate groups - g_glusterfs_hosts required
+ fail:
msg: This playbook requires g_glusterfs_hosts to be set
when: g_glusterfs_hosts is not defined
+ - name: Evaluate groups - Fail if no etcd hosts group is defined
+ fail:
+ msg: >
+ Running etcd as an embedded service is no longer supported. If this is a
+ new install please define an 'etcd' group with either one or three
+ hosts. These hosts may be the same hosts as your masters. If this is an
+ upgrade you may set openshift_master_unsupported_embedded_etcd=true
+ until a migration playbook becomes available.
+ when:
+ - g_etcd_hosts | default([]) | length not in [3,1]
+ - not openshift_master_unsupported_embedded_etcd | default(False)
+ - not openshift_node_bootstrap | default(False)
+
- name: Evaluate oo_all_hosts
add_host:
name: "{{ item }}"
@@ -51,42 +71,40 @@
with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}"
changed_when: no
- - name: Evaluate oo_etcd_to_config
+ - name: Evaluate oo_first_master
add_host:
- name: "{{ item }}"
- groups: oo_etcd_to_config
+ name: "{{ g_master_hosts[0] }}"
+ groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_etcd_hosts | default([]) }}"
+ when: g_master_hosts|length > 0
changed_when: no
- - name: Evaluate oo_masters_to_config
+ - name: Evaluate oo_new_etcd_to_config
add_host:
name: "{{ item }}"
- groups: oo_masters_to_config
+ groups: oo_new_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
+ with_items: "{{ g_new_etcd_hosts | default([]) }}"
changed_when: no
- - name: Evaluate oo_nodes_to_config
+ - name: Evaluate oo_masters_to_config
add_host:
name: "{{ item }}"
- groups: oo_nodes_to_config
+ groups: oo_masters_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+ with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}"
changed_when: no
- # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
- - name: Add master to oo_nodes_to_config
+ - name: Evaluate oo_etcd_to_config
add_host:
name: "{{ item }}"
- groups: oo_nodes_to_config
+ groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_master_hosts | default([]) }}"
- when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
+ with_items: "{{ g_etcd_hosts | default([]) }}"
changed_when: no
- name: Evaluate oo_first_etcd
@@ -94,16 +112,45 @@
name: "{{ g_etcd_hosts[0] }}"
groups: oo_first_etcd
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
when: g_etcd_hosts|length > 0
changed_when: no
- - name: Evaluate oo_first_master
+ # We use two groups one for hosts we're upgrading which doesn't include embedded etcd
+ # The other for backing up which includes the embedded etcd host, there's no need to
+ # upgrade embedded etcd that just happens when the master is updated.
+ - name: Evaluate oo_etcd_hosts_to_upgrade
add_host:
- name: "{{ g_master_hosts[0] }}"
- groups: oo_first_master
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_upgrade
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
+ changed_when: False
+
+ - name: Evaluate oo_etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_hosts_to_backup
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
+ changed_when: False
+
+ - name: Evaluate oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- when: g_master_hosts|length > 0
+ with_items: "{{ g_new_node_hosts | default(g_node_hosts | default([], true), true) }}"
+ changed_when: no
+
+ # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
+ - name: Add master to oo_nodes_to_config
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ g_master_hosts | default([]) }}"
+ when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
changed_when: no
- name: Evaluate oo_lb_to_config
@@ -130,5 +177,14 @@
groups: oo_glusterfs_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: "{{ g_glusterfs_hosts | default([]) }}"
+ with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
+ changed_when: no
+
+ - name: Evaluate oo_etcd_to_migrate
+ add_host:
+ name: "{{ item }}"
+ groups: oo_etcd_to_migrate
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
changed_when: no
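The guard added above accepts only one or three etcd hosts. A sketch of the group variables this playbook consumes, sized to pass that check (host names hypothetical; the `byo` playbooks normally derive the `g_*` values from inventory groups):

g_etcd_hosts: [master1.example.com, etcd2.example.com, etcd3.example.com]
g_master_hosts: [master1.example.com]
g_node_hosts: [node1.example.com]
g_lb_hosts: []
g_nfs_hosts: []
g_glusterfs_hosts: []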
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 9cebecd68..0723575c2 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -6,12 +6,151 @@
- name: Initialize host facts
hosts: oo_all_hosts
- roles:
- - openshift_facts
tasks:
- - openshift_facts:
+ - name: load openshift_facts module
+ include_role:
+ name: openshift_facts
+
+ # TODO: Should this role be refactored into health_checks??
+ - name: Run openshift_sanitize_inventory to set variables
+ include_role:
+ name: openshift_sanitize_inventory
+
+ - name: Detecting Operating System from ostree_booted
+ stat:
+ path: /run/ostree-booted
+ register: ostree_booted
+
+ # Locally setup containerized facts for now
+ - name: initialize_facts set fact l_is_atomic
+ set_fact:
+ l_is_atomic: "{{ ostree_booted.stat.exists }}"
+
+ - name: initialize_facts set fact for containerized and l_is_*_system_container
+ set_fact:
+ l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+ l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+ l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
+ - name: initialize_facts set facts for l_any_system_container
+ set_fact:
+ l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
+
+ - name: initialize_facts set fact for l_etcd_runtime
+ set_fact:
+ l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ - name: Validate python version - ans_dist is fedora and python is v3
+ fail:
+ msg: |
+ openshift-ansible requires Python 3 for {{ ansible_distribution }};
+ For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_python['version']['major'] != 3
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ - name: Validate python version - ans_dist not Fedora and python must be v2
+ fail:
+ msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
+ when:
+ - ansible_distribution != 'Fedora'
+ - ansible_python['version']['major'] != 2
+
+ # TODO: Should this be moved into health checks??
+ # Seems as though any check that happens with a corresponding fail should move into health_checks
+ # Fail as early as possible if Atomic and old version of Docker
+ - when:
+ - l_is_atomic | bool
+ block:
+
+ # See https://access.redhat.com/articles/2317361
+ # and https://github.com/ansible/ansible/issues/15892
+ # NOTE: the "'s can not be removed at this level else the docker command will fail
+ # NOTE: When ansible >2.2.1.x is used this can be updated per
+ # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
+ - name: Determine Atomic Host Docker Version
+ shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+ register: l_atomic_docker_version
+
+ - name: assert atomic host docker version is 1.12 or later
+ assert:
+ that:
+ - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+ msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
+
+ - when:
+ - not l_is_atomic | bool
+ block:
+ - name: Ensure openshift-ansible installer package deps are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iproute
+ - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}"
+ - PyYAML
+ - yum-utils
+
+ - name: Ensure various deps for running system containers are installed
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - atomic
+ - ostree
+ - runc
+ when:
+ - l_any_system_container | bool
+
+    - name: Default system_images_registry to an enterprise registry
+ set_fact:
+ system_images_registry: "registry.access.redhat.com"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "openshift-enterprise"
+
+ - name: Default system_images_registry to community registry
+ set_fact:
+ system_images_registry: "docker.io"
+ when:
+ - system_images_registry is not defined
+ - openshift_deployment_type == "origin"
+
+ - name: Gather Cluster facts and set is_containerized if needed
+ openshift_facts:
role: common
local_facts:
+ deployment_type: "{{ openshift_deployment_type }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+ cli_image: "{{ osm_image | default(None) }}"
hostname: "{{ openshift_hostname | default(None) }}"
- - set_fact:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ ip: "{{ openshift_ip | default(None) }}"
+ is_containerized: "{{ l_is_containerized | default(None) }}"
+ is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+ is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+ is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+ is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+ etcd_runtime: "{{ l_etcd_runtime }}"
+ system_images_registry: "{{ system_images_registry }}"
+ public_hostname: "{{ openshift_public_hostname | default(None) }}"
+ public_ip: "{{ openshift_public_ip | default(None) }}"
+ portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+ http_proxy: "{{ openshift_http_proxy | default(None) }}"
+ https_proxy: "{{ openshift_https_proxy | default(None) }}"
+ no_proxy: "{{ openshift_no_proxy | default(None) }}"
+ generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+ no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+
+ - name: initialize_facts set_fact repoquery command
+ set_fact:
+ repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+ - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
+ set_fact:
+ openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
new file mode 100644
index 000000000..a7114fc80
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup yum repositories for all hosts
+ hosts: oo_all_hosts
+ gather_facts: no
+ tasks:
+ - name: initialize openshift repos
+ include_role:
+ name: openshift_repos
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 88f82f6f2..1b186f181 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,23 +1,13 @@
---
-# NOTE: requires openshift_facts be run
-- name: Verify compatible yum/subscription-manager combination
- hosts: l_oo_all_hosts
- gather_facts: no
+- name: Set version_install_base_package true on masters and nodes
+ hosts: oo_masters_to_config:oo_nodes_to_config
tasks:
- # See:
- # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
- # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
- # https://github.com/openshift/openshift-ansible/issues/1138
- - name: Check for bad combinations of yum and subscription-manager
- command: >
- {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
- register: yum_ver_test
- changed_when: false
- when: not openshift.common.is_atomic | bool
- - fail:
- msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
- when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
+ - name: Set version_install_base_package true
+ set_fact:
+ version_install_base_package: True
+ when: version_install_base_package is not defined
+# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
roles:
@@ -30,5 +20,10 @@
hosts: oo_all_hosts:!oo_first_master
vars:
openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+ pre_tasks:
+ - set_fact:
+ openshift_pkg_version: -{{ openshift_version }}
+ when: openshift_pkg_version is not defined
+ - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
roles:
- openshift_version
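The new pre_task derives `openshift_pkg_version` by prefixing a dash to `openshift_version`, so it can be appended directly to an RPM name. A worked illustration (version value and package name assumed):

# with openshift_version == "3.6.0", the pre_task yields:
openshift_pkg_version: "-3.6.0"
# so a package spec such as "{{ 'origin' }}{{ openshift_pkg_version }}"
# expands to "origin-3.6.0"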
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 5db71b857..75339f6df 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -26,6 +26,8 @@
logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
+ - role: openshift_default_storage_class
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
- role: openshift_hosted
- role: openshift_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
@@ -46,6 +48,9 @@
- role: cockpit-ui
when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
+ - role: openshift_prometheus
+ when: openshift_hosted_prometheus_deploy | default(false) | bool
+
- name: Update master-config for publicLoggingURL
hosts: oo_masters_to_config:!oo_first_master
tags:
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 57580406c..c1a5d83cd 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,6 +1,4 @@
---
-- include: evaluate_groups.yml
-
- name: OpenShift Aggregated Logging
hosts: oo_first_master
roles:
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index bcff4a1a1..1dc180c26 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,7 +1,14 @@
---
-- include: evaluate_groups.yml
-
- name: OpenShift Metrics
hosts: oo_first_master
roles:
- openshift_metrics
+
+- name: OpenShift Metrics
+ hosts: oo_masters:!oo_first_master
+ serial: 1
+ tasks:
+ - name: Setup the non-first masters configs
+ include_role:
+ name: openshift_metrics
+ tasks_from: update_master_config.yaml
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
new file mode 100644
index 000000000..a979c0c00
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -0,0 +1,9 @@
+---
+- include: std_include.yml
+
+- name: OpenShift Prometheus
+ hosts: oo_first_master
+ roles:
+ - openshift_prometheus
+ vars:
+ openshift_prometheus_state: present
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
new file mode 100644
index 000000000..6964e8567
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -0,0 +1,158 @@
+---
+- name: Check cert expiries
+ hosts: oo_etcd_to_config:oo_masters_to_config
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
+
+- name: Backup existing etcd CA certificate directories
+ hosts: oo_etcd_to_config
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Determine if CA certificate directory exists
+ stat:
+ path: "{{ etcd_ca_dir }}"
+ register: etcd_ca_certs_dir_stat
+ - name: Backup generated etcd certificates
+ command: >
+ tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ {{ etcd_ca_dir }}
+ args:
+ warn: no
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+ - name: Remove CA certificate directory
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: absent
+ when: etcd_ca_certs_dir_stat.stat.exists | bool
+
+- name: Generate new etcd CA
+ hosts: oo_first_etcd
+ roles:
+ - role: openshift_etcd_ca
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+
+- name: Create temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_etcd_mktemp
+ changed_when: false
+
+- name: Distribute etcd CA to etcd hosts
+ hosts: oo_etcd_to_config
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Create a tarball of the etcd ca certs
+ command: >
+ tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
+ -C {{ etcd_ca_dir }} .
+ args:
+ creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ warn: no
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Retrieve etcd ca cert tarball
+ fetch:
+ src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ delegate_to: "{{ etcd_ca_host }}"
+ run_once: true
+ - name: Ensure ca directory exists
+ file:
+ path: "{{ etcd_ca_dir }}"
+ state: directory
+ - name: Unarchive etcd ca cert tarballs
+ unarchive:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
+ dest: "{{ etcd_ca_dir }}"
+ - name: Read current etcd CA
+ slurp:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ register: g_current_etcd_ca_output
+ - name: Read new etcd CA
+ slurp:
+ src: "{{ etcd_ca_dir }}/ca.crt"
+ register: g_new_etcd_ca_output
+ - copy:
+ content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
+ dest: "{{ item }}/ca.crt"
+ with_items:
+ - "{{ etcd_conf_dir }}"
+ - "{{ etcd_ca_dir }}"
+
+- include: ../../openshift-etcd/restart.yml
+ # Do not restart etcd when etcd certificates were previously expired.
+ when: ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
+
+- name: Retrieve etcd CA certificate
+ hosts: oo_first_etcd
+ roles:
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ tasks:
+ - name: Retrieve etcd CA certificate
+ fetch:
+ src: "{{ etcd_conf_dir }}/ca.crt"
+ dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+
+- name: Distribute etcd CA to masters
+ hosts: oo_masters_to_config
+ vars:
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ tasks:
+ - name: Deploy etcd CA
+ copy:
+ src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
+ dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
+ when: groups.oo_etcd_to_config | default([]) | length > 0
+
+- name: Delete temporary directory on localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - file:
+ name: "{{ g_etcd_mktemp.stdout }}"
+ state: absent
+ changed_when: false
+
+- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
index 2963a5940..6b5c805e6 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
@@ -3,7 +3,8 @@
hosts: oo_first_etcd
any_errors_fatal: true
roles:
- - etcd_common
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
post_tasks:
- name: Determine if generated etcd certificates exist
stat:
@@ -27,7 +28,8 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
- - etcd_common
+ - role: etcd_common
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
post_tasks:
- name: Backup etcd certificates
command: >
@@ -50,6 +52,7 @@
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Redeploy etcd client certificates for masters
hosts: oo_masters_to_config
@@ -63,4 +66,5 @@
etcd_cert_prefix: "master.etcd-"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
index c30889d64..51b196299 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
@@ -51,3 +51,13 @@
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_certificates_redeploy: true
+ - role: lib_utils
+ post_tasks:
+ - yedit:
+ src: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ key: servingInfo.namedCertificates
+ value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}"
+ when:
+ - ('named_certificates' in openshift.master)
+ - openshift.master.named_certificates | default([]) | length > 0
+ - openshift_master_overwrite_named_certificates | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 4fa7f9cdf..089ae6bbc 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -6,131 +6,17 @@
msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
when: not openshift.common.version_gte_3_2_or_1_2 | bool
-- name: Backup existing etcd CA certificate directories
- hosts: oo_etcd_to_config
- roles:
- - etcd_common
- tasks:
- - name: Determine if CA certificate directory exists
- stat:
- path: "{{ etcd_ca_dir }}"
- register: etcd_ca_certs_dir_stat
- - name: Backup generated etcd certificates
- command: >
- tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
- {{ etcd_ca_dir }}
- args:
- warn: no
- when: etcd_ca_certs_dir_stat.stat.exists | bool
- - name: Remove CA certificate directory
- file:
- path: "{{ etcd_ca_dir }}"
- state: absent
- when: etcd_ca_certs_dir_stat.stat.exists | bool
-
-- name: Generate new etcd CA
- hosts: oo_first_etcd
- roles:
- - role: openshift_etcd_ca
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_etcd_mktemp
- changed_when: false
-
-- name: Distribute etcd CA to etcd hosts
- hosts: oo_etcd_to_config
+- name: Check cert expiries
+ hosts: oo_nodes_to_config:oo_masters_to_config
vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_certificate_expiry_show_all: yes
roles:
- - etcd_common
- tasks:
- - name: Create a tarball of the etcd ca certs
- command: >
- tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
- -C {{ etcd_ca_dir }} .
- args:
- creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- warn: no
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Retrieve etcd ca cert tarball
- fetch:
- src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- delegate_to: "{{ etcd_ca_host }}"
- run_once: true
- - name: Ensure ca directory exists
- file:
- path: "{{ etcd_ca_dir }}"
- state: directory
- - name: Unarchive etcd ca cert tarballs
- unarchive:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_ca_dir }}"
- - name: Read current etcd CA
- slurp:
- src: "{{ etcd_conf_dir }}/ca.crt"
- register: g_current_etcd_ca_output
- - name: Read new etcd CA
- slurp:
- src: "{{ etcd_ca_dir }}/ca.crt"
- register: g_new_etcd_ca_output
- - copy:
- content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
- dest: "{{ item }}/ca.crt"
- with_items:
- - "{{ etcd_conf_dir }}"
- - "{{ etcd_ca_dir }}"
-
-- name: Retrieve etcd CA certificate
- hosts: oo_first_etcd
- roles:
- - etcd_common
- tasks:
- - name: Retrieve etcd CA certificate
- fetch:
- src: "{{ etcd_conf_dir }}/ca.crt"
- dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
-
-- name: Distribute etcd CA to masters
- hosts: oo_masters_to_config
- vars:
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- tasks:
- - name: Deploy CA certificate, key, bundle and serial
- copy:
- src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt"
- dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt"
- when: groups.oo_etcd_to_config | default([]) | length > 0
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file:
- name: "{{ g_etcd_mktemp.stdout }}"
- state: absent
- changed_when: false
-
-- include: ../../openshift-etcd/restart.yml
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
# Update master config when ca-bundle not referenced. Services will be
# restarted below after new CA certificate has been distributed.
@@ -323,6 +209,16 @@
with_items: "{{ client_users }}"
- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -372,3 +268,13 @@
changed_when: false
- include: ../../openshift-node/restart.yml
+ # Do not restart nodes when node certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
index 8c8062585..afd5463b2 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml
@@ -66,6 +66,7 @@
--signer-cert={{ openshift.common.config_base }}/master/ca.crt
--signer-key={{ openshift.common.config_base }}/master/ca.key
--signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt
+ --config={{ mktemp.stdout }}/admin.kubeconfig
--hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift.common.config_base }}/master/registry.crt
--key={{ openshift.common.config_base }}/master/registry.key
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
index 9f14f2d69..748bbbf91 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml
@@ -116,8 +116,9 @@
tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem"
tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key"
--type=kubernetes.io/tls
+ --config={{ mktemp.stdout }}/admin.kubeconfig
--confirm
- -o json | {{ openshift.common.client_binary }} replace -f -
+ -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f -
- name: Remove temporary router certificate and key files
file:
diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml
deleted file mode 100644
index eaa8ce39c..000000000
--- a/playbooks/common/openshift-cluster/reset_excluder.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
- - include_role:
- name: openshift_excluder
- tasks_from: enable
diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml
new file mode 100644
index 000000000..26716a92d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/sanity_checks.yml
@@ -0,0 +1,51 @@
+---
+- name: Verify Requirements
+ hosts: oo_all_hosts
+ tasks:
+ - fail:
+ msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Nuage sdn can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with flannel
+ when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Contiv can not be used with nuage
+ when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico
+ when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool
+
+ - fail:
+ msg: The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both.
+ when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool
+
+ - fail:
+ msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both
+ when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool
+
+ - fail:
+ msg: openshift_hostname must be 63 characters or less
+ when: openshift_hostname is defined and openshift_hostname | length > 63
+
+ - fail:
+ msg: openshift_public_hostname must be 63 characters or less
+ when: openshift_public_hostname is defined and openshift_public_hostname | length > 63
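The checks above treat the network plugins as mutually exclusive: exactly one of the built-in SDN, flannel, nuage, contiv, or Calico may be active. A sketch of inventory variables that would pass these checks when opting into flannel (variable names taken from the checks; values illustrative):

openshift_use_openshift_sdn: false
openshift_use_flannel: true
openshift_use_nuage: false
openshift_use_contiv: false
openshift_use_calico: false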
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
new file mode 100644
index 000000000..599350258
--- /dev/null
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -0,0 +1,20 @@
+---
+
+- name: Update Master configs
+ hosts: oo_masters
+ serial: 1
+ tasks:
+ - block:
+ - include_role:
+ name: openshift_service_catalog
+ tasks_from: wire_aggregator
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
+
+- name: Service Catalog
+ hosts: oo_first_master
+ roles:
+ - openshift_service_catalog
+ - ansible_service_broker
+ vars:
+ first_master: "{{ groups.oo_first_master[0] }}"
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 6ed31a644..cef0072f3 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -7,10 +7,18 @@
tags:
- always
+- include: sanity_checks.yml
+ tags:
+ - always
+
- include: validate_hostnames.yml
tags:
- node
+- include: initialize_openshift_repos.yml
+ tags:
+ - always
+
- include: initialize_openshift_version.yml
tags:
- always
diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
index 1a6580795..eb118365a 100644
--- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate etcd instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: etcd_names_output
with_sequence: count={{ num_etcd }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
index 36d7b7870..783f70f50 100644
--- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
@@ -3,7 +3,7 @@
- name: Generate master instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: master_names_output
with_sequence: count={{ num_masters }}
diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
index 278942f8b..c103e40a9 100644
--- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
+++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
@@ -5,7 +5,7 @@
- name: Generate node instance name(s)
set_fact:
- scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
+ scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
register: node_names_output
with_sequence: count={{ number_nodes }}
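All three launch-fact tasks now key the generated scratch names off openshift_cluster_id with a 'default' fallback instead of the retired cluster_id variable. For a node host the template renders, for example (random hex suffix illustrative):

    # openshift_cluster_id unset, k8s_type=node, sub_host_type=compute:
    #   scratch_name => "default-node-compute-0f3a1"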
diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml
deleted file mode 100644
index be956fca5..000000000
--- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- include: evaluate_groups.yml
-
-- name: Subscribe hosts, update repos and update OS packages
- hosts: oo_hosts_to_update
- roles:
- # Explicitly calling openshift_facts because it appears that when
- # rhel_subscribe is skipped that the openshift_facts dependency for
- # openshift_repos is also skipped (this is the case at least for Ansible
- # 2.0.2)
- - openshift_facts
- - role: rhel_subscribe
- when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
- - openshift_repos
- - os_update_latest
diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
deleted file mode 100644
index 9f7961614..000000000
--- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# This is a hack to allow us to use systemd_units.yml, but skip the handlers which
-# restart services. We will unconditionally restart all containerized services
-# because we have to unconditionally restart Docker:
-- set_fact:
- skip_node_svc_handlers: True
-
-- name: Update systemd units
- include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
-
-# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
-# play when the node has already been marked schedulable again. (this would look strange
-# in logs otherwise)
-- meta: flush_handlers
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
deleted file mode 100644
index a30952929..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_excluder.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Record excluder state and disable
- hosts: oo_masters_to_config:oo_nodes_to_config
- gather_facts: no
- tasks:
- - include: pre/validate_excluder.yml
- vars:
- excluder: "{{ openshift.common.service_type }}-docker-excluder"
- when: enable_docker_excluder | default(enable_excluders) | default(True) | bool
- - include: pre/validate_excluder.yml
- vars:
- excluder: "{{ openshift.common.service_type }}-excluder"
- when: enable_openshift_excluder | default(enable_excluders) | default(True) | bool
-
-
- # disable excluders based on their status
- - include_role:
- name: openshift_excluder
- tasks_from: disable
- vars:
- openshift_excluder_package_state: latest
- docker_excluder_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
new file mode 100644
index 000000000..800621857
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
new file mode 100644
index 000000000..a66301c0d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -0,0 +1,12 @@
+---
+- name: Disable excluders
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ r_openshift_excluder_verify_upgrade: true
+ r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
+ r_openshift_excluder_package_state: latest
+ r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 07db071ce..98953f72e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -4,7 +4,6 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_nodes_to_upgrade.yml
@@ -52,11 +51,15 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets
+ {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
+ register: l_docker_upgrade_drain_result
+ until: not l_docker_upgrade_drain_result | failed
+ retries: 60
+ delay: 60
- - include: upgrade.yml
+ - include: tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
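The drain command is now retried for up to an hour (60 attempts, 60 seconds apart) because evictions can fail transiently while pods reschedule. The same register/until/retries shape can wrap any flaky step; a generic sketch (the task and command are placeholders, the 'failed' filter style matches this codebase):

    - name: Retry a transiently failing step (sketch)
      command: /bin/true
      register: l_result
      until: not l_result | failed
      retries: 60
      delay: 60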
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index 1b418920f..83f16ac0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -1,6 +1,10 @@
---
- name: Restart docker
service: name=docker state=restarted
+ register: l_docker_restart_docker_in_upgrade_result
+ until: not l_docker_restart_docker_in_upgrade_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -11,7 +15,6 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -24,4 +27,5 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 17f8fc6e9..808cc562c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -4,7 +4,6 @@
- name: Stop containerized services
service: name={{ item }} state=stopped
with_items:
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -32,7 +31,13 @@
- debug: var=docker_image_count.stdout
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-- service: name=docker state=stopped
+- service:
+ name: docker
+ state: stopped
+ register: l_pb_docker_upgrade_stop_result
+ until: not l_pb_docker_upgrade_stop_result | failed
+ retries: 3
+ delay: 30
- name: Upgrade Docker
package: name=docker{{ '-' + docker_version }} state=present
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index b2a2eac9a..52345a9ba 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -18,12 +18,16 @@
- name: Get current version of Docker
command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
register: curr_docker_version
+ retries: 4
+ until: curr_docker_version | succeeded
changed_when: false
- name: Get latest available version of Docker
command: >
{{ repoquery_cmd }} --qf '%{version}' "docker"
register: avail_docker_version
+ retries: 4
+ until: avail_docker_version | succeeded
# Don't expect docker rpm to be available on hosts that don't already have it installed:
when: pkg_check.rc == 0
failed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index fb51a0061..2cc6c9019 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -1,84 +1,14 @@
---
- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ hosts: oo_etcd_hosts_to_backup
roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
- - set_fact:
- etcd_backup_dir: "{{ openshift.etcd.etcd_data_dir }}/openshift-backup-{{ backup_tag | default('') }}{{ timestamp }}"
-
- # TODO: replace shell module with command and update later checks
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1
- register: avail_disk
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- # TODO: replace shell module with command and update later checks
- - name: Check current etcd disk usage
- shell: du --exclude='*openshift-backup*' -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- # For non containerized and non embedded we should have the correct version of
- # etcd installed already. So don't do anything.
- #
- # For containerized installs we now exec into etcd_container
- #
- # For embedded non containerized we need to ensure we have the latest version
- # etcd on the host.
- - name: Install latest etcd for embedded
- package:
- name: etcd
- state: latest
- when:
- - embedded_etcd | bool
- - not openshift.common.is_atomic | bool
-
- - name: Generate etcd backup
- command: >
- {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ etcd_backup_dir }}
-
- # According to the docs change you can simply copy snap/db
- # https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
- - name: Check for v3 data store
- stat:
- path: "{{ openshift.etcd.etcd_data_dir }}/member/snap/db"
- register: v3_db
-
- - name: Copy etcd v3 data store
- command: >
- cp -a {{ openshift.etcd.etcd_data_dir }}/member/snap/db
- {{ etcd_backup_dir }}/member/snap/
- when: v3_db.stat.exists
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ etcd_backup_dir }}"
+ - role: openshift_etcd_facts
+ - role: etcd_common
+ r_etcd_common_action: backup
+ r_etcd_common_backup_tag: "{{ etcd_backup_tag | default('') }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- name: Gate on etcd backup
hosts: localhost
@@ -87,10 +17,10 @@
tasks:
- set_fact:
etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ | oo_select_keys(groups.oo_etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
- set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- fail:
msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
when: etcd_backup_failed | length > 0
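With the backup logic moved into etcd_common, the disk-space checks and v3 snap/db handling are driven entirely by r_etcd_common_* parameters, so other playbooks can request a tagged backup without duplicating any of it. A hedged sketch of a standalone call (tag value illustrative):

    - name: Ad-hoc etcd backup (sketch)
      hosts: oo_etcd_hosts_to_backup
      roles:
      - role: openshift_etcd_facts
      - role: etcd_common
        r_etcd_common_action: backup
        r_etcd_common_backup_tag: "manual-"
        r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
        r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
        r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"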
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
deleted file mode 100644
index 5f8b59e17..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- name: Verify cluster is healthy pre-upgrade
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
-
-- name: Get current image
- shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}'
- register: current_image
-
-- name: Set new_etcd_image
- set_fact:
- new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"
-
-- name: Pull new etcd image
- command: "docker pull {{ new_etcd_image }}"
-
-- name: Update to latest etcd image
- replace:
- dest: /etc/systemd/system/etcd_container.service
- regexp: "{{ current_image.stdout }}$"
- replace: "{{ new_etcd_image }}"
-
-- name: Restart etcd_container
- systemd:
- name: etcd_container
- daemon_reload: yes
- state: restarted
-
-## TODO: probably should just move this into the backup playbooks, also this
-## will fail on atomic host. We need to revisit how to do etcd backups there as
-## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
-- name: Upgrade etcd for etcdctl when not atomic
- package: name=etcd state=latest
- when: not openshift.common.is_atomic | bool
-
-- name: Verify cluster is healthy
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
- register: etcdctl
- until: etcdctl.rc == 0
- retries: 3
- delay: 10
-
-- name: Store new etcd_image
- openshift_facts:
- role: etcd
- local_facts:
- etcd_image: "{{ new_etcd_image }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
deleted file mode 100644
index 30232110e..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# F23 GA'd with etcd 2.0, currently has 2.2 in updates
-# F24 GA'd with etcd-2.2, currently has 2.2 in updates
-# F25 Beta currently has etcd 3.0
-- name: Verify cluster is healthy pre-upgrade
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
-
-- name: Update etcd
- package:
- name: "etcd"
- state: "latest"
-
-- name: Restart etcd
- service:
- name: etcd
- state: restarted
-
-- name: Verify cluster is healthy
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
- register: etcdctl
- until: etcdctl.rc == 0
- retries: 3
- delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
deleted file mode 120000
index 641e04e44..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh
+++ /dev/null
@@ -1 +0,0 @@
-../roles/etcd/files/etcdctl.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
index fa86d29fb..64abc54e7 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml
@@ -5,42 +5,19 @@
# mirrored packages on your own because only the GA and latest versions are
# available in the repos. So for Fedora we'll simply skip this, sorry.
-- include: ../../evaluate_groups.yml
- tags:
- - always
-
-# We use two groups one for hosts we're upgrading which doesn't include embedded etcd
-# The other for backing up which includes the embedded etcd host, there's no need to
-# upgrade embedded etcd that just happens when the master is updated.
-- name: Evaluate additional groups for etcd
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_upgrade
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_upgrade
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}"
- changed_when: False
-
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
-
- name: Backup etcd before upgrading anything
include: backup.yml
vars:
- backup_tag: "pre-upgrade-"
+ etcd_backup_tag: "pre-upgrade-"
when: openshift_etcd_backup | default(true) | bool
- name: Drop etcdctl profiles
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - include: roles/etcd/tasks/etcdctl.yml
+ - include_role:
+ name: etcd_common
+ vars:
+ r_etcd_common_action: drop_etcdctl
- name: Perform etcd upgrade
include: ./upgrade.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
deleted file mode 100644
index 3a972e8ab..000000000
--- a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Verify cluster is healthy pre-upgrade
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
-
-- name: Update etcd RPM
- package:
- name: etcd-{{ upgrade_version }}*
- state: latest
-
-- name: Restart etcd
- service:
- name: etcd
- state: restarted
-
-- name: Verify cluster is healthy
- command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health"
- register: etcdctl
- until: etcdctl.rc == 0
- retries: 3
- delay: 10
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index a9b5b94e6..39e82498d 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -1,119 +1,110 @@
---
- name: Determine etcd version
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
tasks:
- - name: Record RPM based etcd version
- command: rpm -qa --qf '%{version}' etcd\*
- args:
- warn: no
- register: etcd_rpm_version
- failed_when: false
- when: not openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
- - name: Record containerized etcd version
- command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\*
- register: etcd_container_version
- failed_when: false
- when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool
- # AUDIT:changed_when: `false` because we are only inspecting
- # state, not manipulating anything
- changed_when: false
-
-# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop
-# through hosts, then loop through tasks only when appropriate
-- name: Upgrade to 2.1
- hosts: etcd_hosts_to_upgrade
- serial: 1
+ - block:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ - debug:
+ msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
+ when:
+ - not openshift.common.is_containerized | bool
+
+ - block:
+ - name: Record containerized etcd version (docker)
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_docker
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+ # Given that a registered variable is set even when the task's when
+ # condition is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
+ when:
+ - not openshift.common.is_etcd_system_container | bool
+
+ - name: Record containerized etcd version (runc)
+ command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_runc
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+ # Given that a registered variable is set even when the task's when
+ # condition is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
+ when:
+ - openshift.common.is_etcd_system_container | bool
+
+ - debug:
+ msg: "Etcd containerized version {{ etcd_container_version }} detected"
+ when:
+ - openshift.common.is_containerized | bool
+
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '2.1'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.1'
-- name: Upgrade RPM hosts to 2.2
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '2.2'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.2'
-- name: Upgrade containerized hosts to 2.2.5
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_image_members.yml
vars:
- upgrade_version: 2.2.5
- tasks:
- - include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.2.5'
-- name: Upgrade RPM hosts to 2.3
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '2.3'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.3'
-- name: Upgrade containerized hosts to 2.3.7
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_image_members.yml
vars:
- upgrade_version: 2.3.7
- tasks:
- - include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool
+ etcd_upgrade_version: '2.3.7'
-- name: Upgrade RPM hosts to 3.0
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_rpm_members.yml
vars:
- upgrade_version: '3.0'
- tasks:
- - include: rhel_tasks.yml
- when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool
+ etcd_upgrade_version: '3.0'
-- name: Upgrade containerized hosts to etcd3 image
- hosts: etcd_hosts_to_upgrade
- serial: 1
+- include: upgrade_image_members.yml
vars:
- upgrade_version: 3.0.15
- tasks:
- - include: containerized_tasks.yml
- when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
+ etcd_upgrade_version: '3.0.15'
+
+- include: upgrade_rpm_members.yml
+ vars:
+ etcd_upgrade_version: '3.1'
+
+- include: upgrade_image_members.yml
+ vars:
+ etcd_upgrade_version: '3.1.3'
- name: Upgrade fedora to latest
- hosts: etcd_hosts_to_upgrade
+ hosts: oo_etcd_hosts_to_upgrade
serial: 1
tasks:
- - include: fedora_tasks.yml
- when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool
+ - include_role:
+ name: etcd_upgrade
+ when:
+ - ansible_distribution == 'Fedora'
+ - not openshift.common.is_containerized | bool
- name: Backup etcd
include: backup.yml
vars:
- backup_tag: "post-3.0-"
+ etcd_backup_tag: "post-3.0-"
when: openshift_etcd_backup | default(true) | bool
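Each include on this ladder is self-gating: the play it pulls in compares the recorded etcd_rpm_version or etcd_container_version against etcd_upgrade_version and becomes a no-op for hosts already at or past that release, so a host on 2.2 simply walks 2.3, 3.0 and 3.1 in order. Extending the ladder is one more include; a sketch (version hypothetical):

    - include: upgrade_rpm_members.yml
      vars:
        etcd_upgrade_version: '3.2'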
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
new file mode 100644
index 000000000..831ca8f57
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml
@@ -0,0 +1,17 @@
+---
+# INPUT etcd_upgrade_version
+# INPUT etcd_container_version
+# INPUT openshift.common.is_containerized
+- name: Upgrade containerized hosts to {{ etcd_upgrade_version }}
+ hosts: oo_etcd_hosts_to_upgrade
+ serial: 1
+ roles:
+ - role: etcd_upgrade
+ r_etcd_upgrade_action: upgrade
+ r_etcd_upgrade_mechanism: image
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_peer: "{{ openshift.common.hostname }}"
+ when:
+ - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
+ - openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
new file mode 100644
index 000000000..2e79451e0
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml
@@ -0,0 +1,18 @@
+---
+# INPUT etcd_upgrade_version
+# INPUT etcd_rpm_version
+# INPUT openshift.common.is_containerized
+- name: Upgrade to {{ etcd_upgrade_version }}
+ hosts: oo_etcd_hosts_to_upgrade
+ serial: 1
+ roles:
+ - role: etcd_upgrade
+ r_etcd_upgrade_action: upgrade
+ r_etcd_upgrade_mechanism: rpm
+ r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
+ r_etcd_common_etcd_runtime: "host"
+ etcd_peer: "{{ openshift.common.hostname }}"
+ when:
+ - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
+ - ansible_distribution == 'RedHat'
+ - not openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index cbf6d58b3..c98065cf4 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -4,23 +4,11 @@
# Do not allow adding hosts during upgrade.
g_new_master_hosts: []
g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- include: ../initialize_oo_option_facts.yml
- include: ../initialize_facts.yml
-- name: Ensure clean repo cache in the event repos have been changed manually
- hosts: oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
- args:
- warn: no
-
- name: Ensure firewall is not switched during upgrade
hosts: oo_all_hosts
tasks:
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 046535680..72de63070 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -6,27 +6,32 @@
- lib_openshift
tasks:
- - name: Retrieve list of openshift nodes matching upgrade label
- oc_obj:
- state: list
- kind: node
- selector: "{{ openshift_upgrade_nodes_label }}"
- register: nodes_to_upgrade
- when: openshift_upgrade_nodes_label is defined
+ - when: openshift_upgrade_nodes_label is defined
+ block:
+ - name: Retrieve list of openshift nodes matching upgrade label
+ oc_obj:
+ state: list
+ kind: node
+ selector: "{{ openshift_upgrade_nodes_label }}"
+ register: nodes_to_upgrade
- # We got a list of nodes with the label, now we need to match these with inventory hosts
- # using their openshift.common.hostname fact.
- - name: Map labelled nodes to inventory hosts
- add_host:
- name: "{{ item }}"
- groups: temp_nodes_to_upgrade
- ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
- ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: " {{ groups['oo_nodes_to_config'] }}"
- when:
- - openshift_upgrade_nodes_label is defined
- - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
- changed_when: false
+ - name: Fail if no nodes match openshift_upgrade_nodes_label
+ fail:
+ msg: "openshift_upgrade_nodes_label was specified but no nodes matched"
+ when: nodes_to_upgrade.results.results[0]['items'] | length == 0
+
+ # We got a list of nodes with the label; now we need to match them with
+ # inventory hosts using their openshift.common.hostname fact.
+ - name: Map labelled nodes to inventory hosts
+ add_host:
+ name: "{{ item }}"
+ groups: temp_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: " {{ groups['oo_nodes_to_config'] }}"
+ when:
+ - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+ changed_when: false
# Build up the oo_nodes_to_upgrade group, use the list filtered by label if
# present, otherwise hit all nodes:
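The new fail task turns a mistyped selector into a hard error instead of a silent whole-cluster upgrade. The label selects node objects by their metadata labels, not by inventory name, and is passed as an extra var; an invocation sketch (entry-point playbook and label value illustrative):

    # ansible-playbook upgrade_nodes.yml \
    #   -e openshift_upgrade_nodes_label="color=green"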
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 673f11889..4eac8b067 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -1,7 +1,5 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker
deleted file mode 120000
index 6aeca2842..000000000
--- a/playbooks/common/openshift-cluster/upgrades/master_docker
+++ /dev/null
@@ -1 +0,0 @@
-../../../../roles/openshift_master/templates/master_docker \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 0d7cdb227..d9ddf3860 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -9,6 +9,8 @@
replace ( '${version}', openshift_image_tag ) }}"
router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) |
replace ( '${version}', openshift_image_tag ) }}"
+ registry_console_image: "{{ openshift.master.registry_url | regex_replace ( '(origin|ose)-\\${component}', 'registry-console') |
+ replace ( '${version}', 'v' ~ openshift.common.short_version ) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -61,6 +63,26 @@
when:
- _default_registry.results.results[0] != {}
+ - name: Check for registry-console
+ oc_obj:
+ state: list
+ kind: dc
+ name: registry-console
+ register: _registry_console
+ when:
+ - openshift.common.deployment_type != 'origin'
+
+ - name: Update registry-console image to current version
+ oc_edit:
+ kind: dc
+ name: registry-console
+ namespace: default
+ content:
+ spec.template.spec.containers[0].image: "{{ registry_console_image }}"
+ when:
+ - openshift.common.deployment_type != 'origin'
+ - _registry_console.results.results[0] != {}
+
roles:
- openshift_manageiq
# Create the new templates shipped in 3.2, existing templates are left
@@ -97,6 +119,12 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- include: ../reset_excluder.yml
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
tags:
- always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
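The two chained filters derive the registry-console image from the master registry_url: the regex swaps the '(origin|ose)-${component}' segment for 'registry-console', and the plain replace pins the tag to the short version rather than the full openshift_image_tag, since the console is versioned per minor release. A worked example (input URL illustrative):

    # openshift.master.registry_url:    registry.example.com/openshift3/ose-${component}:${version}
    # after regex_replace:              registry.example.com/openshift3/registry-console:${version}
    # after replace, short_version 3.6: registry.example.com/openshift3/registry-console:v3.6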
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
new file mode 100644
index 000000000..6d8503879
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
@@ -0,0 +1,22 @@
+---
+# Only check whether a docker upgrade is required if docker_upgrade is not
+# already set to False.
+- include: ../../docker/upgrade_check.yml
+ when:
+ - docker_upgrade is not defined or (docker_upgrade | bool)
+ - not (openshift.common.is_atomic | bool)
+
+# Additional checks for Atomic hosts:
+
+- name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+- set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
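The rpm query format deliberately emits a small YAML document so the result can be handed straight to from_yaml instead of string-splitting; avail_version is left blank because on Atomic Host only the installed version is knowable this way, and the final check then falls back to curr_version via default(..., true). For docker 1.12.6 the registered stdout parses as (version illustrative):

    # g_atomic_docker_version_result.stdout:
    #   ---
    #   curr_version: 1.12.6
    #   avail_version:
    # l_docker_version.curr_version  => "1.12.6"
    # l_docker_version.avail_version => empty, so default(curr_version, true) applies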
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml b/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
deleted file mode 100644
index 6de1ed061..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/validate_excluder.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# input variables:
-# - repoquery_cmd
-# - excluder
-# - openshift_upgrade_target
-- block:
- - name: Get available excluder version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ excluder }}"
- register: excluder_version
- failed_when: false
- changed_when: false
-
- - name: Docker excluder version detected
- debug:
- msg: "{{ excluder }}: {{ excluder_version.stdout }}"
-
- - name: Printing upgrade target version
- debug:
- msg: "{{ openshift_upgrade_target }}"
-
- - name: Check the available {{ excluder }} version is at most of the upgrade target version
- fail:
- msg: "Available {{ excluder }} version {{ excluder_version.stdout }} is higher than the upgrade target version"
- when:
- - "{{ excluder_version.stdout != '' }}"
- - "{{ excluder_version.stdout.split('.')[0:2] | join('.') | version_compare(openshift_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True) }}"
- when:
- - not openshift.common.is_atomic | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 06eb5f936..45022cd61 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -9,23 +9,16 @@
local_facts:
ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- - name: Ensure Master is running
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
-
- name: Ensure HA Master is running
service:
name: "{{ openshift.common.service_type }}-master-api"
state: started
enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+ when: openshift.common.is_containerized | bool
- name: Ensure HA Master is running
service:
name: "{{ openshift.common.service_type }}-master-controllers"
state: started
enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+ when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
deleted file mode 100644
index 7646e0fa6..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- # Only check if docker upgrade is required if docker_upgrade is not
- # already set to False.
- - include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
-
- # Additional checks for Atomic hosts:
-
- - name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
- - set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
- - fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
new file mode 100644
index 000000000..497709d25
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -0,0 +1,13 @@
+---
+- name: Verify Host Requirements
+ hosts: oo_all_hosts
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: upgrade
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
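The playbook context tells openshift_health_checker that these checks run as part of an upgrade rather than an install. If a known-tight host should not block the upgrade, individual checks can be waived from the inventory; a sketch, assuming the openshift_disable_check variable honored by the health checker role:

    openshift_disable_check: disk_availability,memory_availability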
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
deleted file mode 100644
index 354af3cde..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Verify node processes
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- - openshift_docker_facts
- tasks:
- - name: Ensure Node is running
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index c83923dae..9b4a8e413 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -1,41 +1,43 @@
---
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- pre_tasks:
- - fail:
- msg: Verify OpenShift is already installed
- when: openshift.common.version is not defined
+- name: Fail when OpenShift is not installed
+ fail:
+ msg: Verify OpenShift is already installed
+ when: openshift.common.version is not defined
- - fail:
- msg: Verify the correct version was found
- when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+- name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- when: not openshift.common.is_containerized | bool
+- when: not openshift.common.is_containerized | bool
+ block:
+ - name: Check latest available OpenShift RPM version
+ repoquery:
+ name: "{{ openshift.common.service_type }}"
+ ignore_excluders: true
+ register: repoquery_out
- - name: Verify containers are available for upgrade
- command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
+ - name: Fail when unable to determine available OpenShift RPM version
+ fail:
+ msg: "Unable to determine available OpenShift RPM version"
+ when:
+ - not repoquery_out.results.package_found
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
- when: not openshift.common.is_containerized | bool
+ - name: Set fact avail_openshift_version
+ set_fact:
+ avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}"
- name: Verify OpenShift RPMs are available for upgrade
fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+ msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ when:
+ - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<')
- - fail:
- msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
- when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
+- name: Fail when the OpenShift version does not meet the minimum requirement for an Origin upgrade
+ fail:
+ msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ when:
+ - deployment_type == 'origin'
+ - openshift.common.version | version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
index 03ac02e9f..164baca81 100644
--- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml
@@ -1,27 +1,39 @@
---
-# We verified latest rpm available is suitable, so just yum update.
+# When we update package "a-${version}" and a requires b >= ${version}, yum
+# will pick the latest available version of b unless we pin it too, and the
+# whole dependency set ends up at the latest version. Since the package
+# module, unlike the yum module, doesn't flatten a list of packages into one
+# transaction, we need to do that explicitly. The Ansible core team tells us
+# not to rely on yum module transaction flattening anyway.
+
+# TODO: If the sdn package isn't already installed this will install it, we
+# should fix that
-# Master package upgrade ends up depending on node and sdn packages, we need to be explicit
-# with all versions to avoid yum from accidentally jumping to something newer than intended:
- name: Upgrade master packages
- package: name={{ item }} state=present
- when: component == "master"
- with_items:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ package: name={{ master_pkgs | join(',') }} state=present
+ vars:
+ master_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "master"
+ - not openshift.common.is_atomic | bool
- name: Upgrade node packages
- package: name={{ item }} state=present
- when: component == "node"
- with_items:
- - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
- - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
-
-- name: Ensure python-yaml present for config upgrade
- package: name=PyYAML state=present
- when: not openshift.common.is_atomic | bool
+ package: name={{ node_pkgs | join(',') }} state=present
+ vars:
+ node_pkgs:
+ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}"
+ - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}"
+ - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}"
+ - PyYAML
+ when:
+ - component == "node"
+ - not openshift.common.is_atomic | bool
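Joining each list into a single name= argument makes yum resolve all of the pinned packages in one transaction, which is what actually holds the interdependent master/node/sdn packages at the same version. Note that openshift_pkg_version conventionally carries its leading dash, so every entry renders fully pinned; for example (version illustrative):

    # openshift_pkg_version: "-3.6.0", service_type: "atomic-openshift"
    # master_pkgs | join(',') =>
    #   atomic-openshift-3.6.0,atomic-openshift-master-3.6.0,atomic-openshift-node-3.6.0,...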
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index c6e799261..b75aae589 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,17 +2,22 @@
###############################################################################
# Upgrade Masters
###############################################################################
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
+
+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade all storage
+ hosts: oo_first_master
tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
- changed_when: False
+ - name: Upgrade all storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ register: l_pb_upgrade_control_plane_pre_upgrade_storage
+ when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
+ - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
# If facts cache were for some reason deleted, this fact may not be set, and if not set
# it will always default to true. This causes problems for the etcd data dir fact detection
@@ -86,7 +91,7 @@
- include_vars: ../../../../roles/openshift_master/vars/main.yml
- - name: Update systemd units
+ - name: Remove any legacy systemd units and update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
- name: Check for ca-bundle.crt
@@ -118,8 +123,8 @@
yedit:
src: "{{ openshift.common.config_base }}/master/master-config.yaml"
key: 'imageConfig.format'
- value: "{{ oreg_url }}"
- when: oreg_url is defined
+ value: "{{ oreg_url | default(oreg_url_master) }}"
+ when: oreg_url is defined or oreg_url_master is defined
# Run the upgrade hook prior to restarting services/system if defined:
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
@@ -141,6 +146,19 @@
- include: "{{ openshift_master_upgrade_post_hook }}"
when: openshift_master_upgrade_post_hook is defined
+ - name: Post master upgrade - Upgrade clusterpolicies storage
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=clusterpolicies --confirm
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
+ run_once: true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+
- set_fact:
master_update_complete: True
@@ -216,13 +234,25 @@
- name: Reconcile Security Context Constraints
command: >
- {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name
+ {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
register: reconcile_scc_result
changed_when:
- reconcile_scc_result.stdout != ''
- reconcile_scc_result.rc == 0
run_once: true
+ - name: Migrate storage post policy reconciliation
+ command: >
+ {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ migrate storage --include=* --confirm
+ run_once: true
+ register: l_pb_upgrade_control_plane_post_upgrade_storage
+ when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ failed_when:
+ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
+ - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
+ - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
+
- set_fact:
reconcile_complete: True
@@ -251,15 +281,15 @@
roles:
- openshift_facts
tasks:
- - include: docker/upgrade.yml
+ - include: docker/tasks/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- name: Drain and upgrade master nodes
hosts: oo_masters_to_config:&oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -281,13 +311,18 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_control_plane_drain_result
+ until: not l_upgrade_control_plane_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
+ - openshift_node_dnsmasq
- openshift_node_upgrade
post_tasks:
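Both migrate storage invocations share one gating pattern: the *_enabled var decides whether the command runs at all, and the *_fatal var decides whether a non-zero exit aborts the upgrade (fatal by default before the upgrade, advisory by default afterwards). Operators can tune this with extra vars; a sketch:

    openshift_upgrade_pre_storage_migration_enabled: true
    openshift_upgrade_pre_storage_migration_fatal: false
    openshift_upgrade_post_storage_migration_enabled: true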
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index e9f894942..c93a5d89c 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -4,7 +4,7 @@
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
@@ -26,14 +26,22 @@
- name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets
+ {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
delegate_to: "{{ groups.oo_first_master.0 }}"
+ register: l_upgrade_nodes_drain_result
+ until: not l_upgrade_nodes_drain_result | failed
+ retries: 60
+ delay: 60
roles:
- lib_openshift
- openshift_facts
- docker
+ - openshift_node_dnsmasq
- openshift_node_upgrade
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
post_tasks:
- name: Set node schedulability
@@ -46,7 +54,3 @@
register: node_schedulable
until: node_schedulable|succeeded
when: node_unschedulable|changed
-
-- include: ../reset_excluder.yml
- tags:
- - always
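Several behavioral changes land in this file: any_errors_fatal gives way to a tunable max_fail_percentage, the drain command is retried instead of aborting on the first transient API error, and the excluder is re-enabled through the openshift_excluder role rather than a trailing reset_excluder.yml include. A condensed sketch of the resulting play skeleton (host groups and variable names as above; the drain command is abbreviated for illustration):

---
- name: Drain and upgrade nodes (condensed sketch)
  hosts: oo_nodes_to_upgrade
  # serial accepts counts or percentages, so "20%" batches work here too.
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  # 0 preserves the old fail-fast behavior; a higher value tolerates some failed hosts per batch.
  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
  tasks:
  - name: Drain node, retrying transient failures
    command: oc adm drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets  # abbreviated
    delegate_to: "{{ groups.oo_first_master.0 }}"
    register: l_drain_result
    until: not l_drain_result | failed  # 60 retries x 60s delay = up to an hour of patience
    retries: 60
    delay: 60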
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
index 83d2cec81..8558bf3e9 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml
@@ -74,18 +74,21 @@
- block:
- debug:
msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
- openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+ - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates]
- set_fact:
openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}"
- when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and
- openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates
+ - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates]
- set_fact:
openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}"
- when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
- openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
+ when:
+ - openshift_master_scheduler_current_predicates != default_predicates_no_region
+ - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
when: openshift_master_scheduler_predicates | default(none) is none
@@ -131,18 +134,21 @@
- block:
- debug:
msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
- openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+ - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities]
- set_fact:
openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}"
- when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and
- openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities
+ - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities]
- set_fact:
openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}"
- when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
- openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
+ when:
+ - openshift_master_scheduler_current_priorities != default_priorities_no_zone
+ - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone]
when: openshift_master_scheduler_priorities | default(none) is none
@@ -162,5 +168,6 @@
content: "{{ scheduler_config | to_nice_json }}"
dest: "{{ openshift_master_scheduler_conf }}"
backup: true
- when: "{{ openshift_upgrade_scheduler_predicates is defined or
- openshift_upgrade_scheduler_priorities is defined }}"
+ when: >
+ openshift_upgrade_scheduler_predicates is defined or
+ openshift_upgrade_scheduler_priorities is defined
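Every hunk in this file makes the same mechanical change: a when: string wrapped in {{ }} becomes a list of bare conditions, which Ansible implicitly ANDs. A minimal before/after sketch with placeholder condition names:

---
# Before: a templated string. It works, but `when` is already evaluated as
# Jinja, and newer Ansible releases warn about the redundant delimiters.
- set_fact:
    example: yes
  when: "{{ a != b and a not in c }}"

# After: bare expressions in a list, implicitly ANDed.
- set_fact:
    example: yes
  when:
  - a != b
  - a not in c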
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
new file mode 100644
index 000000000..a241ef039
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -0,0 +1,118 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
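master_config_hook and node_config_hook are paths to the version-specific task files added in this directory; the common upgrade playbooks presumably splice them in with a guarded include, along these lines (assumed mechanism, not part of this diff):

---
- include: "{{ master_config_hook }}"
  when: master_config_hook is defined

That keeps the per-release config edits (the modify_yaml tasks in the v3_X master_config_upgrade.yml files) out of the shared control-plane flow.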
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..54c85f0fb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,118 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..cee4e9087
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,113 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
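The openshift_no_proxy_internal_hostnames expression repeated through these plays collects openshift.common.hostname from the union of the node, master, and etcd groups and joins the results with commas. With a hypothetical two-node, one-master inventory, the fact would render to something like:

---
# Hypothetical rendered value; hostnames are examples only.
openshift_no_proxy_internal_hostnames: "node1.example.com,node2.example.com,master1.example.com"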
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles
@@ -0,0 +1 @@
+../../../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
new file mode 100644
index 000000000..ae217ba2e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -0,0 +1,116 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
new file mode 100644
index 000000000..d7cb38d03
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -0,0 +1,118 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
new file mode 100644
index 000000000..8531e6045
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -0,0 +1,111 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+ openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
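The three modify_yaml calls above push the inventory's admission plugin config into admissionConfig.pluginConfig and blank out two deprecated keys. Assuming an empty yaml_value is written as YAML null, the touched region of master-config.yaml would end up roughly like this (illustrative only):

admissionConfig:
  pluginConfig:
    # contents of openshift.master.admission_plugin_config, when set
  pluginOrderOverride: null
kubernetesMasterConfig:
  admissionConfig: null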
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
deleted file mode 100644
index 48c69eccd..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade job storage
-###############################################################################
-- name: Upgrade job storage
- hosts: oo_first_master
- roles:
- - { role: openshift_cli }
- vars:
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
- tasks:
- - name: Upgrade job storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
- run_once: true
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
new file mode 100644
index 000000000..a3d0d6305
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -0,0 +1,118 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
new file mode 100644
index 000000000..5fee56615
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -0,0 +1,122 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
new file mode 100644
index 000000000..e29d0f8e6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -0,0 +1,111 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+ openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
deleted file mode 100644
index 48c69eccd..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade job storage
-###############################################################################
-- name: Upgrade job storage
- hosts: oo_first_master
- roles:
- - { role: openshift_cli }
- vars:
- # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
- # restart.
- skip_docker_role: True
- tasks:
- - name: Upgrade job storage
- command: >
- {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
- migrate storage --include=jobs --confirm
- run_once: true
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..51acd17da
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,122 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
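New relative to the 3.5 playbook is the ../pre/verify_health_checks.yml include. That file is not part of this diff; a sketch of such a gate, assuming the openshift_health_checker role and its openshift_checks list (both names are assumptions here, and the check names are illustrative):

---
- name: Verify host health before upgrade (sketch)
  hosts: oo_masters_to_config:oo_nodes_to_upgrade
  roles:
  - openshift_health_checker
  vars:
    openshift_checks:
    - disk_availability
    - memory_availability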
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..9fe059ac9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,122 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..1b10d4e37
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,111 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.6'
+ openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
index ac5704f69..78c1767b8 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml
@@ -7,4 +7,6 @@
hosts: oo_first_master
roles:
- { role: lib_openshift }
- tasks: []
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
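The 3.6 validator gains a real task: oc_objectvalidator, from the lib_openshift role pulled in above, scans the cluster for objects that would break the migration. A slightly expanded sketch that also surfaces the module's findings (register/debug are stock Ansible; the bare module invocation matches the diff):

---
- name: Check for invalid namespaces and SDN errors
  oc_objectvalidator:
  register: l_validator_result

- name: Show what the validator reported
  debug:
    var: l_validator_result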
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
new file mode 120000
index 000000000..7de3c1dd7
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins
@@ -0,0 +1 @@
+../../../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
new file mode 100644
index 000000000..ed89dbe8d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -0,0 +1,16 @@
+---
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginConfig'
+ yaml_value: "{{ openshift.master.admission_plugin_config }}"
+ when: "'admission_plugin_config' in openshift.master"
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'admissionConfig.pluginOrderOverride'
+ yaml_value:
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'kubernetesMasterConfig.admissionConfig'
+ yaml_value:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/roles b/playbooks/common/openshift-cluster/upgrades/v3_7/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
new file mode 100644
index 000000000..9ec40723a
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -0,0 +1,122 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
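+# For illustration (hostnames assumed): with masters master1 and master2 and
+# a single etcd host etcd1, the expression above collapses to:
+#   openshift_no_proxy_internal_hostnames: "master1.example.com,master2.example.com,etcd1.example.com"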
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_health_checks.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+
+- include: ../upgrade_nodes.yml
+
+- include: ../post_control_plane.yml
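skip_docker_role is consumed further down the role chain; a minimal sketch of the kind of guard a consuming role can apply (the real docker role's layout may differ):

    # e.g. near the top of a docker role's tasks/main.yml (illustrative)
    - include: package_docker.yml
      when: not skip_docker_role | default(False) | bool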
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
new file mode 100644
index 000000000..f97f34c3b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -0,0 +1,122 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters, and Docker only on standalone etcd hosts
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_master_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: validator.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+- include: ../post_control_plane.yml
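master_config_hook is the splice point for the version-specific config edits above; the generic control-plane playbook is assumed to include it conditionally, along these lines (the exact include point may differ):

    # inside ../upgrade_control_plane.yml (illustrative)
    - include: "{{ master_config_hook }}"
      when: master_config_hook is defined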
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
new file mode 100644
index 000000000..e95b90cd5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -0,0 +1,111 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../init.yml
+ tags:
+ - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: '3.7'
+ openshift_upgrade_min: '3.6'
+
+# Pre-upgrade
+- include: ../initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../disable_node_excluders.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/tasks/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../cleanup_unused_images.yml
+
+- include: ../upgrade_nodes.yml
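Node upgrades drain and restart each node, so they are usually batched. Assuming the common upgrade_nodes.yml honors openshift_upgrade_nodes_serial for the batch size (as this generation of the repo does), a typical invocation is:

    ansible-playbook -i /etc/ansible/hosts \
        playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml \
        -e openshift_upgrade_nodes_serial=20%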
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
new file mode 100644
index 000000000..f76fc68d1
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -0,0 +1,23 @@
+---
+###############################################################################
+# Pre-upgrade checks for known data problems. If this playbook fails, you should
+# contact support; if you do not have a support contract, ask on users@lists.openshift.com
+###############################################################################
+- name: Run 3.7-specific upgrade checks
+ hosts: oo_first_master
+ roles:
+ - { role: lib_openshift }
+
+ tasks:
+ - name: Check for invalid namespaces and SDN errors
+ oc_objectvalidator:
+
+ - name: Confirm OpenShift authorization objects are in sync
+ command: >
+ {{ openshift.common.client_binary }} adm migrate authorization
+ when: not openshift.common.version_gte_3_7 | bool
+ changed_when: false
+ register: l_oc_result
+ until: l_oc_result.rc == 0
+ retries: 4
+ delay: 15
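The authorization migration can also be run by hand before the upgrade; a rough shell equivalent of the retry loop above (the client binary is assumed to be oc):

    for i in 1 2 3 4; do
        oc adm migrate authorization && break
        sleep 15
    done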
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 48cc03b19..be2e6a15a 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -1,16 +1,23 @@
---
-- name: Gather and set facts for node hosts
+- name: Validate node hostnames
hosts: oo_nodes_to_config
- roles:
- - openshift_facts
tasks:
- - shell:
+ - name: Query DNS for IP address of {{ openshift.common.hostname }}
+ shell:
getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }'
register: lookupip
changed_when: false
failed_when: false
- name: Warn user about bad openshift_hostname values
pause:
- prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+ prompt:
+ The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }}
+ doesn't resolve to an IP address owned by this host. Please set the
+ openshift_hostname variable to a hostname that, when resolved on the host
+ in question, resolves to an IP address matching an interface on this host.
+ This host will fail liveness checks for pods utilizing hostPorts. Press
+ ENTER to continue or CTRL-C to abort.
seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
- when: lookupip.stdout not in ansible_all_ipv4_addresses
+ when:
+ - lookupip.stdout != '127.0.0.1'
+ - lookupip.stdout not in ansible_all_ipv4_addresses
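The DNS probe is easy to reproduce by hand when chasing a warning; on the node in question (hostname and address illustrative):

    $ getent ahostsv4 master1.example.com | head -n 1 | awk '{ print $1 }'
    192.0.2.10

The warning fires only if the printed address is neither 127.0.0.1 nor present in ansible_all_ipv4_addresses; the new loopback guard keeps a plain /etc/hosts entry from tripping the pause.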
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 1b8106e0e..f2b85eea1 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -3,8 +3,10 @@
hosts: oo_etcd_to_config
any_errors_fatal: true
roles:
+ - role: os_firewall
- role: openshift_etcd
etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..e4ab0aa41
--- /dev/null
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -0,0 +1,147 @@
+---
+- name: Run pre-checks
+ hosts: oo_etcd_to_migrate
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: check
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ ansible_default_ipv4.address }}"
+
+# TODO: This will be different for release-3.6 branch
+- name: Prepare masters for etcd data migration
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type + '-master-controllers' }}"
+ - "{{ openshift.common.service_type + '-master-api' }}"
+ - debug:
+ msg: "master service name: {{ master_services }}"
+ - name: Stop masters
+ service:
+ name: "{{ item }}"
+ state: stopped
+ with_items: "{{ master_services }}"
+
+- name: Backup v2 data
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ roles:
+ - role: openshift_facts
+ - role: etcd_common
+ r_etcd_common_action: backup
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migration
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Migration cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when:
+ - etcd_backup_failed | length > 0
+
+- name: Stop etcd
+ hosts: oo_etcd_to_migrate
+ gather_facts: no
+ pre_tasks:
+ - set_fact:
+ l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
+ - name: Disable etcd members
+ service:
+ name: "{{ l_etcd_service }}"
+ state: stopped
+
+- name: Migrate data on first etcd
+ hosts: oo_etcd_to_migrate[0]
+ gather_facts: no
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: migrate
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+- name: Clean data stores on remaining etcd hosts
+ hosts: oo_etcd_to_migrate[1:]
+ gather_facts: no
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: clean_data
+ r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+ post_tasks:
+ - name: Add etcd hosts
+ delegate_to: localhost
+ add_host:
+ name: "{{ item }}"
+ groups: oo_new_etcd_to_config
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}"
+ changed_when: no
+ - name: Set success
+ set_fact:
+ r_etcd_migrate_success: true
+
+- include: ./scaleup.yml
+
+- name: Gate on etcd migration
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - set_fact:
+ etcd_migration_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_etcd_to_migrate)
+ | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}"
+ - set_fact:
+ etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}"
+
+- name: Add TTLs on the first master
+ hosts: oo_first_master[0]
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: add_ttls
+ etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+ when: etcd_migration_failed | length == 0
+
+- name: Configure masters if etcd data migration is successful
+ hosts: oo_masters_to_config
+ roles:
+ - role: etcd_migrate
+ r_etcd_migrate_action: configure
+ when: etcd_migration_failed | length == 0
+ tasks:
+ - debug:
+ msg: "Skipping master re-configuration since migration failed."
+ when:
+ - etcd_migration_failed | length > 0
+ - name: Start master services
+ service:
+ name: "{{ item }}"
+ state: started
+ register: service_status
+      # Sometimes the master-api or master-controllers service fails to start on the first attempt
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
+ with_items: "{{ master_services[::-1] }}"
+ - fail:
+ msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}"
+ when:
+ - etcd_migration_failed | length > 0
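Both gates use the same select/collect pattern: collect inventory_hostname from every host whose facts carry the success flag, then subtract from the full group. For illustration (hostnames assumed), with etcd1 finished and etcd2 not:

    etcd_backup_completed: ['etcd1']
    etcd_backup_failed:    ['etcd2']   # oo_etcd_to_migrate minus completed

so the corresponding fail task fires and names etcd2.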
diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml
index 196c86f28..af1ef245a 100644
--- a/playbooks/common/openshift-etcd/restart.yml
+++ b/playbooks/common/openshift-etcd/restart.yml
@@ -5,5 +5,5 @@
tasks:
- name: restart etcd
service:
- name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}"
+ name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"
state: restarted
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
new file mode 100644
index 000000000..d3fa48bad
--- /dev/null
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -0,0 +1,74 @@
+---
+- name: Gather facts
+ hosts: oo_etcd_to_config:oo_new_etcd_to_config
+ roles:
+ - openshift_etcd_facts
+ post_tasks:
+ - set_fact:
+ etcd_hostname: "{{ etcd_hostname }}"
+ etcd_ip: "{{ etcd_ip }}"
+
+- name: Configure etcd
+ hosts: oo_new_etcd_to_config
+ serial: 1
+ any_errors_fatal: true
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ pre_tasks:
+ - name: Add new etcd members to cluster
+ command: >
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
+ --key-file {{ etcd_peer_key_file }}
+ --ca-file {{ etcd_peer_ca_file }}
+ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+ member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}
+ delegate_to: "{{ etcd_ca_host }}"
+ failed_when:
+ - etcd_add_check.rc == 1
+ - ("peerURL exists" not in etcd_add_check.stderr)
+ register: etcd_add_check
+ retries: 3
+ delay: 10
+ until: etcd_add_check.rc == 0
+ roles:
+ - role: os_firewall
+ when: etcd_add_check.rc == 0
+ - role: openshift_etcd
+ when: etcd_add_check.rc == 0
+ etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_initial_cluster_state: "existing"
+ initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
+ etcd_ca_setup: False
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ - role: nickhammond.logrotate
+ when: etcd_add_check.rc == 0
+ post_tasks:
+ - name: Verify cluster is stable
+ command: >
+ /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }}
+ --key-file {{ etcd_peer_key_file }}
+ --ca-file {{ etcd_peer_ca_file }}
+ -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }}
+ cluster-health
+ register: scaleup_health
+ retries: 3
+ delay: 30
+ until: scaleup_health.rc == 0
+
+- name: Update master etcd client urls
+ hosts: oo_masters_to_config
+ serial: 1
+ tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: update_etcd_client_urls
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
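The initial_etcd_cluster parsing leans on the shape of etcdctl's member add output, which looks roughly like this (names, IDs, and URLs illustrative):

    Added member named etcd3 with ID 8211f1d0f64f3269 to cluster

    ETCD_NAME="etcd3"
    ETCD_INITIAL_CLUSTER="etcd1=https://192.0.2.1:2380,etcd3=https://192.0.2.3:2380"
    ETCD_INITIAL_CLUSTER_STATE="existing"

stdout_lines[3] picks out the ETCD_INITIAL_CLUSTER line, and the two regex_replace filters strip the key and the quotes, leaving the bare peer list.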
diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml
deleted file mode 100644
index ced4bddc5..000000000
--- a/playbooks/common/openshift-etcd/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_etcd
- add_host:
- name: "{{ item }}"
- groups: g_service_etcd
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change etcd state on etcd instance(s)
- hosts: g_service_etcd
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=etcd state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 75faf5ba8..d9de578f3 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,21 +1,26 @@
---
-- name: Open firewall ports for GlusterFS
- hosts: oo_glusterfs_to_config
- vars:
- os_firewall_allow:
- - service: glusterfs_sshd
- port: "2222/tcp"
- - service: glusterfs_daemon
- port: "24007/tcp"
- - service: glusterfs_management
- port: "24008/tcp"
- - service: glusterfs_bricks
- port: "49152-49251/tcp"
- roles:
- - os_firewall
+- name: Open firewall ports for GlusterFS nodes
+ hosts: glusterfs
+ tasks:
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: firewall.yml
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
+
+- name: Open firewall ports for GlusterFS registry nodes
+ hosts: glusterfs_registry
+ tasks:
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: firewall.yml
+ when:
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
- name: Configure GlusterFS
hosts: oo_first_master
- roles:
- - role: openshift_storage_glusterfs
+ tasks:
+  - name: Set up GlusterFS
+ include_role:
+ name: openshift_storage_glusterfs
when: groups.oo_glusterfs_to_config | default([]) | count > 0
diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..80cf7529e
--- /dev/null
+++ b/playbooks/common/openshift-glusterfs/registry.yml
@@ -0,0 +1,49 @@
+---
+- include: config.yml
+
+- name: Initialize GlusterFS registry PV and PVC vars
+ hosts: oo_first_master
+ tags: hosted
+ tasks:
+ - set_fact:
+ glusterfs_pv: []
+ glusterfs_pvc: []
+
+ - set_fact:
+ glusterfs_pv:
+ - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
+ capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+ access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+ storage:
+ glusterfs:
+ endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
+ path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
+ readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
+ glusterfs_pvc:
+ - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+ capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+ access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+ when: openshift.hosted.registry.storage.glusterfs.swap
+
+- name: Create persistent volumes
+ hosts: oo_first_master
+ tags:
+ - hosted
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
+ roles:
+ - role: openshift_persistent_volumes
+ when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+
+- name: Create Hosted Resources
+ hosts: oo_first_master
+ tags:
+ - hosted
+ pre_tasks:
+ - set_fact:
+ openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+ when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ roles:
+ - role: openshift_hosted
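Downstream, openshift_persistent_volumes renders each glusterfs_pv entry into a PersistentVolume object; a sketch of what the fact above produces, with all names and sizes illustrative:

    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: registry-glusterfs-volume
    spec:
      capacity:
        storage: 10Gi
      accessModes:
      - ReadWriteMany
      glusterfs:
        endpoints: glusterfs-registry-endpoints
        path: glusterfs-registry-volume
        readOnly: false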
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index c414913bf..09ed81a83 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -12,5 +12,7 @@
openshift_use_nuage | default(false),
nuage_mon_rest_server_port | default(none)))
+ openshift_loadbalancer_additional_backends | default([]) }}"
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
+ - role: os_firewall
- role: openshift_loadbalancer
diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml
deleted file mode 100644
index d3762c961..000000000
--- a/playbooks/common/openshift-loadbalancer/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_lb
- add_host:
- name: "{{ item }}"
- groups: g_service_lb
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on lb instance(s)
- hosts: g_service_lb
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=haproxy state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index c0ea93d2c..7468c78f0 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -11,13 +11,13 @@
when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
- role: openshift_examples
registry_url: "{{ openshift.master.registry_url }}"
- when: openshift.common.install_examples | bool
+ when: openshift_install_examples | default(True)
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
- when: openshift.common.use_manageiq | bool
+ when: openshift_use_manageiq | default(false) | bool
- role: cockpit
when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
(osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
- role: flannel_register
- when: openshift.common.use_flannel | bool
+ when: openshift_use_flannel | default(false) | bool
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 60cf56108..e1b9a4964 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,10 +1,31 @@
---
+- name: Disable excluders
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+
- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
vars:
t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
pre_tasks:
+ # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336
+ #
+ # When scaling up a cluster upgraded from OCP <= 3.5, ensure that
+ # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing
+ # masters, or absent if such is the case.
+ - name: Detect if this host is a new master in a scale up
+ set_fact:
+ g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}"
+
+ - name: Scaleup Detection
+ debug:
+ var: g_openshift_master_is_scaleup
+
- name: Check for RPM generated config marker file .config_managed
stat:
path: /etc/origin/.config_managed
@@ -69,7 +90,7 @@
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-- name: Determine if session secrets must be generated
+- name: Inspect state of first master config settings
hosts: oo_first_master
roles:
- role: openshift_facts
@@ -79,6 +100,60 @@
local_facts:
session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+ - name: Check for existing configuration
+ stat:
+ path: /etc/origin/master/master-config.yaml
+ register: master_config_stat
+
+ - name: Set clean install fact
+ set_fact:
+ l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
+
+ - name: Determine if etcd3 storage is in use
+ command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
+ register: etcd3_grep
+ failed_when: false
+ changed_when: false
+
+ - name: Set etcd3 fact
+ set_fact:
+ l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
+
+ - name: Check if atomic-openshift-master sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master
+ register: l_aom_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master
+ register: l_default_registry_defined
+ when: l_aom_exists.stat.exists | bool
+
+ - name: Check if atomic-openshift-master-api sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master-api
+ register: l_aom_api_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api
+ register: l_default_registry_defined_api
+ when: l_aom_api_exists.stat.exists | bool
+
+ - name: Check if atomic-openshift-master-controllers sysconfig exists yet
+ stat:
+ path: /etc/sysconfig/atomic-openshift-master-controllers
+ register: l_aom_controllers_exists
+
+ - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present
+ command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers
+ register: l_default_registry_defined_controllers
+ when: l_aom_controllers_exists.stat.exists | bool
+
+ - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value
+ set_fact:
+ l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}"
+ l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}"
+ l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}"
- name: Generate master session secrets
hosts: oo_first_master
@@ -104,29 +179,55 @@
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- roles:
- - role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.ip') | default([]) | join(',')
+ }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_master_facts
+ - role: openshift_hosted_facts
+ - role: openshift_master_certificates
+ - role: openshift_etcd_client_certificates
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ when: groups.oo_etcd_to_config | default([]) | length != 0
+ - role: openshift_clock
+ - role: openshift_cloud_provider
+ - role: openshift_builddefaults
+ - role: openshift_buildoverrides
+ - role: nickhammond.logrotate
+ - role: contiv
+ contiv_role: netmaster
+ when: openshift_use_contiv | default(False) | bool
+ - role: openshift_master
+ openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
+ r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
+ r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
+ openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
+ openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
+ openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
+ openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
- role: nuage_master
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: calico_master
- when: openshift.common.use_calico | bool
-
+ when: openshift_use_calico | default(false) | bool
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_masters_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
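The three awk probes echo the matching sysconfig line verbatim, so the preserved facts carry the whole assignment. For example, given (value illustrative):

    # /etc/sysconfig/atomic-openshift-master-api
    OPENSHIFT_DEFAULT_REGISTRY='docker-registry.default.svc:5000'

l_default_registry_value_api holds that full line, and falls back to an empty string on hosts where the file or the line is absent.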
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
index 6fec346c3..4d73b8124 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/restart.yml
@@ -7,7 +7,7 @@
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
serial: 1
handlers:
- - include: roles/openshift_master/handlers/main.yml
+ - include: ../../../roles/openshift_master/handlers/main.yml
static: yes
roles:
- openshift_facts
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index 67ba0aa2e..a5dbe0590 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -37,3 +37,4 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 508b5a3ac..4f8b758fd 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -1,9 +1,4 @@
---
-- name: Restart master
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: restarted
- when: not openshift_master_ha | bool
- name: Restart master API
service:
name: "{{ openshift.common.service_type }}-master-api"
@@ -15,6 +10,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: openshift_master_ha | bool
- name: Restart master controllers
service:
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 92f16dc47..17f9ef4bc 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -1,11 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
- name: Update master count
hosts: oo_masters:!oo_masters_to_config
serial: 1
@@ -50,26 +43,8 @@
delay: 1
changed_when: false
-- name: Configure docker hosts
- hosts: oo_masters_to-config:oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
-- include: ../openshift-cluster/disable_excluder.yml
- tags:
- - always
-
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
- include: ../openshift-node/config.yml
-
-- include: ../openshift-cluster/reset_excluder.yml
- tags:
- - always
diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml
deleted file mode 100644
index 48a2731aa..000000000
--- a/playbooks/common/openshift-master/service.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Populate g_service_masters host group if needed
- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_masters
- add_host:
- name: "{{ item }}"
- groups: g_service_masters
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on master instance(s)
- hosts: g_service_masters
- connection: ssh
- gather_facts: no
- tasks:
- - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 000e46e80..64ea0d3c4 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -2,5 +2,5 @@
- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- - role: openshift_facts
+ - role: os_firewall
- role: openshift_storage_nfs
diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml
deleted file mode 100644
index b1e35e4b1..000000000
--- a/playbooks/common/openshift-nfs/service.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-- name: Populate g_service_nfs host group if needed
- hosts: localhost
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nfs
- add_host:
- name: "{{ item }}"
- groups: g_service_nfs
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on nfs instance(s)
- hosts: g_service_nfs
- connection: ssh
- gather_facts: no
- tasks:
- - service: name=nfs-server state="{{ new_cluster_state }}"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 792ffb4e2..0801c41ff 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,24 +1,11 @@
---
-- name: Gather and set facts for node hosts
+- name: Disable excluders
hosts: oo_nodes_to_config
- vars:
- t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}"
- pre_tasks:
- - set_fact:
- openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}"
- when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != ""
+ gather_facts: no
roles:
- - openshift_facts
- tasks:
- # Since the master is generating the node certificates before they are
- # configured, we need to make sure to set the node properties beforehand if
- # we do not want the defaults
- - openshift_facts:
- role: node
- local_facts:
- labels: "{{ openshift_node_labels | default(None) }}"
- annotations: "{{ openshift_node_annotations | default(None) }}"
- schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
+ - role: openshift_excluder
+ r_openshift_excluder_action: disable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Evaluate node groups
hosts: localhost
@@ -32,7 +19,11 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+ when:
+ - hostvars[item].openshift is defined
+ - hostvars[item].openshift.common is defined
+ - hostvars[item].openshift.common.is_containerized | bool
+ - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
changed_when: False
- name: Configure containerized nodes
@@ -47,9 +38,9 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -64,9 +55,8 @@
| union(groups['oo_etcd_to_config'] | default([])))
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
+ - role: os_firewall
- role: openshift_node
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
@@ -81,18 +71,27 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
- when: openshift.common.use_flannel | bool
+ when: openshift_use_flannel | default(false) | bool
- role: calico
- when: openshift.common.use_calico | bool
+ when: openshift_use_calico | default(false) | bool
- role: nuage_node
- when: openshift.common.use_nuage | bool
+ when: openshift_use_nuage | default(false) | bool
- role: contiv
contiv_role: netplugin
- when: openshift.common.use_contiv | bool
+ when: openshift_use_contiv | default(false) | bool
- role: nickhammond.logrotate
- role: openshift_manage_node
openshift_master_host: "{{ groups.oo_first_master.0 }}"
+ when: not openshift_node_bootstrap | default(False)
tasks:
- name: Create group for deployment type
group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
+
+- name: Re-enable excluder if it was previously enabled
+ hosts: oo_nodes_to_config
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
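The excluder bracketing mirrors what the *-excluder packages expose on the CLI; a rough manual equivalent on a single node (service type assumed to be atomic-openshift):

    atomic-openshift-excluder unexclude   # lift the yum excludes before configuring
    # ... node configuration runs ...
    atomic-openshift-excluder exclude     # reinstate them afterwards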
diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml
index be050c12c..b3a7399dc 100644
--- a/playbooks/common/openshift-node/network_manager.yml
+++ b/playbooks/common/openshift-node/network_manager.yml
@@ -1,6 +1,8 @@
---
+- include: ../openshift-cluster/evaluate_groups.yml
+
- name: Install and configure NetworkManager
- hosts: l_oo_all_hosts
+ hosts: oo_all_hosts
become: yes
tasks:
- name: install NetworkManager
diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml
index 441b100e9..c3beb59b7 100644
--- a/playbooks/common/openshift-node/restart.yml
+++ b/playbooks/common/openshift-node/restart.yml
@@ -11,6 +11,10 @@
service:
name: docker
state: restarted
+ register: l_docker_restart_docker_in_node_result
+ until: not l_docker_restart_docker_in_node_result | failed
+ retries: 3
+ delay: 30
- name: Update docker facts
openshift_facts:
@@ -23,7 +27,6 @@
with_items:
- etcd_container
- openvswitch
- - "{{ openshift.common.service_type }}-master"
- "{{ openshift.common.service_type }}-master-api"
- "{{ openshift.common.service_type }}-master-controllers"
- "{{ openshift.common.service_type }}-node"
@@ -36,6 +39,7 @@
state: started
delay: 10
port: "{{ openshift.master.api_port }}"
+ timeout: 600
when: inventory_hostname in groups.oo_masters_to_config
- name: restart node
@@ -51,7 +55,7 @@
register: node_output
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_config
- until: node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
+ until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
# Give the node two minutes to come back online.
retries: 24
delay: 5
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
deleted file mode 100644
index c31aca62b..000000000
--- a/playbooks/common/openshift-node/scaleup.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- include: ../openshift-cluster/evaluate_groups.yml
-
-- name: Gather facts
- hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config
- roles:
- - openshift_facts
-
-- name: Gather and set facts for first master
- hosts: oo_first_master
- vars:
- openshift_master_count: "{{ groups.oo_masters | length }}"
- pre_tasks:
- - set_fact:
- openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
- when: openshift_master_default_subdomain is not defined
- roles:
- - openshift_master_facts
-
-- name: Configure docker hosts
- hosts: oo_nodes_to_config
- vars:
- docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
- docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}"
- docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
- roles:
- - openshift_facts
- - openshift_docker
-
-- include: ../openshift-cluster/disable_excluder.yml
- tags:
- - always
-
-- include: ../openshift-node/config.yml
-
-- include: ../openshift-cluster/reset_excluder.yml
- tags:
- - always
diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml
deleted file mode 100644
index 130a5416f..000000000
--- a/playbooks/common/openshift-node/service.yml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Populate g_service_nodes host group if needed
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - fail: msg="new_cluster_state is required to be injected in this playbook"
- when: new_cluster_state is not defined
-
- - name: Evaluate g_service_nodes
- add_host:
- name: "{{ item }}"
- groups: g_service_nodes
- with_items: "{{ oo_host_group_exp | default([]) }}"
- changed_when: False
-
-- name: Change state on node instance(s)
- hosts: g_service_nodes
- connection: ssh
- gather_facts: no
- tasks:
- - name: Change state on node instance(s)
- service:
- name: "{{ service_type }}-node"
- state: "{{ new_cluster_state }}"