Diffstat (limited to 'playbooks')
-rw-r--r--  playbooks/adhoc/uninstall.yml | 74
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 16
l---------  playbooks/byo/openshift-cluster/upgrades/v3_3/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 138
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 100
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 102
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 106
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml | 22
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml | 76
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/init.yml | 50
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 40
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/post.yml) | 0
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre.yml | 311
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml | 6
l---------  playbooks/common/openshift-cluster/upgrades/pre/roles | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 31
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml | 23
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml | 13
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 45
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml (renamed from playbooks/common/openshift-cluster/upgrades/upgrade.yml) | 210
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 75
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/verify_ansible_version.yml | 3
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 11
-rw-r--r--  playbooks/common/openshift-master/config.yml | 4
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 4
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 2
-rw-r--r--  playbooks/common/openshift-node/config.yml | 18
-rw-r--r--  playbooks/common/openshift-node/scaleup.yml | 11
-rw-r--r--  playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/cluster_hosts.yml | 22
-rw-r--r--  playbooks/openstack/openshift-cluster/dns.yml | 52
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack.yaml | 161
-rw-r--r--  playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml | 24
-rw-r--r--  playbooks/openstack/openshift-cluster/files/user-data | 13
-rw-r--r--  playbooks/openstack/openshift-cluster/launch.yml | 20
-rw-r--r--  playbooks/openstack/openshift-cluster/list.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/terminate.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/update.yml | 2
-rw-r--r--  playbooks/openstack/openshift-cluster/vars.yml | 1
46 files changed, 1016 insertions(+), 861 deletions(-)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 30e0f05fd..789f66b14 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -22,6 +22,7 @@
- set_fact:
is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
+# Stop services on all hosts prior to removing files.
- hosts: nodes
become: yes
tasks:
@@ -35,6 +36,46 @@
- origin-node
failed_when: false
+- hosts: masters
+ become: yes
+ tasks:
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - atomic-enterprise-master
+ - atomic-openshift-master
+ - atomic-openshift-master-api
+ - atomic-openshift-master-controllers
+ - openshift-master
+ - openshift-master-api
+ - openshift-master-controllers
+ - origin-master
+ - origin-master-api
+ - origin-master-controllers
+ - pcsd
+ failed_when: false
+
+- hosts: etcd
+ become: yes
+ tasks:
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - etcd
+ failed_when: false
+
+- hosts: lb
+ become: yes
+ tasks:
+ - name: Stop services
+ service: name={{ item }} state=stopped
+ with_items:
+ - haproxy
+ failed_when: false
+
+- hosts: nodes
+ become: yes
+ tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
changed_when: False
@@ -74,7 +115,7 @@
- name: Remove flannel package
action: "{{ ansible_pkg_mgr }} name=flannel state=absent"
- when: openshift_use_flannel | default(false) | bool
+ when: openshift_use_flannel | default(false) | bool and not is_atomic | bool
- shell: systemctl reset-failed
changed_when: False
@@ -181,6 +222,7 @@
- /etc/systemd/system/openvswitch.service
- /etc/systemd/system/origin-node-dep.service
- /etc/systemd/system/origin-node.service
+ - /etc/systemd/system/origin-node.service.wants
- /run/openshift-sdn
- /var/lib/atomic-enterprise
- /var/lib/openshift
@@ -196,22 +238,6 @@
- hosts: masters
become: yes
tasks:
- - name: Stop services
- service: name={{ item }} state=stopped
- with_items:
- - atomic-enterprise-master
- - atomic-openshift-master
- - atomic-openshift-master-api
- - atomic-openshift-master-controllers
- - openshift-master
- - openshift-master-api
- - openshift-master-controllers
- - origin-master
- - origin-master-api
- - origin-master-controllers
- - pcsd
- failed_when: false
-
- name: unmask services
command: systemctl unmask "{{ item }}"
changed_when: False
@@ -306,12 +332,6 @@
- hosts: etcd
become: yes
tasks:
- - name: Stop services
- service: name={{ item }} state=stopped
- with_items:
- - etcd
- failed_when: false
-
- name: unmask services
command: systemctl unmask "{{ item }}"
changed_when: False
@@ -346,7 +366,7 @@
- /etc/etcd
- /etc/systemd/system/etcd_container.service
- # Intenationally using rm command over file module because if someone had mounted a filesystem
+ # Intenationally using rm command over file module because if someone had mounted a filesystem
# at /var/lib/etcd then the contents was not removed correctly
- name: Remove etcd data
shell: rm -rf /var/lib/etcd/*
@@ -357,12 +377,6 @@
- hosts: lb
become: yes
tasks:
- - name: Stop services
- service: name={{ item }} state=stopped
- with_items:
- - haproxy
- failed_when: false
-
- name: unmask services
command: systemctl unmask "{{ item }}"
changed_when: False
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 0b85b2485..fccb03982 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,7 +1,8 @@
---
- include: ../../common/openshift-cluster/verify_ansible_version.yml
-- hosts: localhost
+- name: Create initial host groups for localhost
+ hosts: localhost
connection: local
become: no
gather_facts: no
@@ -14,7 +15,8 @@
groups: l_oo_all_hosts
with_items: "{{ g_all_hosts | default([]) }}"
-- hosts: l_oo_all_hosts
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
gather_facts: no
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 3a285ab9f..9be6becc1 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -1,6 +1,6 @@
- name: Check for appropriate Docker versions
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
roles:
- openshift_facts
tasks:
@@ -19,29 +19,29 @@
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
# and will not take any action on a node already running the requested docker version.
- name: Evacuate and upgrade nodes
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- name: Prepare for Node evacuation
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- name: Evacuate Node for Kubelet upgrade
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml
when: l_docker_upgrade is defined and l_docker_upgrade | bool
- name: Set node schedulability
command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift.node.schedulable | bool
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
new file mode 120000
index 000000000..6bc1a7aef
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
@@ -0,0 +1 @@
+../../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index e740b12c0..7a3829283 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -1,67 +1,101 @@
---
-- include: ../../../../common/openshift-cluster/verify_ansible_version.yml
-
-- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
- - add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: g_all_hosts | default([])
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+# Configure the upgrade target for the common upgrade tasks:
- hosts: l_oo_all_hosts
- gather_facts: no
+ tags:
+ - pre_upgrade
tasks:
- - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/evaluate_groups.yml
- vars:
- # Do not allow adding hosts during upgrade.
- g_new_master_hosts: []
- g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
-
-- name: Set oo_options
- hosts: oo_all_hosts
- tasks:
- - set_fact:
- openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
- when: openshift_docker_additional_registries is not defined
- - set_fact:
- openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
- when: openshift_docker_insecure_registries is not defined
- - set_fact:
- openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
- when: openshift_docker_blocked_registries is not defined
- set_fact:
- openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
- when: openshift_docker_options is not defined
- - set_fact:
- openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
- when: openshift_docker_log_driver is not defined
- - set_fact:
- openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
- when: openshift_docker_log_options is not defined
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+# Pre-upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
tasks:
- set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
-- include: ../../../../common/openshift-cluster/upgrades/pre.yml
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
vars:
- openshift_deployment_type: "{{ deployment_type }}"
-- include: ../../../../common/openshift-cluster/upgrades/upgrade.yml
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
vars:
- openshift_deployment_type: "{{ deployment_type }}"
master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+ vars:
node_config_hook: "v3_3/node_config_upgrade.yml"
+
- include: ../../../openshift-master/restart.yml
-- include: ../../../../common/openshift-cluster/upgrades/post.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
new file mode 100644
index 000000000..d6af71827
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -0,0 +1,100 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+
+- name: Update repos on control plane hosts
+ hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+ tags:
+ - pre_upgrade
+ roles:
+ - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_3/master_config_upgrade.yml"
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
new file mode 100644
index 000000000..e2a33cc00
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -0,0 +1,102 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+ tags:
+ - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
+ openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+ tags:
+ - pre_upgrade
+
+- name: Update repos on nodes
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+ roles:
+ - openshift_repos
+ tags:
+ - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tags:
+ - pre_upgrade
+ tasks:
+ - set_fact:
+ openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+ openshift_generate_no_proxy_hosts | default(True) | bool }}"
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+ tags:
+ - pre_upgrade
+ vars:
+ # Request specific openshift_release and let the openshift_version role handle converting this
+ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+ # defined, and overriding the normal behavior of protecting the installed version
+ openshift_release: "{{ openshift_upgrade_target }}"
+ openshift_protect_installed_version: False
+
+ # We skip the docker role at this point in upgrade to prevent
+ # unintended package, container, or config upgrades which trigger
+ # docker restarts. At this early stage of upgrade we can assume
+ # docker is configured and running.
+ skip_docker_role: True
+
+- name: Verify masters are already upgraded
+ hosts: oo_masters_to_config
+ tags:
+ - pre_upgrade
+ tasks:
+ - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+ when: openshift.common.version != openshift_version
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+ tags:
+ - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+ tags:
+ - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Cleanup unused Docker images
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+ vars:
+ node_config_hook: "v3_3/node_config_upgrade.yml"
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index d6a99fcda..801c8065d 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -13,7 +13,7 @@
- include: initialize_openshift_version.yml
-- name: Set oo_options
+- name: Set oo_option facts
hosts: oo_all_hosts
tags:
- always
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 04dde632b..6d83d2527 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -11,3 +11,5 @@
hostname: "{{ openshift_hostname | default(None) }}"
- set_fact:
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ - set_fact:
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 4aca4daf4..2ba7fded5 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -19,6 +19,12 @@
openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+ - set_fact:
+ logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+ logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
+ logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
+ logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
- role: openshift_cli
- role: openshift_hosted_facts
@@ -43,88 +49,20 @@
when: not openshift.common.version_gte_3_2_or_1_2
- role: openshift_hosted
- role: openshift_metrics
- when: openshift.hosted.metrics.deploy | bool
- - role: cockpit-ui
- when: openshift.common.deployment_subtype == 'registry'
+ when: openshift_hosted_metrics_deploy | default(false) | bool
+ - role: openshift_hosted_logging
+ when: openshift_hosted_logging_deploy | default(false) | bool
+ openshift_hosted_logging_hostname: "{{ logging_hostname }}"
+ openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
+ openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
+ openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
+ openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
+ openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
+ openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"
-- name: Configure CA certificate for secure registry
- hosts: oo_nodes_to_config
- tags:
- - hosted
- tasks:
- - name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
- register: mktemp
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - set_fact:
- openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- when: openshift.common.deployment_subtype == 'registry'
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - name: Copy the admin client config(s)
- command: >
- cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - name: Retrieve docker-registry route
- command: >
- {{ openshift.common.client_binary }} get route docker-registry
- --template='{{ '{{' }} .spec.host {{ '}}' }}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_route
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - name: Retrieve registry service IP
- command: >
- {{ openshift.common.client_binary }} get service docker-registry
- --template='{{ '{{' }} .spec.clusterIP {{ '}}' }}'
- --config={{ openshift_hosted_kubeconfig }}
- -n default
- register: docker_registry_service_ip
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- - name: Create registry CA directories
- file:
- path: "/etc/docker/certs.d/{{ item }}"
- state: directory
- with_items:
- - "{{ docker_registry_service_ip.stdout }}:5000"
- - "{{ docker_registry_route.stdout }}"
- - "docker-registry.default.svc.cluster.local:5000"
- when: openshift.common.deployment_subtype == 'registry'
- - name: Copy CA to registry CA directories
- copy:
- src: "{{ openshift.common.config_base }}/node/ca.crt"
- dest: "/etc/docker/certs.d/{{ item }}"
- remote_src: yes
- force: yes
- with_items:
- - "{{ docker_registry_service_ip.stdout }}:5000"
- - "{{ docker_registry_route.stdout }}"
- - "docker-registry.default.svc.cluster.local:5000"
- when: openshift.common.deployment_subtype == 'registry'
- notify:
- - Restart docker
- - name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- when: openshift.common.deployment_subtype == 'registry'
- changed_when: False
- delegate_to: "{{ groups.oo_first_master.0 }}"
- run_once: true
- handlers:
- - name: Restart docker
- service:
- name: docker
- state: restarted
+ - role: cockpit-ui
+ when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool )
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index b97906072..4996c56a7 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -52,6 +52,14 @@
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
pre_tasks:
+ # set_fact task copied from playbooks/common/openshift-master/config.yml
+ # so that openshift_master_default_subdomain has a default value of ""
+ # (emptry string). openshift_master_default_subdomain must have a default
+ # value for openshift_master_facts to set metrics_public_url.
+ # TODO: clean this up.
+ - set_fact:
+ openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+ when: openshift_master_default_subdomain is not defined
- stat:
path: "{{ openshift_generated_configs_dir }}"
register: openshift_generated_configs_dir_stat
@@ -133,7 +141,9 @@
hosts: oo_etcd_to_config
tasks:
- name: restart etcd
- service: name=etcd state=restarted
+ service:
+ name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}"
+ state: restarted
- name: Stop master services
hosts: oo_masters_to_config
@@ -202,7 +212,7 @@
- name: Determine if node is currently scheduleable
command: >
{{ openshift.common.client_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- get node {{ openshift.common.hostname | lower }} -o json
+ get node {{ openshift.node.nodename }} -o json
register: node_output
when: openshift_certificates_redeploy_ca | default(false) | bool
delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -215,7 +225,7 @@
- name: Prepare for node evacuation
command: >
{{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.common.hostname | lower }}
+ manage-node {{ openshift.node.nodename }}
--schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -223,7 +233,7 @@
- name: Evacuate node
command: >
{{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.common.hostname | lower }}
+ manage-node {{ openshift.node.nodename }}
--evacuate --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
@@ -231,7 +241,7 @@
- name: Set node schedulability
command: >
{{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
- manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+ manage-node {{ openshift.node.nodename }} --schedulable=true
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
new file mode 100644
index 000000000..6e953be69
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -0,0 +1,22 @@
+---
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Remove unused Docker images for Docker 1.10+ migration
+ shell: "docker rmi `docker images -aq`"
+ # Will fail on images still in use:
+ failed_when: false
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Check Docker image count
+ shell: "docker images -aq | wc -l"
+ register: docker_image_count
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- debug: var=docker_image_count.stdout
+ when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
new file mode 100644
index 000000000..78f6c46f3
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -0,0 +1,76 @@
+---
+- name: Create local temp directory for syncing certs
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: local_cert_sync_tmpdir
+ changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
+- name: Create service signer certificate
+ hosts: oo_first_master
+ tasks:
+ - name: Create remote temp directory for creating certs
+ command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: remote_cert_create_tmpdir
+ changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
+ - name: Create service signer certificate
+ command: >
+ {{ openshift.common.admin_binary }} ca create-signer-cert
+ --cert=service-signer.crt
+ --key=service-signer.key
+ --name=openshift-service-serving-signer
+ --serial=service-signer.serial.txt
+ args:
+ chdir: "{{ remote_cert_create_tmpdir.stdout }}/"
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
+ - name: Retrieve service signer certificate
+ fetch:
+ src: "{{ remote_cert_create_tmpdir.stdout }}/{{ item }}"
+ dest: "{{ hostvars.localhost.local_cert_sync_tmpdir.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ with_items:
+ - "service-signer.crt"
+ - "service-signer.key"
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
+ - name: Delete remote temp directory
+ file:
+ name: "{{ remote_cert_create_tmpdir.stdout }}"
+ state: absent
+ changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
+- name: Deploy service signer certificate
+ hosts: oo_masters_to_config
+ tasks:
+ - name: Deploy service signer certificate
+ copy:
+ src: "{{ hostvars.localhost.local_cert_sync_tmpdir.stdout }}/{{ item }}"
+ dest: "{{ openshift.common.config_base }}/master/"
+ with_items:
+ - "service-signer.crt"
+ - "service-signer.key"
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
+- name: Delete local temp directory
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Delete local temp directory
+ file:
+ name: "{{ local_cert_sync_tmpdir.stdout }}"
+ state: absent
+ changed_when: false
+ when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 06b3e244f..fc26d029e 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,7 +1,7 @@
---
# This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_version to True if so.
+# variables, the available packages, and sets l_docker_upgrade to True if so.
- set_fact:
docker_upgrade: True
@@ -28,7 +28,7 @@
- fail:
msg: This playbook requires access to Docker 1.10 or later
# Disable the 1.10 requirement if the user set a specific Docker version
- when: avail_docker_version.stdout | version_compare('1.10','<') and docker_version is not defined
+ when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.10','<'))
# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
new file mode 100644
index 000000000..f3b3abe0d
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -0,0 +1,50 @@
+---
+- include: ../verify_ansible_version.yml
+
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: g_all_hosts | default([])
+
+- hosts: l_oo_all_hosts
+ gather_facts: no
+ tasks:
+ - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../evaluate_groups.yml
+ vars:
+ # Do not allow adding hosts during upgrade.
+ g_new_master_hosts: []
+ g_new_node_hosts: []
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Set oo_options
+ hosts: oo_all_hosts
+ tasks:
+ - set_fact:
+ openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
+ when: openshift_docker_additional_registries is not defined
+ - set_fact:
+ openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
+ when: openshift_docker_insecure_registries is not defined
+ - set_fact:
+ openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
+ when: openshift_docker_blocked_registries is not defined
+ - set_fact:
+ openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
+ when: openshift_docker_options is not defined
+ - set_fact:
+ openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
+ when: openshift_docker_log_driver is not defined
+ - set_fact:
+ openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
+ when: openshift_docker_log_options is not defined
+
+- include: ../initialize_facts.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
new file mode 100644
index 000000000..4e375ac26
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -0,0 +1,40 @@
+---
+- name: Filter list of nodes to be upgraded if necessary
+ hosts: oo_first_master
+ tasks:
+ - name: Retrieve list of openshift nodes matching upgrade label
+ command: >
+ {{ openshift.common.client_binary }}
+ get nodes
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ --selector={{ openshift_upgrade_nodes_label }}
+ -o jsonpath='{.items[*].metadata.name}'
+ register: matching_nodes
+ changed_when: false
+ when: openshift_upgrade_nodes_label is defined
+
+ - set_fact:
+ nodes_to_upgrade: "{{ matching_nodes.stdout.split(' ') }}"
+ when: openshift_upgrade_nodes_label is defined
+
+ # We got a list of nodes with the label, now we need to match these with inventory hosts
+ # using their openshift.common.hostname fact.
+ - name: Map labelled nodes to inventory hosts
+ add_host:
+ name: "{{ item }}"
+ groups: temp_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: " {{ groups['oo_nodes_to_config'] }}"
+ when: openshift_upgrade_nodes_label is defined and hostvars[item].openshift.common.hostname in nodes_to_upgrade
+ changed_when: false
+
+ # Build up the oo_nodes_to_upgrade group, use the list filtered by label if
+ # present, otherwise hit all nodes:
+ - name: Evaluate oo_nodes_to_upgrade
+ add_host:
+ name: "{{ item }}"
+ groups: oo_nodes_to_upgrade
+ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+ ansible_become: "{{ g_sudo | default(omit) }}"
+ with_items: "{{ groups['temp_nodes_to_upgrade'] | default(groups['oo_nodes_to_config']) }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index e43954453..e43954453 100644
--- a/playbooks/common/openshift-cluster/upgrades/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre.yml b/playbooks/common/openshift-cluster/upgrades/pre.yml
deleted file mode 100644
index 42a24eaf8..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre.yml
+++ /dev/null
@@ -1,311 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-
-- include: ../initialize_facts.yml
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_config
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- name: Evaluate additional groups for upgrade
- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: Evaluate etcd_hosts_to_backup
- add_host:
- name: "{{ item }}"
- groups: etcd_hosts_to_backup
- with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed on first master
- hosts: oo_first_master
- vars:
- g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
- gather_facts: no
- tasks:
- - fail:
- msg: >
- This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
- deployment types
- when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
-
- - fail:
- msg: >
- This upgrade does not support Pacemaker:
- https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
- when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
-
- # Error out in situations where the user has older versions specified in their
- # inventory in any of the openshift_release, openshift_image_tag, and
- # openshift_pkg_version variables. These must be removed or updated to proceed
- # with upgrade.
- # TODO: Should we block if you're *over* the next major release version as well?
- - fail:
- msg: >
- openshift_pkg_version is {{ openshift_pkg_version }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - fail:
- msg: >
- openshift_image_tag is {{ openshift_image_tag }} which is not a
- valid version for a {{ openshift_upgrade_target }} upgrade
- when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
- - set_fact:
- openshift_release: "{{ openshift_release[1:] }}"
- when: openshift_release is defined and openshift_release[0] == 'v'
-
- - fail:
- msg: >
- openshift_release is {{ openshift_release }} which is not a
- valid release for a {{ openshift_upgrade_target }} upgrade
- when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
-
-- include: ../../../common/openshift-cluster/initialize_openshift_version.yml
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
- # Docker role (a dependency) should be told not to do anything to installed version
- # of docker, we handle this separately during upgrade. (the inventory may have a
- # docker_version defined, we don't want to actually do it until later)
- docker_protect_installed_version: True
-
-- name: Verify master processes
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - openshift_facts:
- role: master
- local_facts:
- ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
- - name: Ensure Master is running
- service:
- name: "{{ openshift.common.service_type }}-master"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-
-- name: Verify node processes
- hosts: oo_nodes_to_config
- roles:
- - openshift_facts
- - openshift_docker_facts
- tasks:
- - name: Ensure Node is running
- service:
- name: "{{ openshift.common.service_type }}-node"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
-
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config
- vars:
- openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- pre_tasks:
- - fail:
- msg: Verify OpenShift is already installed
- when: openshift.common.version is not defined
-
- - fail:
- msg: Verify the correct version was found
- when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
-
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
-
- - set_fact:
- g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
- when: not openshift.common.is_containerized | bool
-
- - name: Verify containers are available for upgrade
- command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
- when: not openshift.common.is_containerized | bool
-
- - name: Verify OpenShift RPMs are available for upgrade
- fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
-
- - fail:
- msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
- when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
-
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
- tasks:
- # Only check if docker upgrade is required if docker_upgrade is not
- # already set to False.
- - include: docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
-
- # Additional checks for Atomic hosts:
-
- - name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
-
- - set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
-
- - fail:
- msg: This playbook requires access to Docker 1.10 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
-
- - set_fact:
- pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
- hosts: localhost
- connection: local
- become: no
- vars:
- pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
- tasks:
- - set_fact:
- pre_upgrade_completed: "{{ hostvars
- | oo_select_keys(pre_upgrade_hosts)
- | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
- - set_fact:
- pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
- when: pre_upgrade_failed | length > 0
-
-###############################################################################
-# Backup etcd
-###############################################################################
-- name: Backup etcd
- hosts: etcd_hosts_to_backup
- vars:
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
- roles:
- - openshift_facts
- tasks:
- # Ensure we persist the etcd role for this host in openshift_facts
- - openshift_facts:
- role: etcd
- local_facts: {}
- when: "'etcd' not in openshift"
-
- - stat: path=/var/lib/openshift
- register: var_lib_openshift
-
- - stat: path=/var/lib/origin
- register: var_lib_origin
-
- - name: Create origin symlink if necessary
- file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
- when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
- # TODO: replace shell module with command and update later checks
- # We assume to be using the data dir for all backups.
- - name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
- register: avail_disk
-
- # TODO: replace shell module with command and update later checks
- - name: Check current embedded etcd disk usage
- shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
- register: etcd_disk_usage
- when: embedded_etcd | bool
-
- - name: Abort if insufficient disk space for etcd backup
- fail:
- msg: >
- {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
- {{ avail_disk.stdout }} Kb available.
- when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
- - name: Install etcd (for etcdctl)
- action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
- when: not openshift.common.is_atomic | bool
-
- - name: Generate etcd backup
- command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
- --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
- - set_fact:
- etcd_backup_complete: True
-
- - name: Display location of etcd backup
- debug:
- msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-##############################################################################
-# Gate on etcd backup
-##############################################################################
-- name: Gate on etcd backup
- hosts: localhost
- connection: local
- become: no
- tasks:
- - set_fact:
- etcd_backup_completed: "{{ hostvars
- | oo_select_keys(groups.etcd_hosts_to_backup)
- | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
- - set_fact:
- etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
- - fail:
- msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
- when: etcd_backup_failed | length > 0
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
new file mode 100644
index 000000000..8ecae4539
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
@@ -0,0 +1,6 @@
+---
+- name: Flag pre-upgrade checks complete for hosts without errors
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - set_fact:
+ pre_upgrade_complete: True
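
The play above only flags hosts that made it through every pre-upgrade play without error; the flag itself is consumed by a separate localhost gate, in the same style as the etcd-backup gate shown earlier in this diff. A minimal sketch of such a gate, assuming the same oo_* groups and the oo_select_keys/oo_collect filter plugins used throughout these playbooks:

    - name: Gate on pre-upgrade checks
      hosts: localhost
      connection: local
      become: no
      vars:
        pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_upgrade) | union(groups.oo_etcd_to_config) }}"
      tasks:
      - set_fact:
          pre_upgrade_completed: "{{ hostvars
                                     | oo_select_keys(pre_upgrade_hosts)
                                     | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
      - fail:
          msg: "Upgrade cannot continue. These hosts did not complete pre-upgrade checks: {{ pre_upgrade_hosts | difference(pre_upgrade_completed) | join(',') }}"
        when: pre_upgrade_hosts | difference(pre_upgrade_completed) | length > 0
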
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/roles b/playbooks/common/openshift-cluster/upgrades/pre/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
new file mode 100644
index 000000000..06eb5f936
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -0,0 +1,31 @@
+---
+- name: Verify master processes
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-api"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
+
+ - name: Ensure HA Master is running
+ service:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+ enabled: yes
+ when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
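
Which systemd units these tasks end up managing depends on openshift.common.service_type. As a rough mapping, assuming service_type resolves to "origin" for origin deployments and "atomic-openshift" for enterprise ones (consistent with the g_new_service_name expression later in this patch):

    # Illustrative unit names only:
    #   single master : origin-master             / atomic-openshift-master
    #   HA masters    : origin-master-api         / atomic-openshift-master-api
    #                   origin-master-controllers / atomic-openshift-master-controllers
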
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
new file mode 100644
index 000000000..ba4d77617
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -0,0 +1,23 @@
+---
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+  # Only check whether a docker upgrade is required if docker_upgrade is not
+  # already set to False.
+ - include: ../docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+ # Additional checks for Atomic hosts:
+
+ - name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
+
+ - set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
+
+ - fail:
+ msg: This playbook requires access to Docker 1.10 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
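
The Atomic-host check leans on rpm's --queryformat to emit a tiny YAML document that from_yaml can parse directly. On a host with docker installed, the registered stdout would look roughly like the following (version number illustrative), so l_docker_version.curr_version carries the installed version and the empty avail_version falls back to curr_version in the final comparison:

    ---
    curr_version: 1.10.3
    avail_version:
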
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
new file mode 100644
index 000000000..9a959a959
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
@@ -0,0 +1,37 @@
+---
+- name: Verify upgrade can proceed on first master
+ hosts: oo_first_master
+ gather_facts: no
+ tasks:
+ - fail:
+ msg: >
+ This upgrade is only supported for origin, openshift-enterprise, and online
+ deployment types
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+ # Error out in situations where the user has older versions specified in their
+ # inventory in any of the openshift_release, openshift_image_tag, and
+ # openshift_pkg_version variables. These must be removed or updated to proceed
+ # with upgrade.
+ # TODO: Should we block if you're *over* the next major release version as well?
+ - fail:
+ msg: >
+ openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - fail:
+ msg: >
+ openshift_image_tag is {{ openshift_image_tag }} which is not a
+ valid version for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+ - set_fact:
+ openshift_release: "{{ openshift_release[1:] }}"
+ when: openshift_release is defined and openshift_release[0] == 'v'
+
+ - fail:
+ msg: >
+ openshift_release is {{ openshift_release }} which is not a
+ valid release for a {{ openshift_upgrade_target }} upgrade
+ when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
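
A worked example of these guards, assuming openshift_upgrade_target is 3.3 and using purely illustrative inventory values:

    # openshift_pkg_version: "-3.2.1.13" -> split('-',1).1 == "3.2.1.13", compares < 3.3, play fails
    # openshift_image_tag:   "v3.3.0.32" -> split('v',1).1 == "3.3.0.32", compares >= 3.3, check passes
    # openshift_release:     "v3.3"      -> leading "v" stripped, then must compare equal to 3.3
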
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
new file mode 100644
index 000000000..354af3cde
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml
@@ -0,0 +1,13 @@
+---
+- name: Verify node processes
+ hosts: oo_nodes_to_config
+ roles:
+ - openshift_facts
+ - openshift_docker_facts
+ tasks:
+ - name: Ensure Node is running
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: started
+ enabled: yes
+ when: openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
new file mode 100644
index 000000000..9632626a4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -0,0 +1,45 @@
+---
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ vars:
+ openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+ pre_tasks:
+ - fail:
+ msg: Verify OpenShift is already installed
+ when: openshift.common.version is not defined
+
+ - fail:
+ msg: Verify the correct version was found
+ when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+
+ - set_fact:
+ g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+ when: not openshift.common.is_containerized | bool
+
+ - name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
+
+ - name: Check latest available OpenShift RPM version
+ command: >
+ {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+ failed_when: false
+ changed_when: false
+ register: avail_openshift_version
+ when: not openshift.common.is_containerized | bool
+
+ - name: Verify OpenShift RPMs are available for upgrade
+ fail:
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
+ when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+
+ - fail:
+ msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<')
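
For containerized hosts the availability check is simply a pull of the CLI image at the requested tag, while RPM hosts ask repoquery for the newest packaged version. Rendered with illustrative values (assuming an origin deployment where cli_image is openshift/origin, openshift_image_tag is v3.3.0.32, and repoquery_cmd expands to a plain repoquery invocation), the two checks amount to:

    docker pull openshift/origin:v3.3.0.32
    repoquery --qf '%{version}' "origin"
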
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index cb5103e3a..2c641e21e 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -1,40 +1,94 @@
---
###############################################################################
-# The restart playbook should be run after this playbook completes.
+# Upgrade Masters
###############################################################################
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+- name: Evaluate additional groups for upgrade
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: Evaluate etcd_hosts_to_backup
+ add_host:
+ name: "{{ item }}"
+ groups: etcd_hosts_to_backup
+ with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+- name: Backup etcd
+ hosts: etcd_hosts_to_backup
+ vars:
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ roles:
+ - openshift_facts
tasks:
- - name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+ # Ensure we persist the etcd role for this host in openshift_facts
+ - openshift_facts:
+ role: etcd
+ local_facts: {}
+ when: "'etcd' not in openshift"
+
+ - stat: path=/var/lib/openshift
+ register: var_lib_openshift
+
+ - stat: path=/var/lib/origin
+ register: var_lib_origin
+
+ - name: Create origin symlink if necessary
+ file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+ when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+ # TODO: replace shell module with command and update later checks
+  # We assume the data dir is used for all backups.
+ - name: Check available disk space for etcd backup
+ shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+ register: avail_disk
+
+ # TODO: replace shell module with command and update later checks
+ - name: Check current embedded etcd disk usage
+ shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+ register: etcd_disk_usage
+ when: embedded_etcd | bool
+
+ - name: Abort if insufficient disk space for etcd backup
+ fail:
+ msg: >
+ {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+ {{ avail_disk.stdout }} Kb available.
+ when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+ - name: Install etcd (for etcdctl)
+ action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+ when: not openshift.common.is_atomic | bool
+
+ - name: Generate etcd backup
+ command: >
+ etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
- - debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+ - set_fact:
+ etcd_backup_complete: True
- - name: Remove unused Docker images for Docker 1.10+ migration
- shell: "docker rmi `docker images -aq`"
- # Will fail on images still in use:
- failed_when: false
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+ - name: Display location of etcd backup
+ debug:
+ msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
- - name: Check Docker image count
- shell: "docker images -aq | wc -l"
- register: docker_image_count
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
- - debug: var=docker_image_count.stdout
- when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+- name: Gate on etcd backup
+ hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
-###############################################################################
-# Upgrade Masters
-###############################################################################
-- name: Upgrade master
+- name: Upgrade master packages
hosts: oo_masters_to_config
handlers:
- include: ../../../../roles/openshift_master/handlers/main.yml
@@ -45,6 +99,27 @@
- include: rpm_upgrade.yml component=master
when: not openshift.common.is_containerized | bool
+- name: Determine if service signer cert must be created
+ hosts: oo_first_master
+ tasks:
+ - name: Determine if service signer certificate must be created
+ stat:
+ path: "{{ openshift.common.config_base }}/master/service-signer.crt"
+ register: service_signer_cert_stat
+ changed_when: false
+
+# Create service signer cert when missing. Service signer certificate
+# is added to master config in the master config hook for v3_3.
+- include: create_service_signer_cert.yml
+
+- name: Upgrade master config and systemd units
+ hosts: oo_masters_to_config
+ handlers:
+ - include: ../../../../roles/openshift_master/handlers/main.yml
+ static: yes
+ roles:
+ - openshift_facts
+ tasks:
- include: "{{ master_config_hook }}"
when: master_config_hook is defined
@@ -121,9 +196,9 @@
origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
ent_reconcile_bindings: true
openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
- # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role,
- # it will be updated when we perform node upgrade.
- docker_protect_installed_version: True
+ # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
+ # restart.
+ skip_docker_role: True
tasks:
- name: Verifying the correct commandline tools are available
shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
@@ -155,69 +230,6 @@
- set_fact:
reconcile_complete: True
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-
-# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service)
-- name: Perform upgrades that may require node evacuation
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
- serial: 1
- any_errors_fatal: true
- roles:
- - openshift_facts
- handlers:
- - include: ../../../../roles/openshift_node/handlers/main.yml
- static: yes
- tasks:
- # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
- # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
- # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
- - name: Determine if node is currently scheduleable
- command: >
- {{ openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} -o json
- register: node_output
- delegate_to: "{{ groups.oo_first_master.0 }}"
- changed_when: false
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - set_fact:
- was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - name: Mark unschedulable if host is a node
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - name: Evacuate Node for Kubelet upgrade
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config
-
- - include: docker/upgrade.yml
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- - include: "{{ node_config_hook }}"
- when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
-
- - include: rpm_upgrade.yml
- vars:
- component: "node"
- openshift_version: "{{ openshift_pkg_version | default('') }}"
- when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
-
- - include: containerized_node_upgrade.yml
- when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
-
- - name: Set node schedulability
- command: >
- {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool
-
-
##############################################################################
# Gate on reconcile
##############################################################################
@@ -235,3 +247,13 @@
- fail:
msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
when: reconcile_failed | length > 0
+
+- name: Upgrade Docker on dedicated containerized etcd hosts
+ hosts: oo_etcd_to_config:!oo_nodes_to_upgrade
+ serial: 1
+ any_errors_fatal: true
+ roles:
+ - openshift_facts
+ tasks:
+ - include: docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
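
The etcd backup above lands under the deployment's data dir, which the earlier symlink task normalizes to /var/lib/origin for origin installs; with an illustrative timestamp the backup directory would be something like:

    /var/lib/origin/etcd-backup-20160901120000

The preceding disk-space guard compares du of the embedded etcd data dir against df's available space for that data dir, both in KB, and aborts the play before any backup is attempted if usage exceeds what is free.
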
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
new file mode 100644
index 000000000..9b572dcdf
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -0,0 +1,75 @@
+---
+- name: Evacuate and upgrade nodes
+ hosts: oo_nodes_to_upgrade
+ # This var must be set with -e on invocation, as it is not a per-host inventory var
+ # and is evaluated early. Values such as "20%" can also be used.
+ serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+ any_errors_fatal: true
+ roles:
+ - openshift_facts
+ - docker
+ handlers:
+ - include: ../../../../roles/openshift_node/handlers/main.yml
+ static: yes
+ pre_tasks:
+ # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+ # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+  - name: Determine if node is currently schedulable
+ command: >
+ {{ openshift.common.client_binary }} get node {{ openshift.node.nodename | lower }} -o json
+ register: node_output
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ changed_when: false
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - set_fact:
+ was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - name: Mark unschedulable if host is a node
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+ # NOTE: There is a transient "object has been modified" error here, allow a couple
+ # retries for a more reliable upgrade.
+ register: node_unsched
+ until: node_unsched.rc == 0
+ retries: 3
+ delay: 1
+
+ - name: Evacuate Node for Kubelet upgrade
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade
+ tasks:
+ - include: docker/upgrade.yml
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+
+ - include: "{{ node_config_hook }}"
+ when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_upgrade
+
+ - include: rpm_upgrade.yml
+ vars:
+ component: "node"
+ openshift_version: "{{ openshift_pkg_version | default('') }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
+
+ - include: containerized_node_upgrade.yml
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and openshift.common.is_containerized | bool
+
+ - meta: flush_handlers
+
+ - name: Set node schedulability
+ command: >
+ {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+ delegate_to: "{{ groups.oo_first_master.0 }}"
+ when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
+ register: node_sched
+ until: node_sched.rc == 0
+ retries: 3
+ delay: 1
+
+
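
The serial value above is what lets node upgrades be batched; because it is evaluated before per-host inventory variables are resolved, it has to be supplied as an extra var on the command line. An illustrative invocation (inventory path and playbook entry point are placeholders) that upgrades 20% of the nodes at a time:

    ansible-playbook -i /path/to/hosts <byo upgrade_nodes.yml entry point> \
        -e openshift_upgrade_nodes_serial="20%"
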
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 641e7cafc..684eea343 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -38,3 +38,13 @@
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps'
yaml_value: 300
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.servicesServingCert.signer.certFile'
+ yaml_value: service-signer.crt
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+ yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
+ yaml_value: service-signer.key
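
The dotted yaml_key paths used by modify_yaml map onto nested keys in master-config.yaml, so after these two tasks the relevant stanza would read approximately:

    controllerConfig:
      servicesServingCert:
        signer:
          certFile: service-signer.crt
          keyFile: service-signer.key
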
diff --git a/playbooks/common/openshift-cluster/verify_ansible_version.yml b/playbooks/common/openshift-cluster/verify_ansible_version.yml
index 2a143b065..d75b23bf7 100644
--- a/playbooks/common/openshift-cluster/verify_ansible_version.yml
+++ b/playbooks/common/openshift-cluster/verify_ansible_version.yml
@@ -1,5 +1,6 @@
---
-- hosts: localhost
+- name: Verify Ansible version is greater than or equal to 2.1.0.0
+ hosts: localhost
connection: local
become: no
gather_facts: no
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index f4392173a..c414913bf 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,5 +1,16 @@
---
- name: Configure load balancers
hosts: oo_lb_to_config
+ vars:
+ openshift_loadbalancer_frontends: "{{ (openshift_master_api_port | default(8443)
+ | oo_openshift_loadbalancer_frontends(hostvars | oo_select_keys(groups['oo_masters']),
+ openshift_use_nuage | default(false),
+ nuage_mon_rest_server_port | default(none)))
+ + openshift_loadbalancer_additional_frontends | default([]) }}"
+ openshift_loadbalancer_backends: "{{ (openshift_master_api_port | default(8443)
+ | oo_openshift_loadbalancer_backends(hostvars | oo_select_keys(groups['oo_masters']),
+ openshift_use_nuage | default(false),
+ nuage_mon_rest_server_port | default(none)))
+ + openshift_loadbalancer_additional_backends | default([]) }}"
roles:
- role: openshift_loadbalancer
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 7f60cd9e4..a53c55c14 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,5 +1,5 @@
---
-- name: Set master facts
+- name: Gather and set facts for master hosts
hosts: oo_masters_to_config
vars:
t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}"
@@ -91,7 +91,7 @@
register: g_master_mktemp
changed_when: False
-- name: Check for cached session secrets
+- name: Determine if session secrets must be generated
hosts: oo_first_master
roles:
- role: openshift_facts
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index 7304fca56..56ed09e1b 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -40,6 +40,10 @@
--cacert {{ openshift.common.config_base }}/master/ca.crt
{% endif %}
{{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
register: api_available_output
until: api_available_output.stdout == 'ok'
retries: 120
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index ba7530ed7..000e46e80 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,5 +1,5 @@
---
-- name: Configure nfs hosts
+- name: Configure nfs
hosts: oo_nfs_to_config
roles:
- role: openshift_facts
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 94c30e268..364a62dd0 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -45,7 +45,7 @@
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
-- name: Configure node instances
+- name: Configure containerized nodes
hosts: oo_containerized_master_nodes
serial: 1
vars:
@@ -60,12 +60,12 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
+ - role: openshift_common
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- role: openshift_cloud_provider
- - role: openshift_common
- role: openshift_node_dnsmasq
when: openshift.common.use_dnsmasq
- role: os_firewall
@@ -85,7 +85,7 @@
when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
-- name: Configure node instances
+- name: Configure nodes
hosts: oo_nodes_to_config:!oo_containerized_master_nodes
vars:
openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
@@ -99,12 +99,12 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
+ - role: openshift_common
- role: openshift_clock
- role: openshift_docker
- role: openshift_node_certificates
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- role: openshift_cloud_provider
- - role: openshift_common
- role: openshift_node_dnsmasq
when: openshift.common.use_dnsmasq
- role: os_firewall
@@ -153,12 +153,10 @@
- file: name={{ mktemp.stdout }} state=absent
changed_when: False
-- name: Set schedulability
+- name: Set node schedulability
hosts: oo_first_master
vars:
- openshift_nodes: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config'])
- | oo_collect('openshift.common.hostname') }}"
+ openshift_nodes: "{{ groups.oo_nodes_to_config | default([]) }}"
pre_tasks:
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
@@ -174,6 +172,10 @@
--cacert {{ openshift.common.config_base }}/master/ca.crt
{% endif %}
{{ openshift.master.api_url }}/healthz/ready
+ args:
+ # Disables the following warning:
+ # Consider using get_url or uri module rather than running curl
+ warn: no
register: api_available_output
until: api_available_output.stdout == 'ok'
retries: 120
diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml
index 1d79db353..bb3b1e780 100644
--- a/playbooks/common/openshift-node/scaleup.yml
+++ b/playbooks/common/openshift-node/scaleup.yml
@@ -6,6 +6,17 @@
roles:
- openshift_facts
+- name: Gather and set facts for first master
+ hosts: oo_first_master
+ vars:
+ openshift_master_count: "{{ groups.oo_masters | length }}"
+ pre_tasks:
+ - set_fact:
+ openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+ when: openshift_master_default_subdomain is not defined
+ roles:
+ - openshift_master_facts
+
- name: Configure docker hosts
hosts: oo_nodes_to_config
vars:
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index c5c479052..60cf21a5b 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -1,7 +1,7 @@
---
- name: Launch instance(s)
gce:
- instance_names: "{{ instances }}"
+ instance_names: "{{ instances|join(',') }}"
machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}"
image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}"
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
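
The join here reflects that the gce module takes instance_names as a single comma-delimited string rather than a YAML list; with an illustrative instances value of ['ose3-node1', 'ose3-node2'] the rendered parameter becomes:

    instance_names: "ose3-node1,ose3-node2"
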
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
index 119b376aa..12c436eaf 100644
--- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/openstack/openshift-cluster/cluster_hosts.yml
@@ -1,21 +1,21 @@
---
-g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
- | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
+ | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
-g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
-g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
-g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
-g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
-g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
+g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"
-g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
-g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
+g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}"
-g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
-g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"
+g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}"
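
These lookups switch from the tag_* group prefix to the meta-* prefix so they stay in step with the group names the launch play (further down in this diff) now assigns to hosts. Purely as an illustration, with a cluster_id of mycluster and a cluster_env of dev, an infra node would end up in groups such as:

    meta-clusterid_mycluster, meta-environment_dev, meta-host-type_node, meta-sub-host-type_infra
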
diff --git a/playbooks/openstack/openshift-cluster/dns.yml b/playbooks/openstack/openshift-cluster/dns.yml
deleted file mode 100644
index 446a1846f..000000000
--- a/playbooks/openstack/openshift-cluster/dns.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-- name: Populate oo_dns_hosts_to_update group
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- tasks:
- - name: Evaluate oo_dns_hosts_to_update
- add_host:
- name: "{{ item }}"
- groups: oo_dns_hosts_to_update
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ groups[cluster_id ~ '-dns'] }}"
-
- - name: Evaluate oo_hosts_to_add_in_dns
- add_host:
- name: "{{ item }}"
- groups: oo_hosts_to_add_in_dns
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ groups['tag_clusterid_' ~ cluster_id] }}"
-
-- name: Gather facts
- hosts: oo_hosts_to_add_in_dns
- vars_files:
- - vars.yml
- - cluster_hosts.yml
-
-- name: Configure the DNS
- hosts: oo_dns_hosts_to_update
- vars_files:
- - vars.yml
- - cluster_hosts.yml
- roles:
- # Explicitly calling openshift_facts because it appears that when
- # rhel_subscribe is skipped that the openshift_facts dependency for
- # openshift_repos is also skipped (this is the case at least for Ansible
- # 2.0.2)
- - openshift_facts
- - role: rhel_subscribe
- when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
- ansible_distribution == "RedHat" and
- lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
- default('no', True) | lower in ['no', 'false']
-
- - { role: dns,
- dns_forwarders: "{{ openstack_network_dns }}",
- dns_zones: [ novalocal, openstacklocal ],
- dns_all_hosts: "{{ g_all_hosts }}" }
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 458cf5ac7..755090f94 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -88,11 +88,6 @@ parameters:
label: Infra image
description: Name of the image for the infra node servers
- dns_image:
- type: string
- label: DNS image
- description: Name of the image for the DNS server
-
etcd_flavor:
type: string
label: Etcd flavor
@@ -113,11 +108,6 @@ parameters:
label: Infra flavor
description: Flavor of the infra node servers
- dns_flavor:
- type: string
- label: DNS flavor
- description: Flavor of the DNS server
-
outputs:
etcd_names:
@@ -168,26 +158,6 @@ outputs:
description: Floating IPs of the nodes
value: { get_attr: [ infra_nodes, floating_ip ] }
- dns_name:
- description: Name of the DNS
- value:
- get_attr:
- - dns
- - name
-
- dns_floating_ip:
- description: Floating IP of the DNS
- value:
- get_attr:
- - dns
- - addresses
- - str_replace:
- template: openshift-ansible-cluster_id-net
- params:
- cluster_id: { get_param: cluster_id }
- - 1
- - addr
-
resources:
net:
@@ -213,22 +183,7 @@ resources:
template: subnet_24_prefix.0/24
params:
subnet_24_prefix: { get_param: subnet_24_prefix }
- allocation_pools:
- - start:
- str_replace:
- template: subnet_24_prefix.3
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- end:
- str_replace:
- template: subnet_24_prefix.254
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- dns_nameservers:
- - str_replace:
- template: subnet_24_prefix.2
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
+ dns_nameservers: { get_param: dns_nameservers }
router:
type: OS::Neutron::Router
@@ -428,44 +383,6 @@ resources:
port_range_min: 443
port_range_max: 443
- dns-secgrp:
- type: OS::Neutron::SecurityGroup
- properties:
- name:
- str_replace:
- template: openshift-ansible-cluster_id-dns-secgrp
- params:
- cluster_id: { get_param: cluster_id }
- description:
- str_replace:
- template: Security group for cluster_id cluster DNS
- params:
- cluster_id: { get_param: cluster_id }
- rules:
- - direction: ingress
- protocol: tcp
- port_range_min: 22
- port_range_max: 22
- remote_ip_prefix: { get_param: ssh_incoming }
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_mode: remote_group_id
- remote_group_id: { get_resource: etcd-secgrp }
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_mode: remote_group_id
- remote_group_id: { get_resource: master-secgrp }
- - direction: ingress
- protocol: udp
- port_range_min: 53
- port_range_max: 53
- remote_mode: remote_group_id
- remote_group_id: { get_resource: node-secgrp }
-
etcd:
type: OS::Heat::ResourceGroup
properties:
@@ -599,79 +516,3 @@ resources:
cluster_id: { get_param: cluster_id }
depends_on:
- interface
-
- dns:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: cluster_id-dns
- params:
- cluster_id: { get_param: cluster_id }
- key_name: { get_resource: keypair }
- image: { get_param: dns_image }
- flavor: { get_param: dns_flavor }
- networks:
- - port: { get_resource: dns-port }
- user_data: { get_resource: dns-config }
- user_data_format: RAW
-
- dns-port:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: net }
- fixed_ips:
- - subnet: { get_resource: subnet }
- ip_address:
- str_replace:
- template: subnet_24_prefix.2
- params:
- subnet_24_prefix: { get_param: subnet_24_prefix }
- security_groups:
- - { get_resource: dns-secgrp }
-
- dns-floating-ip:
- type: OS::Neutron::FloatingIP
- properties:
- floating_network: { get_param: external_net }
- port_id: { get_resource: dns-port }
-
- dns-config:
- type: OS::Heat::MultipartMime
- properties:
- parts:
- - config:
- str_replace:
- template: |
- #cloud-config
- disable_root: true
-
- system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
- write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- content: |
- Defaults:openshift !requiretty
- - path: /etc/sysconfig/network-scripts/ifcfg-eth0
- content: |
- DEVICE="eth0"
- BOOTPROTO="dhcp"
- DNS1="$dns1"
- DNS2="$dns2"
- PEERDNS="no"
- ONBOOT="yes"
- runcmd:
- - [ "/usr/bin/systemctl", "restart", "network" ]
- params:
- $dns1:
- get_param:
- - dns_nameservers
- - 0
- $dns2:
- get_param:
- - dns_nameservers
- - 1
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
index f83f2c984..435139849 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
@@ -107,7 +107,7 @@ resources:
flavor: { get_param: flavor }
networks:
- port: { get_resource: port }
- user_data: { get_file: user-data }
+ user_data: { get_resource: config }
user_data_format: RAW
metadata:
environment: { get_param: cluster_env }
@@ -128,3 +128,25 @@ resources:
properties:
floating_network: { get_param: floating_network }
port_id: { get_resource: port }
+
+ config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ disable_root: true
+
+ hostname: { get_param: name }
+
+ system_info:
+ default_user:
+ name: openshift
+ sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ write_files:
+ - path: /etc/sudoers.d/00-openshift-no-requiretty
+ permissions: 440
+ # content: Defaults:openshift !requiretty
+ # Encoded in base64 to be sure that we do not forget the trailing newline or
+ # sudo will not be able to parse that file
+ encoding: b64
+ content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg==
diff --git a/playbooks/openstack/openshift-cluster/files/user-data b/playbooks/openstack/openshift-cluster/files/user-data
deleted file mode 100644
index eb65f7cec..000000000
--- a/playbooks/openstack/openshift-cluster/files/user-data
+++ /dev/null
@@ -1,13 +0,0 @@
-#cloud-config
-disable_root: true
-
-system_info:
- default_user:
- name: openshift
- sudo: ["ALL=(ALL) NOPASSWD: ALL"]
-
-write_files:
- - path: /etc/sudoers.d/00-openshift-no-requiretty
- permissions: 440
- content: |
- Defaults:openshift !requiretty
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index 5cf543204..eb2c4269a 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -42,12 +42,10 @@
-P master_image={{ deployment_vars[deployment_type].image }}
-P node_image={{ deployment_vars[deployment_type].image }}
-P infra_image={{ deployment_vars[deployment_type].image }}
- -P dns_image={{ deployment_vars[deployment_type].image }}
-P etcd_flavor={{ openstack_flavor["etcd"] }}
-P master_flavor={{ openstack_flavor["master"] }}
-P node_flavor={{ openstack_flavor["node"] }}
-P infra_flavor={{ openstack_flavor["infra"] }}
- -P dns_flavor={{ openstack_flavor["dns"] }}
openshift-ansible-{{ cluster_id }}-stack'
args:
chdir: '{{ playbook_dir }}'
@@ -106,7 +104,7 @@
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'tag_environment_{{ cluster_env }}, tag_host-type_etcd, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
+ groups: 'meta-environment_{{ cluster_env }}, meta-host-type_etcd, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "etcd"
with_together:
@@ -120,7 +118,7 @@
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'tag_environment_{{ cluster_env }}, tag_host-type_master, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
+ groups: 'meta-environment_{{ cluster_env }}, meta-host-type_master, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "master"
with_together:
@@ -134,7 +132,7 @@
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_compute, tag_clusterid_{{ cluster_id }}'
+ groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_compute, meta-clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "compute"
with_together:
@@ -148,7 +146,7 @@
ansible_ssh_host: '{{ item[2] }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_infra, tag_clusterid_{{ cluster_id }}'
+ groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_infra, meta-clusterid_{{ cluster_id }}'
openshift_node_labels:
type: "infra"
with_together:
@@ -156,14 +154,6 @@
- '{{ parsed_outputs.infra_ips }}'
- '{{ parsed_outputs.infra_floating_ips }}'
- - name: Add DNS groups and variables
- add_host:
- hostname: '{{ parsed_outputs.dns_name }}'
- ansible_ssh_host: '{{ parsed_outputs.dns_floating_ip }}'
- ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
- ansible_become: "{{ deployment_vars[deployment_type].become }}"
- groups: '{{ cluster_id }}-dns'
-
- name: Wait for ssh
wait_for:
host: '{{ item }}'
@@ -172,7 +162,6 @@
- '{{ parsed_outputs.master_floating_ips }}'
- '{{ parsed_outputs.node_floating_ips }}'
- '{{ parsed_outputs.infra_floating_ips }}'
- - '{{ parsed_outputs.dns_floating_ip }}'
- name: Wait for user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -184,7 +173,6 @@
- '{{ parsed_outputs.master_floating_ips }}'
- '{{ parsed_outputs.node_floating_ips }}'
- '{{ parsed_outputs.infra_floating_ips }}'
- - '{{ parsed_outputs.dns_floating_ip }}'
- include: update.yml
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index 60372e262..de68f5207 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -7,7 +7,7 @@
vars_files:
- vars.yml
tasks:
- - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
+ - set_fact: scratch_group=meta-clusterid_{{ cluster_id }}
when: cluster_id != ''
- set_fact: scratch_group=all
when: cluster_id == ''
@@ -31,4 +31,4 @@
- vars.yml
tasks:
- debug:
- msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}"
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index 980ab7337..4527f4a28 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: "{{ (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([])) }}"
+ with_items: "{{ (groups['meta-environment_' ~ cluster_env]|default([])) | intersect(groups['meta-clusterid_' ~ cluster_id ]|default([])) }}"
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml
index 6d4d23963..332f27da7 100644
--- a/playbooks/openstack/openshift-cluster/update.yml
+++ b/playbooks/openstack/openshift-cluster/update.yml
@@ -15,8 +15,6 @@
- include_vars: vars.yml
- include_vars: cluster_hosts.yml
-- include: dns.yml
-
- name: Populate oo_hosts_to_update group
hosts: localhost
connection: local
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index 17063ef34..62111dacf 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -15,7 +15,6 @@ openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') |
openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
default('0.0.0.0/0', True) }}"
openstack_flavor:
- dns: "{{ lookup('oo_option', 'dns_flavor' ) | default('m1.small', True) }}"
etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}"
master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}"
infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}"