Diffstat (limited to 'playbooks/common')
31 files changed, 400 insertions, 142 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index ef8233b67..6d82fa928 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -17,6 +17,8 @@
 - name: Create service signer certificate
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Create remote temp directory for creating certs
     command: mktemp -d /tmp/openshift-ansible-XXXXXXX
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index ffb11670d..8392e21ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -51,13 +51,19 @@
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result
     until: not (l_docker_upgrade_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_docker_upgrade_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0

   - include_tasks: tasks/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool
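The drain hunk above encodes a small decision: when openshift_upgrade_nodes_drain_timeout is unset (0), `oc adm drain --timeout=0s` blocks until the node drains, so a single retry suffices; when a timeout is set, the drain is allowed to fail without aborting the play, because `failed_when` only treats the failure as fatal in the no-timeout case. A minimal standalone sketch of the same pattern — `drain_timeout` and the `sleep` stub are stand-ins for the real variable and the real `oc adm drain` call:

---
# Sketch: retry/timeout interplay used by the drain tasks in this patch.
- hosts: localhost
  gather_facts: false
  vars:
    drain_timeout: 0            # 0 = block until the drain completes
  tasks:
  - name: Simulated drain
    command: sleep 1            # stand-in for: oc adm drain --timeout={{ drain_timeout }}s
    register: drain_result
    until: not (drain_result is failed)
    # No timeout: the command itself blocks, so one retry is enough.
    # With a timeout: don't retry; defer to failed_when below.
    retries: "{{ 1 if (drain_timeout | int) == 0 else 0 }}"
    delay: 5
    # Only a failed drain with no timeout requested is fatal; otherwise the
    # upgrade proceeds and the drain is best-effort.
    failed_when:
    - drain_result is failed
    - drain_timeout | int == 0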
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8ee83819e..ba783638d 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -5,7 +5,8 @@
     g_new_master_hosts: []
     g_new_node_hosts: []

-- import_playbook: ../../../init/facts.yml
+- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/cluster_facts.yml

 - name: Ensure firewall is not switched during upgrade
   hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index fc1cbf32a..07be0b0d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -31,7 +31,7 @@
     with_items: " {{ groups['oo_nodes_to_config'] }}"
     when:
     - hostvars[item].openshift is defined
-    - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+    - hostvars[item].openshift.common.hostname | lower in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
     changed_when: false

 # Build up the oo_nodes_to_upgrade group, use the list filtered by label if
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 1b57521df..9c927c0a1 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -1,7 +1,15 @@
 ---
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
+####################################################################################
+# Post upgrade - Upgrade web console, default router, default registry, and examples
+####################################################################################
+- name: Upgrade web console
+  hosts: oo_first_master
+  roles:
+  - role: openshift_web_console
+    when:
+    - openshift_web_console_install | default(true) | bool
+    - openshift_upgrade_target is version_compare('3.9','>=')
+
 - name: Upgrade default router and default registry
   hosts: oo_first_master
   vars:
@@ -105,6 +113,25 @@
     registry_url: "{{ openshift.master.registry_url }}"
     openshift_hosted_templates_import_command: replace

+  post_tasks:
+  # we need to migrate customers to the new pattern of pushing to the registry via dns
+  # Step 1: verify the certificates have the docker registry service name
+  - shell: >
+      echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
+    register: cert_output
+    changed_when: false
+    failed_when:
+    - cert_output.rc not in [0, 1]
+
+  # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
+  - name: set a fact to include the registry certs playbook if needed
+    set_fact:
+      openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}"
+
+# Run the redeploy certs based upon the certificates
+- when: hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry
+  import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml
+
 # Check for warnings to be printed at the end of the upgrade:
 - name: Clean up and display warnings
   hosts: oo_masters_to_config
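The post_tasks hunk above probes the registry's serving certificate for the docker-registry.default.svc subject alternative name before deciding whether to re-roll registry certificates; grep's exit code 1 (SAN absent) is deliberately not treated as a task failure. The same probe works standalone; a sketch, assuming a default registry listening on port 5000 in the `default` project:

---
# Sketch: standalone version of the SAN probe from post_control_plane.yml.
- hosts: localhost
  gather_facts: false
  tasks:
  - name: Check registry cert for the service DNS SAN
    shell: >
      echo -n | openssl s_client -showcerts
      -servername docker-registry.default.svc
      -connect docker-registry.default.svc:5000
      | openssl x509 -text
      | grep -A1 'X509v3 Subject Alternative Name:'
      | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
    register: cert_output
    changed_when: false
    failed_when: cert_output.rc not in [0, 1]   # rc 1 = SAN missing, not an error

  - debug:
      msg: "SAN present: {{ cert_output.rc == 0 }}"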
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index da63450b8..44af37b2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -5,8 +5,6 @@
 # Pre-upgrade
 - import_playbook: ../initialize_nodes_to_upgrade.yml

-- import_playbook: verify_cluster.yml
-
 - name: Update repos on upgrade hosts
   hosts: "{{ l_upgrade_repo_hosts }}"
   roles:
@@ -49,10 +47,12 @@
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
     # defined, and overriding the normal behavior of protecting the installed version
     openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
+    # openshift_protect_installed_version is passed in via upgrade_control_plane.yml
     # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
     # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml

+- import_playbook: verify_cluster.yml
+
 # If we're only upgrading nodes, we need to ensure masters are already upgraded
 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config
@@ -60,7 +60,7 @@
   - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
     when:
     - l_upgrade_nodes_only | default(False) | bool
-    - openshift.common.version != openshift_version
+    - not openshift.common.version | match(openshift_version)

 # If we're only upgrading nodes, skip this.
 - import_playbook: ../../../../openshift-master/private/validate_restart.yml
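The last hunk above loosens a strict equality check: `match` treats openshift_version as an anchored pattern against the installed version fact, so values that differ only in trailing precision no longer trip the node-only upgrade guard. A rough illustration using the `match` test spelling (the hunk uses the older `| match()` filter form common on Ansible releases of that era; both variable values here are made up):

---
# Sketch: prefix-style version check instead of strict equality.
- hosts: localhost
  gather_facts: false
  vars:
    installed_version: "3.9.0"   # stand-in for openshift.common.version
    target_version: "3.9"        # stand-in for openshift_version
  tasks:
  - name: Fail only when the installed version does not start with the target
    fail:
      msg: "Master running {{ installed_version }} must be upgraded first."
    when: not installed_version is match(target_version)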
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 693ab2d96..463a05688 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -17,6 +17,7 @@
       valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_pkg_version is defined
+    - openshift_pkg_version != ""
     - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')

 - fail:
@@ -25,6 +26,7 @@
       valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_image_tag is defined
+    - openshift_image_tag != ""
     - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')

 - set_fact:
@@ -92,3 +94,25 @@
       state: started
       enabled: yes
     with_items: "{{ master_services }}"
+
+# Until openshift-ansible is determining which host is the CA host we
+# must (unfortunately) ensure that the first host in the etcd group is
+# the etcd CA host.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1469358
+- name: Verify we can proceed on first etcd
+  hosts: oo_first_etcd
+  gather_facts: no
+  tasks:
+  - name: Ensure CA exists on first etcd
+    stat:
+      path: /etc/etcd/generated_certs
+    register: __etcd_ca_stat
+
+  - fail:
+      msg: >
+        In order to correct an etcd certificate signing problem
+        upgrading may require re-generating etcd certificates. Please
+        ensure that the /etc/etcd/generated_certs directory exists on
+        the first host defined in your [etcd] group.
+    when:
+    - not __etcd_ca_stat.stat.exists | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 4c1156f4b..45ddf7eea 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -21,7 +21,7 @@
   block:
   - name: Check latest available OpenShift RPM version
     repoquery:
-      name: "{{ openshift_service_type }}"
+      name: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
     ignore_excluders: true
     register: repoquery_out
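The repoquery change above narrows the package query to the release being targeted instead of asking for the newest package of any version. A quick sketch of what the Jinja concatenation renders to (the origin/3.10 values are illustrative; atomic-openshift is the other common service type):

---
# Sketch: what the repoquery `name` expression evaluates to.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_service_type: origin
    openshift_release: '3.10'
  tasks:
  - debug:
      msg: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
    # => "origin-3.10*" when openshift_release is set, "origin" otherwise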
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 412075d41..40e245d75 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,6 +2,7 @@
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
+
 - name: Backup and upgrade etcd
   import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
@@ -22,6 +23,8 @@
 # See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
 - name: Pre master upgrade - Upgrade all storage
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Upgrade all storage
     command: >
@@ -30,7 +33,6 @@
     register: l_pb_upgrade_control_plane_pre_upgrade_storage
     when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
     failed_when:
-    - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
    - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
    - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
@@ -46,13 +48,10 @@
 # support for optional hooks to be defined.
 - name: Upgrade master
   hosts: oo_masters_to_config
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
+  roles:
+  - openshift_facts
   tasks:
-  - import_role:
-      name: openshift_facts
-
   # Run the pre-upgrade hook if defined:
   - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
     when: openshift_master_upgrade_pre_hook is defined
@@ -71,6 +70,12 @@
   - include_tasks: "{{ openshift_master_upgrade_hook }}"
     when: openshift_master_upgrade_hook is defined

+  - name: Disable master controller
+    service:
+      name: "{{ openshift_service_type }}-master-controllers"
+      enabled: false
+    when: openshift.common.rolling_restart_mode == 'system'
+
   - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml
     when: openshift.common.rolling_restart_mode == 'system'
@@ -93,7 +98,6 @@
     - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - openshift_version is version_compare('3.7','<')
     failed_when:
-    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
     - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
     run_once: true
@@ -127,6 +131,7 @@
   hosts: oo_masters_to_config
   roles:
   - { role: openshift_cli }
+  - { role: openshift_facts }
   vars:
     __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
   tasks:
@@ -228,7 +233,6 @@
     register: l_pb_upgrade_control_plane_post_upgrade_storage
     when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     failed_when:
-    - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
     - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
@@ -289,12 +293,18 @@
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_control_plane_drain_result
     until: not (l_upgrade_control_plane_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_control_plane_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0

   roles:
   - openshift_facts
@@ -302,13 +312,9 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-  - name: Set node schedulability
-    oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
-      schedulable: True
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    retries: 10
-    delay: 5
-    register: node_schedulable
-    until: node_schedulable is succeeded
-    when: node_unschedulable is changed
+  - import_role:
+      name: openshift_manage_node
+      tasks_from: config.yml
+    vars:
+      openshift_master_host: "{{ groups.oo_first_master.0 }}"
+      openshift_manage_node_is_master: true
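The "Upgrade master" play above runs with `serial: 1` and honors optional per-master hooks (openshift_master_upgrade_pre_hook and openshift_master_upgrade_hook, pulled in via include_tasks). A minimal sketch of wiring one up — the file path is hypothetical and the tasks are placeholders:

---
# Sketch: a pre-upgrade hook task file, referenced from inventory as
#   openshift_master_upgrade_pre_hook: /path/to/master_pre_hook.yml
# include_tasks loads it as a plain task list, once per master in turn.
- name: Take this master out of the load balancer rotation
  debug:
    msg: "Would remove {{ inventory_hostname }} from the LB here"

- name: Pause to let in-flight requests finish
  pause:
    seconds: 10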
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 464af3ae6..915fae9fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -33,27 +33,28 @@
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_nodes_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0

   post_tasks:
   - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-  - name: Set node schedulability
-    oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
-      schedulable: True
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    retries: 10
-    delay: 5
-    register: node_schedulable
-    until: node_schedulable is succeeded
-    when: node_unschedulable is changed
+  - import_role:
+      name: openshift_manage_node
+      tasks_from: config.yml
+    vars:
+      openshift_master_host: "{{ groups.oo_first_master.0 }}"

 - name: Re-enable excluders
   hosts: oo_nodes_to_upgrade:!oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 6d59bfd0b..e259b5d09 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -50,11 +50,11 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)
-    retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}"
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
     delay: 5
     failed_when:
     - l_upgrade_nodes_drain_result is failed
-    - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0

 # Alright, let's clean up!
 - name: clean up the old scale group
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml
@@ -0,0 +1 @@
+---
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/roles b/playbooks/common/openshift-cluster/upgrades/v3_10/roles
new file mode 120000
index 000000000..415645be6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/roles
@@ -0,0 +1 @@
+../../../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml
new file mode 100644
index 000000000..ec1da6d39
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml
@@ -0,0 +1,7 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- import_playbook: upgrade_control_plane.yml
+
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
new file mode 100644
index 000000000..64ee03562
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
@@ -0,0 +1,58 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- import_playbook: ../init.yml
+  vars:
+    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+
+- name: Configure the upgrade target for the common upgrade tasks 3.10
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tasks:
+  - meta: clear_facts
+  - set_fact:
+      openshift_upgrade_target: '3.10'
+      openshift_upgrade_min: '3.9'
+      openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
+  vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
+
+- import_playbook: ../upgrade_control_plane.yml
+  vars:
+    openshift_release: '3.10'
+
+- import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+  tasks:
+  - import_role:
+      name: openshift_web_console
+      tasks_from: remove_old_asset_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
new file mode 100644
index 000000000..eea1b250e
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
@@ -0,0 +1,35 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- import_playbook: ../init.yml
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: oo_all_hosts
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: '3.10'
+      openshift_upgrade_min: '3.9'
+      openshift_release: '3.10'
+
+- import_playbook: ../pre/config.yml
+  vars:
+    l_upgrade_repo_hosts: "oo_nodes_to_config"
+    l_upgrade_no_proxy_hosts: "oo_all_hosts"
+    l_upgrade_health_check_hosts: "oo_nodes_to_config"
+    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+    l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
+
+- import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml
new file mode 100644
index 000000000..d8540abfb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml
@@ -0,0 +1,7 @@
+---
+- name: Verify 3.8 specific upgrade checks
+  hosts: oo_first_master
+  roles:
+  - { role: lib_openshift }
+  tasks:
+  - debug: msg="noop"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index d520c6aee..a2d21b69f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -23,6 +23,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False

 - import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index eb5f07ae0..9aa5a3b64 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -35,6 +35,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False

 - import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 4daa9e490..cc2ec2709 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -23,6 +23,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False

 - import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 8d42e4c91..b1ecc75d3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -35,6 +35,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False

 - import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 49e691352..9c7688981 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,7 @@
   hosts: oo_first_master
   roles:
   - { role: lib_openshift }
+  - { role: openshift_facts }
   tasks:
   - name: Check for invalid namespaces and SDN errors
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 0f74e0137..a73b7d63a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -23,6 +23,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False

 - import_playbook: validator.yml
@@ -35,8 +36,6 @@
 # Pre-upgrade completed

 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"

 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index a2f316c25..723b2e533 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,7 +14,8 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+  when: not skip_version_info | default(false)

 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -35,6 +36,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False

 - import_playbook: validator.yml
@@ -47,8 +49,6 @@
 # Pre-upgrade completed

 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"

 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
index 1d4d1919c..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -1,20 +1 @@
 ---
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.election.lockName'
-    yaml_value: 'openshift-master-controllers'
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
-    yaml_value: service-signer.crt
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
-    yaml_value: service-signer.key
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
-    yaml_key: servingInfo.clientCA
-    yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 0aea5069d..ec1da6d39 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -2,54 +2,6 @@
 #
 # Full Control Plane + Nodes Upgrade
 #
-- import_playbook: ../init.yml
+- import_playbook: upgrade_control_plane.yml

-- name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.9'
-      openshift_upgrade_min: '3.7'
-
-- import_playbook: ../pre/config.yml
-  vars:
-    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
-    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
-    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
-
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
-
-# Pre-upgrade completed
-
-- import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
-
-# All controllers must be stopped at the same time then restarted
-- name: Cycle all controller services to force new leader election mode
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_facts
-  tasks:
-  - name: Stop {{ openshift.common.service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift.common.service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
-      state: started
-
-- import_playbook: ../upgrade_nodes.yml
-
-- import_playbook: ../post_control_plane.yml
+- import_playbook: upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index ef9871008..8792295c6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,14 +14,29 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

-- name: Configure the upgrade target for the common upgrade tasks
+## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plane
+## If they've specified pkg_version or image_tag preserve that for later use
+- name: Configure the upgrade target for the common upgrade tasks 3.8
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
   - set_fact:
-      openshift_upgrade_target: '3.9'
+      openshift_upgrade_target: '3.8'
       openshift_upgrade_min: '3.7'
+      openshift_release: '3.8'
+      _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+      openshift_pkg_version: ''
+      _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+      l_double_upgrade_cp: True
+    when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+  - name: set l_force_image_tag_to_version = True
+    set_fact:
+      # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
+      # to match 3.8 version
+      l_force_image_tag_to_version: True
+    when: _requested_image_tag is defined

 - import_playbook: ../pre/config.yml
   # These vars are meant to exclude oo_nodes from plays that would otherwise include
   # them by default.
@@ -35,21 +50,70 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
+  when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')

-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
+- name: Flag pre-upgrade checks complete for hosts without errors 3.8
   hosts: oo_masters_to_config:oo_etcd_to_config
   tasks:
   - set_fact:
       pre_upgrade_complete: True
+    when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')

 # Pre-upgrade completed
+- name: Intermediate 3.8 Upgrade
+  import_playbook: ../upgrade_control_plane.yml
+  when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')

-- import_playbook: ../upgrade_control_plane.yml
+## 3.8 upgrade complete we should now be able to upgrade to 3.9
+
+- name: Configure the upgrade target for the common upgrade tasks 3.9
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tasks:
+  - meta: clear_facts
+  - set_fact:
+      openshift_upgrade_target: '3.9'
+      openshift_upgrade_min: '3.8'
+      openshift_release: '3.9'
+      openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"
+  # Set the user's specified image_tag for 3.9 upgrade if it was provided.
+  - set_fact:
+      openshift_image_tag: "{{ _requested_image_tag }}"
+      l_force_image_tag_to_version: False
+    when: _requested_image_tag is defined
+  # If the user didn't specify an image_tag, we need to force update image_tag
+  # because it will have already been set during 3.8. If we aren't running
+  # a double upgrade, then we can preserve image_tag because it will still
+  # be the user provided value.
+  - set_fact:
+      l_force_image_tag_to_version: True
+    when:
+    - l_double_upgrade_cp is defined and l_double_upgrade_cp
+    - _requested_image_tag is not defined
+
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
+    openshift_version_reinit: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
+
+- import_playbook: ../upgrade_control_plane.yml

 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
@@ -58,13 +122,21 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift.common.service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift.common.service_type }}-master-controllers
-    systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
-      state: started
+  - name: Restart master controllers to force new leader election mode
+    service:
+      name: "{{ openshift_service_type }}-master-controllers"
+      state: restarted
+    when: openshift.common.rolling_restart_mode == 'service'
+  - name: Re-enable master controllers to force new leader election mode
+    service:
+      name: "{{ openshift_service_type }}-master-controllers"
+      enabled: true
+    when: openshift.common.rolling_restart_mode == 'system'

 - import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+  tasks:
+  - import_role:
+      name: openshift_web_console
+      tasks_from: remove_old_asset_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 1d1b255c1..859b1d88b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -12,6 +12,7 @@
   - set_fact:
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.7'
+      openshift_release: '3.9'

 - import_playbook: ../pre/config.yml
   vars:
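The v3_9 control-plane playbook above performs a double hop (3.7 → 3.8 → 3.9), so it has to stash any user-supplied openshift_pkg_version/openshift_image_tag before the intermediate 3.8 pass clobbers them, then restore them for the 3.9 pass. The save/restore pattern in isolation — variable names match the diff, the value is illustrative:

---
# Sketch: stashing a user-supplied var across an intermediate upgrade pass,
# as done with _requested_pkg_version/_requested_image_tag above.
- hosts: localhost
  gather_facts: false
  vars:
    openshift_pkg_version: '-3.9.0'   # illustrative user-supplied value
  tasks:
  - name: Save the requested value and blank it for the 3.8 pass
    set_fact:
      _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
      openshift_pkg_version: ''

  - name: Restore the requested value for the 3.9 pass
    set_fact:
      openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"

  - debug:
      var: openshift_pkg_version      # => "-3.9.0" again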
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
index 4bd2d87b1..d8540abfb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -1,5 +1,5 @@
 ---
-- name: Verify 3.9 specific upgrade checks
+- name: Verify 3.8 specific upgrade checks
   hosts: oo_first_master
   roles:
   - { role: lib_openshift }
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
new file mode 100644
index 000000000..089645d07
--- /dev/null
+++ b/playbooks/common/private/components.yml
@@ -0,0 +1,38 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally considered any part of OpenShift that runs on
+# top of the cluster and may be considered optional. Over time, much of OpenShift
+# above the Kubernetes apiserver and masters may be considered components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can
+#    perform root level actions against the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available to allow components to schedule - this must
+#    include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+  when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+  when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+  when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+  when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+  when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+  when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+  when: openshift_management_install_management | default(false) | bool
diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml
new file mode 100644
index 000000000..0a5f1142b
--- /dev/null
+++ b/playbooks/common/private/control_plane.yml
@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# It is not required for any nodes to be configured, or passed to be configured,
+# when this playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+#    and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from the outside of the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+#    access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+  when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+  when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml
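The new components.yml gates each optional layer on an inventory boolean, with web console and service catalog installed by default and everything else opt-in. A sketch of inventory vars that exercise those switches — the variable names are taken from the playbook above, the values are just an example:

---
# Sketch: group_vars toggles consumed by playbooks/common/private/components.yml.
openshift_web_console_install: true            # defaults true (installed unless disabled)
openshift_enable_service_catalog: true         # defaults true
openshift_metrics_install_metrics: false       # opt-in components default false
openshift_logging_install_logging: false
openshift_hosted_prometheus_deploy: false
openshift_management_install_management: false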