Diffstat (limited to 'playbooks/common')
45 files changed, 484 insertions, 1462 deletions
diff --git a/playbooks/common/openshift-cluster/filter_plugins b/playbooks/common/openshift-cluster/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-cluster/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/library b/playbooks/common/openshift-cluster/library
deleted file mode 120000
index d0b7393d3..000000000
--- a/playbooks/common/openshift-cluster/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/lookup_plugins b/playbooks/common/openshift-cluster/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-cluster/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
index 6e953be69..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
+++ b/playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
@@ -1,22 +1 @@
 ---
-- name: Check Docker image count
-  shell: "docker images -aq | wc -l"
-  register: docker_image_count
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Remove unused Docker images for Docker 1.10+ migration
-  shell: "docker rmi `docker images -aq`"
-  # Will fail on images still in use:
-  failed_when: false
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
-  shell: "docker images -aq | wc -l"
-  register: docker_image_count
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 23cf8cf76..ef8233b67 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -2,7 +2,6 @@
 - name: Create local temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -11,6 +10,11 @@
     changed_when: false
     when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
 
+  - name: Chmod local temp directory
+    local_action: command chmod 777 "{{ local_cert_sync_tmpdir.stdout }}"
+    changed_when: false
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
 - name: Create service signer certificate
   hosts: oo_first_master
   tasks:
@@ -22,7 +26,7 @@
 
   - name: Create service signer certificate
     command: >
-      {{ openshift.common.client_binary }} adm ca create-signer-cert
+      {{ openshift_client_binary }} adm ca create-signer-cert
       --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt
       --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key
       --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer
@@ -65,7 +69,6 @@
 - name: Delete local temp directory
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Delete local temp directory
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
index 33ed6a283..858912379 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_excluders.yml
@@ -1,6 +1,6 @@
 ---
 - name: Disable excluders
-  hosts: oo_masters_to_config
+  hosts: "{{ l_upgrade_excluder_hosts }}"
   gather_facts: no
   roles:
   - role: openshift_excluder
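
The disable_excluders rename above turns a master-only play into a shared one: the play's hosts is now the l_upgrade_excluder_hosts variable, so each upgrade playbook chooses its own target group. A minimal sketch of a hypothetical caller, using a group expression that appears later in this diff (the exact import path depends on where the caller lives):

---
# Hypothetical caller: disable excluders on nodes only, as the node-upgrade
# playbooks in this commit do via ../pre/config.yml.
- import_playbook: ../disable_excluders.yml
  vars:
    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
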
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
deleted file mode 100644
index ab3171c9a..000000000
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Disable excluders
-  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: disable
-    r_openshift_excluder_verify_upgrade: true
-    r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
-    r_openshift_excluder_package_state: latest
-    r_openshift_excluder_docker_package_state: latest
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 5c6def484..ffb11670d 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -12,14 +12,13 @@
   roles:
   - openshift_facts
   tasks:
-  - set_fact:
-      repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
   - fail:
       msg: Cannot upgrade Docker on Atomic operating systems.
-    when: openshift.common.is_atomic | bool
+    when: openshift_is_atomic | bool
 
-  - include_tasks: upgrade_check.yml
+  - import_role:
+      name: container_runtime
+      tasks_from: docker_upgrade_check.yml
     when: docker_upgrade is not defined or docker_upgrade | bool
 
@@ -32,6 +31,7 @@
   any_errors_fatal: true
 
   roles:
+  - openshift_facts
   - lib_openshift
   tasks:
@@ -43,7 +43,7 @@
     retries: 10
     delay: 5
     register: node_unschedulable
-    until: node_unschedulable|succeeded
+    until: node_unschedulable is succeeded
     when:
     - l_docker_upgrade is defined
     - l_docker_upgrade | bool
@@ -51,11 +51,11 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result
-    until: not l_docker_upgrade_drain_result | failed
+    until: not (l_docker_upgrade_drain_result is failed)
     retries: 60
     delay: 60
 
@@ -70,5 +70,5 @@
     retries: 10
     delay: 5
     register: node_schedulable
-    until: node_schedulable|succeeded
-    when: node_unschedulable|changed
+    until: node_schedulable is succeeded
+    when: node_unschedulable is changed
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
deleted file mode 100644
index 8635eab0d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
-    docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
-    docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
-    # Some layers are deleted recursively and are no longer present
-    # when docker goes to remove them:
-    docker rmi -f `docker images -aq` || true
-fi
-
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
index dbc4f39c7..3b47a11e0 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
@@ -2,7 +2,7 @@
 - name: Restart docker
   service: name=docker state=restarted
   register: l_docker_restart_docker_in_upgrade_result
-  until: not l_docker_restart_docker_in_upgrade_result | failed
+  until: not (l_docker_restart_docker_in_upgrade_result is failed)
   retries: 3
   delay: 30
 
@@ -15,7 +15,7 @@
     - "{{ openshift_service_type }}-master-controllers"
     - "{{ openshift_service_type }}-node"
   failed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Wait for master API to come back online
   wait_for:
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
index 4856a4b51..54eeb2ef5 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
@@ -10,7 +10,7 @@
     - etcd_container
     - openvswitch
   failed_when: false
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
 - name: Check Docker image count
   shell: "docker images -aq | wc -l"
@@ -35,14 +35,14 @@
     name: docker
     state: stopped
   register: l_pb_docker_upgrade_stop_result
-  until: not l_pb_docker_upgrade_stop_result | failed
+  until: not (l_pb_docker_upgrade_stop_result is failed)
   retries: 3
   delay: 30
 
 - name: Upgrade Docker
   package: name=docker{{ '-' + docker_version }} state=present
   register: result
-  until: result | success
+  until: result is succeeded
 
 - include_tasks: restart.yml
   when: not skip_docker_restart | default(False) | bool
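
A change repeated throughout this commit: Ansible 2.5 deprecates filter-style status checks (result | failed, result | succeeded) in favor of tests (result is failed, result is succeeded). A minimal, self-contained task in the new style, mirroring the restart handler above:

- name: Restart docker
  service:
    name: docker
    state: restarted
  register: l_restart_result
  # Test syntax; the old "not l_restart_result | failed" form is deprecated.
  until: not (l_restart_result is failed)
  retries: 3
  delay: 30
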
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index 2e3a7ae8b..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -1,58 +1 @@
 ---
-
-# This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_upgrade to True if so.
-
-- set_fact:
-    docker_upgrade: True
-  when: docker_upgrade is not defined
-
-- name: Check if Docker is installed
-  command: rpm -q docker
-  args:
-    warn: no
-  register: pkg_check
-  failed_when: pkg_check.rc > 1
-  changed_when: no
-
-- name: Get current version of Docker
-  command: "{{ repoquery_installed }} --qf '%{version}' docker"
-  register: curr_docker_version
-  retries: 4
-  until: curr_docker_version | succeeded
-  changed_when: false
-
-- name: Get latest available version of Docker
-  command: >
-    {{ repoquery_cmd }} --qf '%{version}' "docker"
-  register: avail_docker_version
-  retries: 4
-  until: avail_docker_version | succeeded
-  # Don't expect docker rpm to be available on hosts that don't already have it installed:
-  when: pkg_check.rc == 0
-  failed_when: false
-  changed_when: false
-
-- fail:
-    msg: This playbook requires access to Docker 1.12 or later
-  # Disable the 1.12 requirement if the user set a specific Docker version
-  when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
-
-# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
-- set_fact:
-    l_docker_upgrade: False
-
-# Make sure a docker_version is set if none was requested:
-- set_fact:
-    docker_version: "{{ avail_docker_version.stdout }}"
-  when: pkg_check.rc == 0 and docker_version is not defined
-
-- name: Flag for Docker upgrade if necessary
-  set_fact:
-    l_docker_upgrade: True
-  when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
-
-- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
-  set_fact:
-    docker_upgrade_nuke_images: True
-  when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
diff --git a/playbooks/common/openshift-cluster/upgrades/filter_plugins b/playbooks/common/openshift-cluster/upgrades/filter_plugins
deleted file mode 120000
index b1213dedb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 5454a6680..8ee83819e 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -8,7 +8,7 @@
 - import_playbook: ../../../init/facts.yml
 
 - name: Ensure firewall is not switched during upgrade
-  hosts: oo_all_hosts
+  hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
   vars:
     openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
   tasks:
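
The init.yml change keeps oo_all_hosts as the fallback while letting callers narrow the firewall check. Control-plane-only upgrades later in this diff pass the variable explicitly; the pattern looks like this (sketch; the value is taken from v3_6/upgrade_control_plane.yml below):

- import_playbook: ../init.yml
  vars:
    # Skip nodes: only control-plane hosts should have their firewall state pinned.
    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
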
diff --git a/playbooks/common/openshift-cluster/upgrades/lookup_plugins b/playbooks/common/openshift-cluster/upgrades/lookup_plugins
deleted file mode 120000
index aff753026..000000000
--- a/playbooks/common/openshift-cluster/upgrades/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 344ddea3c..1b57521df 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -27,8 +27,8 @@
 
   - set_fact:
       haproxy_routers: "{{ all_routers.results.results[0]['items'] |
-                           oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
-                           oo_select_keys_from_list(['metadata']) }}"
+                           lib_utils_oo_pods_match_component(openshift_deployment_type, 'haproxy-router') |
+                           lib_utils_oo_select_keys_from_list(['metadata']) }}"
     when:
     - all_routers.results.returncode == 0
 
@@ -126,7 +126,7 @@
     debug:
       msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
     when:
-    - not grep_plugin_order_override | skipped
+    - not (grep_plugin_order_override is skipped)
    - grep_plugin_order_override.rc == 0
 
   - name: Warn if shared-resource-viewer could not be updated
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
new file mode 100644
index 000000000..da63450b8
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -0,0 +1,81 @@
+---
+# for control-plane upgrade, several variables may be passed in to this play
+# which may affect the tasks here and in imported playbooks.
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+
+- import_playbook: verify_cluster.yml
+
+- name: Update repos on upgrade hosts
+  hosts: "{{ l_upgrade_repo_hosts }}"
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: "{{ l_upgrade_no_proxy_hosts }}"
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when:
+    - openshift_http_proxy is defined or openshift_https_proxy is defined
+    - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- name: OpenShift Health Checks
+  hosts: "{{ l_upgrade_health_check_hosts }}"
+  any_errors_fatal: true
+  roles:
+  - openshift_health_checker
+  vars:
+  - r_openshift_health_checker_playbook_context: upgrade
+  post_tasks:
+  - name: Run health checks (upgrade)
+    action: openshift_health_check
+    args:
+      checks:
+      - disk_availability
+      - memory_availability
+      - docker_image_availability
+
+- import_playbook: ../disable_excluders.yml
+
+- import_playbook: ../../../../init/version.yml
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+    # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
+    # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
+
+# If we're only upgrading nodes, we need to ensure masters are already upgraded
+- name: Verify masters are already upgraded
+  hosts: oo_masters_to_config
+  tasks:
+  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+    when:
+    - l_upgrade_nodes_only | default(False) | bool
+    - openshift.common.version != openshift_version
+
+# If we're only upgrading nodes, skip this.
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+  when: not (l_upgrade_nodes_only | default(False)) | bool
+
+- name: Verify upgrade targets
+  hosts: "{{ l_upgrade_verify_targets_hosts }}"
+  roles:
+  - role: openshift_facts
+  tasks:
+  - include_tasks: verify_upgrade_targets.yml
+
+- name: Verify docker upgrade targets
+  hosts: "{{ l_upgrade_docker_target_hosts }}"
+  tasks:
+  - import_role:
+      name: container_runtime
+      tasks_from: docker_upgrade_check.yml
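
The new pre/config.yml also doubles as the node-only pre-flight: when l_upgrade_nodes_only is set, it asserts the masters are already at the target version and skips the master restart validation. A node-only caller therefore just flips one flag; a sketch mirroring v3_6/upgrade_nodes.yml later in this diff (other l_upgrade_* vars omitted for brevity):

- import_playbook: ../pre/config.yml
  vars:
    l_upgrade_nodes_only: True
    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
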
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
deleted file mode 100644
index 8ecae4539..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
deleted file mode 100644
index 18a08eb99..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-# Only check if docker upgrade is required if docker_upgrade is not
-# already set to False.
-- include_tasks: ../../docker/upgrade_check.yml
-  when:
-  - docker_upgrade is not defined or (docker_upgrade | bool)
-  - not (openshift.common.is_atomic | bool)
-
-# Additional checks for Atomic hosts:
-
-- name: Determine available Docker
-  shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
-  register: g_atomic_docker_version_result
-  when: openshift.common.is_atomic | bool
-
-- set_fact:
-    l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
-  when: openshift.common.is_atomic | bool
-
-- fail:
-    msg: This playbook requires access to Docker 1.12 or later
-  when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
new file mode 100644
index 000000000..693ab2d96
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -0,0 +1,94 @@
+---
+# Verify a few items before we proceed with upgrade process.
+
+- name: Verify upgrade can proceed on first master
+  hosts: oo_first_master
+  gather_facts: no
+  tasks:
+
+  # Error out in situations where the user has older versions specified in their
+  # inventory in any of the openshift_release, openshift_image_tag, and
+  # openshift_pkg_version variables. These must be removed or updated to proceed
+  # with upgrade.
+  # TODO: Should we block if you're *over* the next major release version as well?
+  - fail:
+      msg: >
+        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+        valid version for a {{ openshift_upgrade_target }} upgrade
+    when:
+    - openshift_pkg_version is defined
+    - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
+
+  - fail:
+      msg: >
+        openshift_image_tag is {{ openshift_image_tag }} which is not a
+        valid version for a {{ openshift_upgrade_target }} upgrade
+    when:
+    - openshift_image_tag is defined
+    - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
+
+  - set_fact:
+      openshift_release: "{{ openshift_release[1:] }}"
+    when: openshift_release is defined and openshift_release[0] == 'v'
+
+  - fail:
+      msg: >
+        openshift_release is {{ openshift_release }} which is not a
+        valid release for a {{ openshift_upgrade_target }} upgrade
+    when:
+    - openshift_release is defined
+    - not (openshift_release is version_compare(openshift_upgrade_target ,'='))
+
+- name: Verify master processes
+  hosts: oo_masters_to_config
+  roles:
+  - lib_utils
+  - openshift_facts
+  tasks:
+  - name: Read master storage backend setting
+    yedit:
+      state: list
+      src: /etc/origin/master/master-config.yaml
+      key: kubernetesMasterConfig.apiServerArguments.storage-backend
+    register: _storage_backend
+
+  - fail:
+      msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+    when:
+    # assuming the master-config.yml is properly configured, i.e. the value is a list
+    - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+  - debug:
+      msg: "Storage backend is set to etcd3"
+
+  - openshift_facts:
+      role: master
+      local_facts:
+        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+  - when: openshift_is_containerized | bool
+    block:
+    - set_fact:
+        master_services:
+        - "{{ openshift_service_type }}-master"
+
+    # In case of the non-ha to ha upgrade.
+    - name: Check if the {{ openshift_service_type }}-master-api.service exists
+      command: >
+        systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
+      register: master_api_service_status
+
+    - set_fact:
+        master_services:
+        - "{{ openshift_service_type }}-master-api"
+        - "{{ openshift_service_type }}-master-controllers"
+      when:
+      - master_api_service_status.stdout_lines | length > 0
+      - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+    - name: Ensure Master is running
+      service:
+        name: "{{ item }}"
+        state: started
+        enabled: yes
+      with_items: "{{ master_services }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
deleted file mode 100644
index bef95546d..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify master processes
-  hosts: oo_masters_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
-  - when: openshift.common.is_containerized | bool
-    block:
-    - set_fact:
-        master_services:
-        - "{{ openshift_service_type }}-master"
-
-    # In case of the non-ha to ha upgrade.
-    - name: Check if the {{ openshift_service_type }}-master-api.service exists
-      command: >
-        systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
-      register: master_api_service_status
-
-    - set_fact:
-        master_services:
-        - "{{ openshift_service_type }}-master-api"
-        - "{{ openshift_service_type }}-master-controllers"
-      when:
-      - master_api_service_status.stdout_lines | length > 0
-      - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
-
-    - name: Ensure Master is running
-      service:
-        name: "{{ item }}"
-        state: started
-        enabled: yes
-      with_items: "{{ master_services }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
deleted file mode 100644
index f75ae3b15..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Verify all masters has etcd3 storage backend set
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - lib_utils
-  tasks:
-  - name: Read master storage backend setting
-    yedit:
-      state: list
-      src: /etc/origin/master/master-config.yaml
-      key: kubernetesMasterConfig.apiServerArguments.storage-backend
-    register: _storage_backend
-
-  - fail:
-      msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
-    when:
-    # assuming the master-config.yml is properly configured, i.e. the value is a list
-    - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
-
-  - debug:
-      msg: "Storage backend is set to etcd3"
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
deleted file mode 100644
index 2a8de50a2..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: OpenShift Health Checks
-  hosts: oo_all_hosts
-  any_errors_fatal: true
-  roles:
-  - openshift_health_checker
-  vars:
-  - r_openshift_health_checker_playbook_context: upgrade
-  post_tasks:
-  - name: Run health checks (upgrade)
-    action: openshift_health_check
-    args:
-      checks:
-      - disk_availability
-      - memory_availability
-      - docker_image_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
deleted file mode 100644
index 3c0017891..000000000
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- name: Verify upgrade can proceed on first master
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - fail:
-      msg: >
-        This upgrade is only supported for origin and openshift-enterprise
-        deployment types
-    when: deployment_type not in ['origin','openshift-enterprise']
-
-  # Error out in situations where the user has older versions specified in their
-  # inventory in any of the openshift_release, openshift_image_tag, and
-  # openshift_pkg_version variables. These must be removed or updated to proceed
-  # with upgrade.
-  # TODO: Should we block if you're *over* the next major release version as well?
-  - fail:
-      msg: >
-        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
-        valid version for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
-  - fail:
-      msg: >
-        openshift_image_tag is {{ openshift_image_tag }} which is not a
-        valid version for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
-  - set_fact:
-      openshift_release: "{{ openshift_release[1:] }}"
-    when: openshift_release is defined and openshift_release[0] == 'v'
-
-  - fail:
-      msg: >
-        openshift_release is {{ openshift_release }} which is not a
-        valid release for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 96f970506..4c1156f4b 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -5,7 +5,7 @@
   when: openshift.common.version is not defined
 
 - name: Update oreg_auth docker login credentials if necessary
-  include_role:
+  import_role:
     name: container_runtime
     tasks_from: registry_auth.yml
   when: oreg_auth_user is defined
@@ -15,9 +15,9 @@
     docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
   register: pull_result
   changed_when: "'Downloaded newer image' in pull_result.stdout"
-  when: openshift.common.is_containerized | bool
+  when: openshift_is_containerized | bool
 
-- when: not openshift.common.is_containerized | bool
+- when: not openshift_is_containerized | bool
   block:
   - name: Check latest available OpenShift RPM version
     repoquery:
@@ -43,11 +43,11 @@
     fail:
       msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
     when:
-    - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<')
+    - (openshift_pkg_version | default('-0.0', True)).split('-')[1] is version_compare(openshift_release, '<')
 
 - name: Fail when openshift version does not meet minium requirement for Origin upgrade
   fail:
     msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
   when:
-  - deployment_type == 'origin'
-  - openshift.common.version | version_compare(openshift_upgrade_min,'<')
+  - openshift_deployment_type == 'origin'
+  - openshift.common.version is version_compare(openshift_upgrade_min,'<')
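
version_compare follows the same filter-to-test migration: `x | version_compare(y, '<')` becomes `x is version_compare(y, '<')`. The Origin minimum-version guard above is the canonical shape; standing alone it reads:

- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
  fail:
    msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
  when:
  - openshift_deployment_type == 'origin'
  # Test syntax for version comparison, required on newer Ansible.
  - openshift.common.version is version_compare(openshift_upgrade_min, '<')
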
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 37fc8a0f6..412075d41 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,20 +2,6 @@
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
-
-# If facts cache were for some reason deleted, this fact may not be set, and if not set
-# it will always default to true. This causes problems for the etcd data dir fact detection
-# so we must first make sure this is set correctly before attempting the backup.
-- name: Set master embedded_etcd fact
-  hosts: oo_masters_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-
 - name: Backup and upgrade etcd
   import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
 
@@ -39,7 +25,7 @@
   tasks:
   - name: Upgrade all storage
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
     register: l_pb_upgrade_control_plane_pre_upgrade_storage
     when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
@@ -64,7 +50,7 @@
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
   tasks:
-  - include_role:
+  - import_role:
      name: openshift_facts
 
   # Run the pre-upgrade hook if defined:
@@ -74,7 +60,7 @@
   - include_tasks: "{{ openshift_master_upgrade_pre_hook }}"
     when: openshift_master_upgrade_pre_hook is defined
 
-  - include_role:
+  - import_role:
      name: openshift_master
      tasks_from: upgrade.yml
 
@@ -100,12 +86,12 @@
 
   - name: Post master upgrade - Upgrade clusterpolicies storage
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=clusterpolicies --confirm
     register: l_pb_upgrade_control_plane_post_upgrade_storage
     when:
     - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
-    - openshift_version | version_compare('3.7','<')
+    - openshift_version is version_compare('3.7','<')
     failed_when:
     - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
@@ -122,12 +108,11 @@
 - name: Gate on master update
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       master_update_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_masters_to_config)
-                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+                                 | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+                                 | lib_utils_oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
   - set_fact:
       master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}"
   - fail:
@@ -147,10 +132,10 @@
   tasks:
   - name: Reconcile Cluster Roles
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles --additive-only=true --confirm -o name
     register: reconcile_cluster_role_result
-    when: openshift_version | version_compare('3.7','<')
+    when: openshift_version is version_compare('3.7','<')
     changed_when:
     - reconcile_cluster_role_result.stdout != ''
     - reconcile_cluster_role_result.rc == 0
@@ -158,14 +143,14 @@
 
   - name: Reconcile Cluster Role Bindings
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-role-bindings
       --exclude-groups=system:authenticated
       --exclude-groups=system:authenticated:oauth
       --exclude-groups=system:unauthenticated
       --exclude-users=system:anonymous
       --additive-only=true --confirm -o name
-    when: openshift_version | version_compare('3.7','<')
+    when: openshift_version is version_compare('3.7','<')
     register: reconcile_bindings_result
     changed_when:
     - reconcile_bindings_result.stdout != ''
@@ -174,16 +159,16 @@
 
   - name: Reconcile Jenkins Pipeline Role Bindings
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name
     run_once: true
     register: reconcile_jenkins_role_binding_result
     changed_when:
     - reconcile_jenkins_role_binding_result.stdout != ''
     - reconcile_jenkins_role_binding_result.rc == 0
     when:
-    - openshift_version | version_compare('3.7','<')
+    - openshift_version is version_compare('3.7','<')
 
-  - when: openshift_upgrade_target | version_compare('3.7','<')
+  - when: openshift_upgrade_target is version_compare('3.7','<')
     block:
     - name: Retrieve shared-resource-viewer
       oc_obj:
@@ -228,7 +213,7 @@
 
   - name: Reconcile Security Context Constraints
     command: >
-      {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
+      {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name
     register: reconcile_scc_result
     changed_when:
     - reconcile_scc_result.stdout != ''
@@ -237,7 +222,7 @@
 
   - name: Migrate storage post policy reconciliation
     command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       migrate storage --include=* --confirm
     run_once: true
     register: l_pb_upgrade_control_plane_post_upgrade_storage
@@ -256,12 +241,11 @@
 - name: Gate on reconcile
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       reconcile_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_masters_to_config)
-                                 | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+                                 | lib_utils_oo_select_keys(groups.oo_masters_to_config)
+                                 | lib_utils_oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
   - set_fact:
       reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}"
   - fail:
@@ -276,7 +260,7 @@
   - openshift_facts
   tasks:
   - include_tasks: docker/tasks/upgrade.yml
-    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift_is_atomic | bool
 
 - name: Drain and upgrade master nodes
   hosts: oo_masters_to_config:&oo_nodes_to_upgrade
@@ -301,25 +285,23 @@
     retries: 10
     delay: 5
     register: node_unschedulable
-    until: node_unschedulable|succeeded
+    until: node_unschedulable is succeeded
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_control_plane_drain_result
-    until: not l_upgrade_control_plane_drain_result | failed
+    until: not (l_upgrade_control_plane_drain_result is failed)
     retries: 60
     delay: 60
 
   roles:
   - openshift_facts
   post_tasks:
-  - include_role:
+  - import_role:
      name: openshift_node
      tasks_from: upgrade.yml
-    vars:
-      openshift_node_upgrade_in_progress: True
   - name: Set node schedulability
     oc_adm_manage_node:
       node: "{{ openshift.node.nodename | lower }}"
@@ -328,5 +310,5 @@
     retries: 10
     delay: 5
     register: node_schedulable
-    until: node_schedulable|succeeded
-    when: node_unschedulable|changed
+    until: node_schedulable is succeeded
+    when: node_unschedulable is changed
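
upgrade_nodes.yml (next) keeps the rolling behavior of the control-plane play but batches workers. serial and max_fail_percentage are evaluated before per-host inventory, so they must come in with -e; for example:

# Invoked as: ansible-playbook ... -e openshift_upgrade_nodes_serial="20%"
#                                  -e openshift_upgrade_nodes_max_fail_percentage=10
- name: Drain and upgrade nodes
  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
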
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index f7a85545b..464af3ae6 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,16 +1,23 @@
 ---
+- name: Prepull images and rpms before doing rolling restart
+  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+  roles:
+  - role: openshift_facts
+  tasks:
+  - import_role:
+      name: openshift_node
+      tasks_from: upgrade_pre.yml
+
 - name: Drain and upgrade nodes
   hosts: oo_nodes_to_upgrade:!oo_masters_to_config
   # This var must be set with -e on invocation, as it is not a per-host inventory var
   # and is evaluated early. Values such as "20%" can also be used.
   serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
   max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
+  roles:
+  - lib_openshift
+  - openshift_facts
   pre_tasks:
-  - name: Load lib_openshift modules
-    import_role:
-      name: lib_openshift
-
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
@@ -22,29 +29,21 @@
     retries: 10
     delay: 5
     register: node_unschedulable
-    until: node_unschedulable|succeeded
+    until: node_unschedulable is succeeded
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
-    until: not l_upgrade_nodes_drain_result | failed
+    until: not (l_upgrade_nodes_drain_result is failed)
     retries: 60
     delay: 60
 
-  roles:
-  - openshift_facts
   post_tasks:
-  - include_role:
+  - import_role:
      name: openshift_node
      tasks_from: upgrade.yml
-    vars:
-      openshift_node_upgrade_in_progress: True
-  - include_role:
-      name: openshift_excluder
-    vars:
-      r_openshift_excluder_action: enable
   - name: Set node schedulability
     oc_adm_manage_node:
       node: "{{ openshift.node.nodename | lower }}"
@@ -53,5 +52,13 @@
     retries: 10
     delay: 5
     register: node_schedulable
-    until: node_schedulable|succeeded
-    when: node_unschedulable|changed
+    until: node_schedulable is succeeded
+    when: node_unschedulable is changed
+
+- name: Re-enable excluders
+  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
+  tasks:
+  - import_role:
+      name: openshift_excluder
+    vars:
+      r_openshift_excluder_action: enable
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 47410dff3..6d59bfd0b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -3,7 +3,7 @@
   hosts: localhost
   tasks:
   - name: build upgrade scale groups
-    include_role:
+    import_role:
      name: openshift_aws
      tasks_from: upgrade_node_group.yml
 
@@ -11,25 +11,19 @@
       msg: "Ensure that new scale groups were provisioned before proceeding to update."
     when:
     - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
+    - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
+    - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
 
 - name: initialize upgrade bits
   import_playbook: init.yml
 
-- name: Drain and upgrade nodes
+- name: unschedule nodes
   hosts: oo_sg_current_nodes
-  # This var must be set with -e on invocation, as it is not a per-host inventory var
-  # and is evaluated early. Values such as "20%" can also be used.
-  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
-  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-
-  pre_tasks:
+  tasks:
   - name: Load lib_openshift modules
-    include_role:
+    import_role:
      name: ../roles/lib_openshift
 
-  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
-  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
-  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
   - name: Mark node unschedulable
     oc_adm_manage_node:
       node: "{{ openshift.node.nodename | lower }}"
@@ -38,22 +32,35 @@
     retries: 10
     delay: 5
     register: node_unschedulable
-    until: node_unschedulable|succeeded
+    until: node_unschedulable is succeeded
 
+- name: Drain nodes
+  hosts: oo_sg_current_nodes
+  # This var must be set with -e on invocation, as it is not a per-host inventory var
+  # and is evaluated early. Values such as "20%" can also be used.
+  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
+  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
+  tasks:
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
-    until: not l_upgrade_nodes_drain_result | failed
-    retries: 60
-    delay: 60
+    until: not (l_upgrade_nodes_drain_result is failed)
+    retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0  | int }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_nodes_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
 
 # Alright, let's clean up!
 - name: clean up the old scale group
   hosts: localhost
   tasks:
   - name: clean up scale group
-    include_role:
+    import_role:
      name: openshift_aws
      tasks_from: remove_scale_group.yml
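
The scale-group drain above gains a bounded wait: openshift_upgrade_nodes_drain_timeout is handed to the drain command as --timeout, and failed_when only treats a failed drain as fatal when no timeout was requested (the default '0'). Reduced to its essentials, and assuming plain oc stands in for the detected client binary:

- name: Drain node with a bounded wait (sketch)
  command: >
    oc adm drain {{ openshift.node.nodename | lower }}
    --force --delete-local-data --ignore-daemonsets
    --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
  delegate_to: "{{ groups.oo_first_master.0 }}"
  register: drain_result
  # A timed-out drain is tolerated; an unbounded drain must succeed.
  failed_when:
  - drain_result is failed
  - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
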
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
deleted file mode 120000
index 7de3c1dd7..000000000
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 9f9399ff9..d520c6aee 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -13,101 +13,26 @@
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.6'
-      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
 
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
 
 - import_playbook: validator.yml
-  tags:
-  - pre_upgrade
 
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 7374160d6..eb5f07ae0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -12,106 +12,39 @@
 # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
 #
 - import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
+  vars:
+    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_upgrade_target: '3.6'
-      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
-
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos on control plane hosts
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
   tasks:
   - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
+      openshift_upgrade_target: '3.6'
+      openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"
 
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_etcd_to_config
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
 
 - import_playbook: validator.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images +- name: Flag pre-upgrade checks complete for hosts without errors    hosts: oo_masters_to_config:oo_etcd_to_config    tasks: -  - include_tasks: ../cleanup_unused_images.yml +  - set_fact: +      pre_upgrade_complete: True + +# Pre-upgrade completed  - import_playbook: ../upgrade_control_plane.yml    vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index de9bf098e..4febe76ee 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -15,95 +15,24 @@    tasks:    - set_fact:        openshift_upgrade_target: '3.6' -      openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" +      openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}" -# Pre-upgrade -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- name: Update repos on nodes -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  roles: -  - openshift_repos -  tags: -  - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- import_playbook: ../pre/config.yml    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -- name: Verify masters are already upgraded -  hosts: oo_masters_to_config -  tags: -  - pre_upgrade +    l_upgrade_repo_hosts: "oo_nodes_to_config" +    l_upgrade_no_proxy_hosts: "oo_all_hosts" +    l_upgrade_health_check_hosts: "oo_nodes_to_config" +    l_upgrade_verify_targets_hosts: "oo_nodes_to_config" +    l_upgrade_docker_target_hosts: "oo_nodes_to_config" +    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" +    l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config    tasks: -  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
-    when: openshift.common.version != openshift_version - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. +  - set_fact: +      pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../cleanup_unused_images.yml +# Pre-upgrade completed  - import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins deleted file mode 120000 index 7de3c1dd7..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index 0c1a99272..4daa9e490 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -15,103 +15,24 @@        openshift_upgrade_target: '3.7'        openshift_upgrade_min: '3.6' -# Pre-upgrade - -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_etcd3_backend.yml -  tags: -  - pre_upgrade - -- name: Update repos and initialize facts on all hosts -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  tags: -  - pre_upgrade -  roles: -  - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_master_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- import_playbook: ../pre/config.yml    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -- import_playbook: ../../../../openshift-master/private/validate_restart.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config -  tasks: -  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade +    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" +    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" +    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" +    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"  - import_playbook: validator.yml -  
tags: -  - pre_upgrade -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images +- name: Flag pre-upgrade checks complete for hosts without errors    hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config    tasks: -  - include_tasks: ../cleanup_unused_images.yml +  - set_fact: +      pre_upgrade_complete: True + +# Pre-upgrade completed  - import_playbook: ../upgrade_control_plane.yml    vars: @@ -121,6 +42,8 @@  - name: Cycle all controller services to force new leader election mode    hosts: oo_masters_to_config    gather_facts: no +  roles: +  - role: openshift_facts    tasks:    - name: Stop {{ openshift_service_type }}-master-controllers      systemd: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index 9dcad352c..8d42e4c91 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -12,110 +12,39 @@  # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.  #  - import_playbook: ../init.yml -  tags: -  - pre_upgrade +  vars: +    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"  - name: Configure the upgrade target for the common upgrade tasks -  hosts: oo_all_hosts -  tags: -  - pre_upgrade +  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config    tasks:    - set_fact:        openshift_upgrade_target: '3.7'        openshift_upgrade_min: '3.6' -# Pre-upgrade -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_etcd3_backend.yml -  tags: -  - pre_upgrade - -- name: Update repos on control plane hosts -  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config -  tags: -  - pre_upgrade -  roles: -  - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_master_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- 
import_playbook: ../pre/config.yml +  # These vars are meant to exclude oo_nodes from plays that would otherwise include +  # them by default.    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -- import_playbook: ../../../../openshift-master/private/validate_restart.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_masters_to_config -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_masters_to_config:oo_etcd_to_config -  tasks: -  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade +    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" +    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" +    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_no_proxy_hosts: "oo_masters_to_config" +    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_verify_targets_hosts: "oo_masters_to_config" +    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" +    l_upgrade_excluder_hosts: "oo_masters_to_config"  - import_playbook: validator.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images +- name: Flag pre-upgrade checks complete for hosts without errors    hosts: oo_masters_to_config:oo_etcd_to_config    tasks: -  - include_tasks: ../cleanup_unused_images.yml +  - set_fact: +      pre_upgrade_complete: True + +# Pre-upgrade completed  - import_playbook: ../upgrade_control_plane.yml    vars: @@ -125,6 +54,8 @@  - name: Cycle all controller services to force new leader election mode    hosts: oo_masters_to_config    gather_facts: no +  roles: +  - role: openshift_facts    tasks:    - name: Stop {{ openshift_service_type }}-master-controllers      systemd: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml index 27a7f67ea..16d95514c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -17,93 +17,22 @@        openshift_upgrade_target: '3.7'        openshift_upgrade_min: '3.6' -# Pre-upgrade -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- name: Update repos on nodes -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  roles: -  - openshift_repos -  tags: -  - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- import_playbook: ../pre/config.yml    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -- name: Verify masters are already upgraded -  hosts: oo_masters_to_config -  tags: -  - pre_upgrade +    l_upgrade_repo_hosts: "oo_nodes_to_config" +    l_upgrade_no_proxy_hosts: "oo_all_hosts" +    l_upgrade_health_check_hosts: "oo_nodes_to_config" +    l_upgrade_verify_targets_hosts: "oo_nodes_to_config" +    l_upgrade_docker_target_hosts: "oo_nodes_to_config" +    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" +    l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config    tasks: -  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ 
openshift_version }} before node upgrade can be run." -    when: openshift.common.version != openshift_version - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. +  - set_fact: +      pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../cleanup_unused_images.yml +# Pre-upgrade completed  - import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index 74d0cd8ad..49e691352 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -14,9 +14,9 @@    # DO NOT DISABLE THIS, YOUR UPGRADE WILL FAIL IF YOU DO SO    - name: Confirm OpenShift authorization objects are in sync      command: > -      {{ openshift.common.client_binary }} adm migrate authorization +      {{ openshift_client_binary }} adm migrate authorization      when: -    - openshift_currently_installed_version | version_compare('3.7','<') +    - openshift_currently_installed_version is version_compare('3.7','<')      - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool      changed_when: false      register: l_oc_result diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins deleted file mode 120000 index 7de3c1dd7..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index ead2efbd0..0f74e0137 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -15,103 +15,24 @@        openshift_upgrade_target: '3.8'        openshift_upgrade_min: '3.7' -# Pre-upgrade - -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_etcd3_backend.yml -  tags: -  - pre_upgrade - -- name: Update repos and initialize facts on all hosts -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  tags: -  - pre_upgrade -  roles: -  - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_master_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- import_playbook: ../pre/config.yml    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -- import_playbook: ../../../../openshift-master/private/validate_restart.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config -  tasks: -  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade +    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" +    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" +    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" +    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"  - import_playbook: validator.yml -  
tags: -  - pre_upgrade -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images +- name: Flag pre-upgrade checks complete for hosts without errors    hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config    tasks: -  - include_tasks: ../cleanup_unused_images.yml +  - set_fact: +      pre_upgrade_complete: True + +# Pre-upgrade completed  - import_playbook: ../upgrade_control_plane.yml    vars: @@ -121,6 +42,8 @@  - name: Cycle all controller services to force new leader election mode    hosts: oo_masters_to_config    gather_facts: no +  roles: +  - role: openshift_facts    tasks:    - name: Stop {{ openshift_service_type }}-master-controllers      systemd: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index ae37b1359..a2f316c25 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -12,110 +12,39 @@  # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.  #  - import_playbook: ../init.yml -  tags: -  - pre_upgrade +  vars: +    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"  - name: Configure the upgrade target for the common upgrade tasks -  hosts: oo_all_hosts -  tags: -  - pre_upgrade +  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config    tasks:    - set_fact:        openshift_upgrade_target: '3.8'        openshift_upgrade_min: '3.7' -# Pre-upgrade -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_etcd3_backend.yml -  tags: -  - pre_upgrade - -- name: Update repos on control plane hosts -  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config -  tags: -  - pre_upgrade -  roles: -  - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_master_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- 
import_playbook: ../pre/config.yml +  # These vars are meant to exclude oo_nodes from plays that would otherwise include +  # them by default.    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -- import_playbook: ../../../../openshift-master/private/validate_restart.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_masters_to_config -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_masters_to_config:oo_etcd_to_config -  tasks: -  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade +    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" +    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" +    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_no_proxy_hosts: "oo_masters_to_config" +    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_verify_targets_hosts: "oo_masters_to_config" +    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" +    l_upgrade_excluder_hosts: "oo_masters_to_config"  - import_playbook: validator.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images +- name: Flag pre-upgrade checks complete for hosts without errors    hosts: oo_masters_to_config:oo_etcd_to_config    tasks: -  - include_tasks: ../cleanup_unused_images.yml +  - set_fact: +      pre_upgrade_complete: True + +# Pre-upgrade completed  - import_playbook: ../upgrade_control_plane.yml    vars: @@ -125,6 +54,8 @@  - name: Cycle all controller services to force new leader election mode    hosts: oo_masters_to_config    gather_facts: no +  roles: +  - role: openshift_facts    tasks:    - name: Stop {{ openshift_service_type }}-master-controllers      systemd: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml index dd716b241..b5f1038fd 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml @@ -17,93 +17,22 @@        openshift_upgrade_target: '3.8'        openshift_upgrade_min: '3.7' -# Pre-upgrade -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- name: Update repos on nodes -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  roles: -  - openshift_repos -  tags: -  - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- import_playbook: ../pre/config.yml    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -- name: Verify masters are already upgraded -  hosts: oo_masters_to_config -  tags: -  - pre_upgrade +    l_upgrade_repo_hosts: "oo_nodes_to_config" +    l_upgrade_no_proxy_hosts: "oo_all_hosts" +    l_upgrade_health_check_hosts: "oo_nodes_to_config" +    l_upgrade_verify_targets_hosts: "oo_nodes_to_config" +    l_upgrade_docker_target_hosts: "oo_nodes_to_config" +    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" +    l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config    tasks: -  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ 
openshift_version }} before node upgrade can be run." -    when: openshift.common.version != openshift_version - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. +  - set_fact: +      pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../cleanup_unused_images.yml +# Pre-upgrade completed  - import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins deleted file mode 120000 index 7de3c1dd7..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/filter_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml index eb688f189..0aea5069d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml @@ -3,121 +3,32 @@  # Full Control Plane + Nodes Upgrade  #  - import_playbook: ../init.yml -  tags: -  - pre_upgrade  - name: Configure the upgrade target for the common upgrade tasks    hosts: oo_all_hosts -  tags: -  - pre_upgrade    tasks:    - set_fact:        openshift_upgrade_target: '3.9'        openshift_upgrade_min: '3.7' -# Pre-upgrade - -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_etcd3_backend.yml -  tags: -  - pre_upgrade - -- name: Update repos and initialize facts on all hosts -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  tags: -  - pre_upgrade -  roles: -  - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_master_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- import_playbook: ../pre/config.yml    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -    # We skip the docker role at this point in upgrade to prevent -    # unintended package, container, or config upgrades which trigger -    # docker restarts. At this early stage of upgrade we can assume -    # docker is configured and running. 
-    skip_docker_role: True - -- import_playbook: ../../../../openshift-master/private/validate_restart.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config -  tasks: -  - import_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade +    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" +    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" +    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" +    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"  - import_playbook: validator.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images +- name: Flag pre-upgrade checks complete for hosts without errors    hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config    tasks: -  - include_tasks: ../cleanup_unused_images.yml +  - set_fact: +      pre_upgrade_complete: True + +# Pre-upgrade completed  - import_playbook: ../upgrade_control_plane.yml    vars: @@ -127,6 +38,8 @@  - name: Cycle all controller services to force new leader election mode    hosts: oo_masters_to_config    gather_facts: no +  roles: +  - role: openshift_facts    tasks:    - name: Stop {{ openshift.common.service_type }}-master-controllers      systemd: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml index 983bb4a63..ef9871008 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -12,116 +12,40 @@  # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.  
#  - import_playbook: ../init.yml -  tags: -  - pre_upgrade +  vars: +    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"  - name: Configure the upgrade target for the common upgrade tasks -  hosts: oo_all_hosts -  tags: -  - pre_upgrade +  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config    tasks:    - set_fact:        openshift_upgrade_target: '3.9'        openshift_upgrade_min: '3.7' -# Pre-upgrade -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_etcd3_backend.yml -  tags: -  - pre_upgrade - -- name: Update repos on control plane hosts -  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config -  tags: -  - pre_upgrade -  roles: -  - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_master_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- import_playbook: ../pre/config.yml +  # These vars are meant to exclude oo_nodes from plays that would otherwise include +  # them by default.    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -    # We skip the docker role at this point in upgrade to prevent -    # unintended package, container, or config upgrades which trigger -    # docker restarts. At this early stage of upgrade we can assume -    # docker is configured and running. 
-    skip_docker_role: True +    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" +    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" +    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_no_proxy_hosts: "oo_masters_to_config" +    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" +    l_upgrade_verify_targets_hosts: "oo_masters_to_config" +    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" +    l_upgrade_excluder_hosts: "oo_masters_to_config" -- import_playbook: ../../../../openshift-master/private/validate_restart.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_masters_to_config -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade +- import_playbook: validator.yml -- name: Verify docker upgrade targets +- name: Flag pre-upgrade checks complete for hosts without errors    hosts: oo_masters_to_config:oo_etcd_to_config    tasks: -  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- import_playbook: validator.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade +  - set_fact: +      pre_upgrade_complete: True -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. +# Pre-upgrade completed -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_masters_to_config:oo_etcd_to_config -  tasks: -  - include_tasks: ../cleanup_unused_images.yml  - import_playbook: ../upgrade_control_plane.yml    vars: @@ -131,6 +55,8 @@  - name: Cycle all controller services to force new leader election mode    hosts: oo_masters_to_config    gather_facts: no +  roles: +  - role: openshift_facts    tasks:    - name: Stop {{ openshift.common.service_type }}-master-controllers      systemd: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml index d95cfa4e1..1d1b255c1 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml @@ -5,111 +5,30 @@  # Upgrades nodes only, but requires the control plane to have already been upgraded.  
#  - import_playbook: ../init.yml -  tags: -  - pre_upgrade  - name: Configure the upgrade target for the common upgrade tasks    hosts: oo_all_hosts -  tags: -  - pre_upgrade    tasks:    - set_fact:        openshift_upgrade_target: '3.9'        openshift_upgrade_min: '3.7' -# Pre-upgrade -- import_playbook: ../initialize_nodes_to_upgrade.yml -  tags: -  - pre_upgrade - -- name: Update repos on nodes -  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config -  roles: -  - openshift_repos -  tags: -  - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames -  hosts: oo_masters_to_config:oo_nodes_to_upgrade -  tags: -  - pre_upgrade -  tasks: -  - set_fact: -      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] -                                                    | union(groups['oo_masters_to_config']) -                                                    | union(groups['oo_etcd_to_config'] | default([]))) -                                                | oo_collect('openshift.common.hostname') | default([]) | join (',') -                                                }}" -    when: -    - openshift_http_proxy is defined or openshift_https_proxy is defined -    - openshift_generate_no_proxy_hosts | default(True) | bool - -- import_playbook: ../pre/verify_inventory_vars.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/verify_health_checks.yml -  tags: -  - pre_upgrade - -- import_playbook: ../disable_node_excluders.yml -  tags: -  - pre_upgrade - -- import_playbook: ../../../../init/version.yml -  tags: -  - pre_upgrade +- import_playbook: ../pre/config.yml    vars: -    # Request specific openshift_release and let the openshift_version role handle converting this -    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if -    # defined, and overriding the normal behavior of protecting the installed version -    openshift_release: "{{ openshift_upgrade_target }}" -    openshift_protect_installed_version: False - -    # We skip the docker role at this point in upgrade to prevent -    # unintended package, container, or config upgrades which trigger -    # docker restarts. At this early stage of upgrade we can assume -    # docker is configured and running. -    skip_docker_role: True - -- name: Verify masters are already upgraded -  hosts: oo_masters_to_config -  tags: -  - pre_upgrade +    l_upgrade_repo_hosts: "oo_nodes_to_config" +    l_upgrade_no_proxy_hosts: "oo_all_hosts" +    l_upgrade_health_check_hosts: "oo_nodes_to_config" +    l_upgrade_verify_targets_hosts: "oo_nodes_to_config" +    l_upgrade_docker_target_hosts: "oo_nodes_to_config" +    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" +    l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors +  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config    tasks: -  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
-    when: openshift.common.version != openshift_version - -- import_playbook: ../pre/verify_control_plane_running.yml -  tags: -  - pre_upgrade - -- name: Verify upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/verify_upgrade_targets.yml -  tags: -  - pre_upgrade - -- name: Verify docker upgrade targets -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml -  tags: -  - pre_upgrade - -- import_playbook: ../pre/gate_checks.yml -  tags: -  - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. +  - set_fact: +      pre_upgrade_complete: True -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images -  hosts: oo_nodes_to_upgrade -  tasks: -  - include_tasks: ../cleanup_unused_images.yml +# Pre-upgrade completed  - import_playbook: ../upgrade_nodes.yml
