Diffstat (limited to 'playbooks/common')
12 files changed, 70 insertions, 22 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
index 372a39e74..6d82fa928 100644
--- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -2,7 +2,6 @@
 - name: Create local temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -11,8 +10,15 @@
     changed_when: false
     when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)

+  - name: Chmod local temp directory
+    local_action: command chmod 777 "{{ local_cert_sync_tmpdir.stdout }}"
+    changed_when: false
+    when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
 - name: Create service signer certificate
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Create remote temp directory for creating certs
     command: mktemp -d /tmp/openshift-ansible-XXXXXXX
@@ -65,7 +71,6 @@
 - name: Delete local temp directory
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Delete local temp directory
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
index ffb11670d..8392e21ee 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -51,13 +51,19 @@

   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result
     until: not (l_docker_upgrade_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_docker_upgrade_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0

   - include_tasks: tasks/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool
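Note on the drain hunk above: the node drain is now time-bounded via --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s, and retries/failed_when key off the same variable. With the default of 0 there is no client-side timeout, the drain is retried once after 5 seconds, and a persistent failure aborts the run; with a non-zero timeout, a drain that runs out of time no longer fails the play and the upgrade proceeds. A minimal inventory sketch, assuming the usual [OSEv3:vars] group (the 600 value is purely illustrative):

    [OSEv3:vars]
    # Allow up to 600s per node drain; if the timeout expires,
    # the upgrade continues instead of failing the play.
    openshift_upgrade_nodes_drain_timeout=600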
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index cfc0c8745..da63450b8 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -1,4 +1,6 @@
 ---
+# For control-plane upgrade, several variables may be passed in to this play
+# which may affect the tasks here and in imported playbooks.
 # Pre-upgrade

 - import_playbook: ../initialize_nodes_to_upgrade.yml
@@ -48,6 +50,8 @@
     # defined, and overriding the normal behavior of protecting the installed version
     openshift_release: "{{ openshift_upgrade_target }}"
     openshift_protect_installed_version: False
+    # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
+    # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml

 # If we're only upgrading nodes, we need to ensure masters are already upgraded
 - name: Verify masters are already upgraded
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 50be0dee0..e89f06f17 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -22,6 +22,8 @@
 # See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
 - name: Pre master upgrade - Upgrade all storage
   hosts: oo_first_master
+  roles:
+  - openshift_facts
   tasks:
   - name: Upgrade all storage
     command: >
@@ -49,10 +51,9 @@
   vars:
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
+  roles:
+  - openshift_facts
   tasks:
-  - import_role:
-      name: openshift_facts
-
   # Run the pre-upgrade hook if defined:
   - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
     when: openshift_master_upgrade_pre_hook is defined
@@ -108,7 +109,6 @@
 - name: Gate on master update
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       master_update_completed: "{{ hostvars
@@ -128,6 +128,7 @@
   hosts: oo_masters_to_config
   roles:
   - { role: openshift_cli }
+  - { role: openshift_facts }
   vars:
     __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
   tasks:
@@ -242,7 +243,6 @@
 - name: Gate on reconcile
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       reconcile_completed: "{{ hostvars
@@ -291,12 +291,18 @@

   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_control_plane_drain_result
     until: not (l_upgrade_control_plane_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_control_plane_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0

   roles:
   - openshift_facts
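The control-plane play above upgrades masters one at a time (serial: 1) and runs optional hook playbooks around each master, as the pre-upgrade hook debug task shows. A hedged sketch of wiring all three hooks in the inventory; the hook file paths are hypothetical, and each hook is itself an Ansible playbook run once per master:

    [OSEv3:vars]
    # Run before, during (after upgrade, before service restart),
    # and after each master's upgrade, respectively.
    openshift_master_upgrade_pre_hook=/usr/share/ansible/hooks/pre_master.yml
    openshift_master_upgrade_hook=/usr/share/ansible/hooks/mid_master.yml
    openshift_master_upgrade_post_hook=/usr/share/ansible/hooks/post_master.yml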
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 464af3ae6..850442b3b 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -33,12 +33,18 @@

   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      --force --delete-local-data --ignore-daemonsets
+      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)
-    retries: 60
-    delay: 60
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
+    delay: 5
+    failed_when:
+    - l_upgrade_nodes_drain_result is failed
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0

   post_tasks:
   - import_role:
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
index 6d59bfd0b..e259b5d09 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
@@ -50,11 +50,11 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not (l_upgrade_nodes_drain_result is failed)
-    retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0  | int }}"
+    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
     delay: 5
     failed_when:
     - l_upgrade_nodes_drain_result is failed
-    - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
+    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0

 # Alright, let's clean up!
 - name: clean up the old scale group
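The upgrade_scale_group.yml hunk fixes a Jinja2 typing and precedence bug: default(0) yields the integer 0, which never compares equal to the string '0', and in the old retries expression the | int filter bound only to the trailing literal 0, not to the whole conditional. A small illustration of the pitfall and the corrected pattern (x stands in for openshift_upgrade_nodes_drain_timeout):

    # 0 == '0'                 -> false in Jinja2; int and str never compare equal
    # 1 if x == '0' else 0 | int   -> int applies only to the else-branch 0
    # Coerce first, then compare:
    retries: "{{ 1 if ( x | default(0) | int ) == 0 else 0 }}"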
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index a956fdde5..eb5f07ae0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
       openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}"

 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 1750148d4..8d42e4c91 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
       openshift_upgrade_min: '3.6'

 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index 49e691352..9c7688981 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -7,6 +7,7 @@
   hosts: oo_first_master
   roles:
   - { role: lib_openshift }
+  - { role: openshift_facts }

   tasks:
   - name: Check for invalid namespaces and SDN errors
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 08bfd239f..a2f316c25 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
       openshift_upgrade_min: '3.7'

 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
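The new l_openshift_version_set_hosts and l_openshift_version_check_hosts vars narrow which hosts the version-setting and version-checking plays imported by ../pre/config.yml run against, keeping oo_nodes out of a control-plane-only upgrade. A sketch of the consuming side, assuming a play that falls back to a broad group when the variable is unset; this play is illustrative, not the actual init code:

    - name: Set openshift_version only on the requested hosts
      hosts: "{{ l_openshift_version_set_hosts | default('oo_all_hosts') }}"
      tasks:
      - debug:
          msg: "version would be set/verified here"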
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 0aea5069d..552bea5e7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -41,13 +41,13 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift.common.service_type }}-master-controllers
+  - name: Stop {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: stopped
-  - name: Start {{ openshift.common.service_type }}-master-controllers
+  - name: Start {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: started

 - import_playbook: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index 05aa737c6..ef9871008 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,6 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"

 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -23,7 +24,11 @@
       openshift_upgrade_min: '3.7'

 - import_playbook: ../pre/config.yml
+  # These vars are meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
   vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
     l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
     l_upgrade_no_proxy_hosts: "oo_masters_to_config"
     l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
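The v3_9/upgrade.yml hunks above also replace the deprecated openshift.common.service_type fact with the openshift_service_type variable, which resolves to the distribution's service prefix ("origin" or "atomic-openshift"). A minimal sketch of the resulting unit-name expansion, shown as a restart task for illustration only:

    # With openshift_service_type=atomic-openshift this manages the
    # atomic-openshift-master-controllers systemd unit.
    - name: Restart {{ openshift_service_type }}-master-controllers
      systemd:
        name: "{{ openshift_service_type }}-master-controllers"
        state: restarted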
