Diffstat (limited to 'playbooks/common')
21 files changed, 232 insertions, 65 deletions
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8ee83819e..ba783638d 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -5,7 +5,8 @@
     g_new_master_hosts: []
     g_new_node_hosts: []
 
-- import_playbook: ../../../init/facts.yml
+- import_playbook: ../../../init/basic_facts.yml
+- import_playbook: ../../../init/cluster_facts.yml
 
 - name: Ensure firewall is not switched during upgrade
   hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index fc1cbf32a..07be0b0d4 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -31,7 +31,7 @@
       with_items: " {{ groups['oo_nodes_to_config'] }}"
       when:
       - hostvars[item].openshift is defined
-      - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
+      - hostvars[item].openshift.common.hostname | lower in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
       changed_when: false
 
   # Build up the oo_nodes_to_upgrade group, use the list filtered by label if
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index f790fd98d..de612da21 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -6,7 +6,9 @@
   hosts: oo_first_master
   roles:
   - role: openshift_web_console
-    when: openshift_web_console_install | default(true) | bool
+    when:
+    - openshift_web_console_install | default(true) | bool
+    - openshift_upgrade_target is version_compare('3.9','>=')
 
 - name: Upgrade default router and default registry
   hosts: oo_first_master
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index da63450b8..2b27f8dd0 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -49,7 +49,7 @@
     # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
     # defined, and overriding the normal behavior of protecting the installed version
     openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
+    # openshift_protect_installed_version is passed n via upgrade_control_plane.yml
 
     # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
     # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 693ab2d96..5ee8a9d78 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -92,3 +92,25 @@
         state: started
         enabled: yes
       with_items: "{{ master_services }}"
+
+# Until openshift-ansible is determining which host is the CA host we
+# must (unfortunately) ensure that the first host in the etcd group is
+# the etcd CA host.
+# https://bugzilla.redhat.com/show_bug.cgi?id=1469358
+- name: Verify we can proceed on first etcd
+  hosts: oo_first_etcd
+  gather_facts: no
+  tasks:
+  - name: Ensure CA exists on first etcd
+    stat:
+      path: /etc/etcd/generated_certs
+    register: __etcd_ca_stat
+
+  - fail:
+      msg: >
+        In order to correct an etcd certificate signing problem
+        upgrading may require re-generating etcd certificates. Please
+        ensure that the /etc/etcd/generated_certs directory exists on
+        the first host defined in your [etcd] group.
+    when:
+    - not __etcd_ca_stat.stat.exists | bool
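
The verify_cluster.yml hunk above adds a fail-fast check that /etc/etcd/generated_certs exists on the first host in the [etcd] group. A minimal standalone sketch in the same spirit (hypothetical playbook name, reusing the same stat/fail pattern) lets that check be run on its own before an upgrade window:

---
# Hypothetical pre-flight playbook mirroring the check added above.
- name: Confirm the first etcd host holds the generated CA material
  hosts: oo_first_etcd
  gather_facts: no
  tasks:
  - name: Check for the etcd CA directory
    stat:
      path: /etc/etcd/generated_certs
    register: etcd_ca_dir

  - name: Stop early when the CA directory is missing
    fail:
      msg: >
        /etc/etcd/generated_certs was not found on the first [etcd] host;
        reorder the [etcd] group or regenerate etcd certificates before upgrading.
    when: not etcd_ca_dir.stat.exists | bool
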
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 4c1156f4b..45ddf7eea 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -21,7 +21,7 @@
   block:
   - name: Check latest available OpenShift RPM version
     repoquery:
-      name: "{{ openshift_service_type }}"
+      name: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is defined else '' }}"
       ignore_excluders: true
     register: repoquery_out
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index e89f06f17..a10fd4bee 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -2,6 +2,30 @@
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
+
+# Prior to 3.6, openshift-ansible created etcd serving certificates
+# without a SubjectAlternativeName entry for the system hostname. The
+# SAN list in Go 1.8 is now (correctly) authoritative and since
+# openshift-ansible configures masters to talk to etcd hostnames
+# rather than IP addresses, we must correct etcd certificates.
+#
+# This play examines the etcd serving certificate SANs on each etcd
+# host and records whether or not the system hostname is missing.
+- name: Examine etcd serving certificate SAN
+  hosts: oo_etcd_to_config
+  tasks:
+  - slurp:
+      src: /etc/etcd/server.crt
+    register: etcd_serving_cert
+  - set_fact:
+      __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}"
+
+# Redeploy etcd certificates when hostnames were missing from etcd
+# serving certificate SANs.
+- import_playbook: ../../../openshift-etcd/redeploy-certificates.yml
+  when:
+  - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false])
+
 
 - name: Backup and upgrade etcd
   import_playbook: ../../../openshift-etcd/private/upgrade_main.yml
@@ -310,13 +334,8 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-  - name: Set node schedulability
-    oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
-      schedulable: True
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    retries: 10
-    delay: 5
-    register: node_schedulable
-    until: node_schedulable is succeeded
-    when: node_unschedulable is changed
+  - import_role:
+      name: openshift_manage_node
+      tasks_from: config.yml
+    vars:
+      openshift_master_host: "{{ groups.oo_first_master.0 }}"
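
The SAN examination above relies on the repository's lib_utils_oo_parse_certificate_san filter. For a quick manual spot-check of the same condition, a sketch along these lines simply lists the DNS entries in the serving certificate with the openssl CLI (output is for human inspection; the certificate path and host group match the play above):

---
# Sketch: print the DNS SANs of the etcd serving certificate on each etcd host.
- name: Spot-check etcd serving certificate SANs
  hosts: oo_etcd_to_config
  tasks:
  - name: Dump the certificate as text
    command: openssl x509 -in /etc/etcd/server.crt -noout -text
    register: etcd_cert_text
    changed_when: false

  - name: Show the SAN lines (the system hostname should appear as a DNS entry)
    debug:
      msg: "{{ etcd_cert_text.stdout_lines | select('search', 'DNS:') | list }}"
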
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 850442b3b..915fae9fd 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -50,16 +50,11 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-  - name: Set node schedulability
-    oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
-      schedulable: True
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    retries: 10
-    delay: 5
-    register: node_schedulable
-    until: node_schedulable is succeeded
-    when: node_unschedulable is changed
+  - import_role:
+      name: openshift_manage_node
+      tasks_from: config.yml
+    vars:
+      openshift_master_host: "{{ groups.oo_first_master.0 }}"
 
 - name: Re-enable excluders
   hosts: oo_nodes_to_upgrade:!oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index d520c6aee..a2d21b69f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -23,6 +23,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 - import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index eb5f07ae0..9aa5a3b64 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -35,6 +35,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 - import_playbook: validator.yml
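
The rename from l_upgrade_non_node_hosts to l_init_fact_hosts in the hunk above suggests the init plays now take an explicit fact-gathering host list. The consuming pattern presumably looks roughly like the sketch below; this is an illustration of the variable's intent, not the repository's actual init/basic_facts.yml:

---
# Illustrative only: scope fact gathering to the hosts the caller passed in,
# defaulting to every host when no restriction is given.
- name: Gather facts on the requested hosts
  hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}"
  gather_facts: yes
  tasks:
  - name: Mark that initialization facts ran on this host
    set_fact:
      l_init_facts_gathered: True
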
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index 4daa9e490..cc2ec2709 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -23,6 +23,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 - import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 8d42e4c91..b1ecc75d3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -14,7 +14,7 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -35,6 +35,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 - import_playbook: validator.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 0f74e0137..a73b7d63a 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -23,6 +23,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 - import_playbook: validator.yml
 
@@ -35,8 +36,6 @@
 
 # Pre-upgrade completed
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index a2f316c25..723b2e533 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -14,7 +14,8 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+  when: not skip_version_info | default(false)
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
@@ -35,6 +36,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 - import_playbook: validator.yml
 
@@ -47,8 +49,6 @@
 
 # Pre-upgrade completed
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
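
One detail worth noting in the v3_8 hunk above: in "when: not skip_version_info | default(false)", the default() filter binds tighter than not, so the expression evaluates as not (skip_version_info | default(false)) and the init playbook is imported unless the caller explicitly sets skip_version_info to true. A tiny self-contained check of that behavior (hypothetical playbook, assert module only):

---
# Demonstration of the guard's evaluation order; run against localhost.
- hosts: localhost
  gather_facts: no
  tasks:
  - name: Guard holds when skip_version_info is undefined
    assert:
      that:
      - not skip_version_info | default(false)

  - name: Guard is false once skip_version_info is set to true
    vars:
      skip_version_info: true
    assert:
      that:
      - (not skip_version_info | default(false)) == false
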
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
index 1d4d1919c..ed97d539c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml
@@ -1,20 +1 @@
 ---
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.election.lockName'
-    yaml_value: 'openshift-master-controllers'
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
-    yaml_value: service-signer.crt
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
-    yaml_value: service-signer.key
-
-- modify_yaml:
-    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
-    yaml_key: servingInfo.clientCA
-    yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
index 552bea5e7..bf6e8605e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
@@ -10,6 +10,7 @@
   - set_fact:
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.7'
+      openshift_release: '3.9'
 
 - import_playbook: ../pre/config.yml
   vars:
@@ -19,6 +20,7 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
+    openshift_protect_installed_version: False
 
 - import_playbook: validator.yml
 
@@ -31,8 +33,6 @@
 
 # Pre-upgrade completed
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
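
With openshift_release now pinned alongside openshift_upgrade_target in the 3.9 entry points, a small consistency assertion can catch an inventory that pins a conflicting release before any host is modified. A sketch, assuming both variables are already set as in the hunks above:

---
# Sketch: fail early if the pinned release disagrees with the upgrade target.
- name: Verify the release pin matches the upgrade target
  hosts: oo_first_master
  gather_facts: no
  tasks:
  - assert:
      that:
      - openshift_release is version_compare(openshift_upgrade_target, '==')
      msg: openshift_release and openshift_upgrade_target must agree for this upgrade path
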
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index ef9871008..fe1fdefff 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -14,14 +14,28 @@
 - import_playbook: ../init.yml
   vars:
     l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
-    l_upgrade_non_node_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
-- name: Configure the upgrade target for the common upgrade tasks
+## Check to see if they're running 3.7 and if so upgrade them to 3.8 on control plan
+## If they've specified pkg_version or image_tag preserve that for later use
+- name: Configure the upgrade target for the common upgrade tasks 3.8
   hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
   - set_fact:
-      openshift_upgrade_target: '3.9'
+      openshift_upgrade_target: '3.8'
       openshift_upgrade_min: '3.7'
+      openshift_release: '3.8'
+      _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}"
+      _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"
+      l_double_upgrade_cp: True
+    when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+  - name: set l_force_image_tag_to_version = True
+    set_fact:
+      # Need to set this during 3.8 upgrade to ensure image_tag is set correctly
+      # to match 3.8 version
+      l_force_image_tag_to_version: True
+    when: _requested_image_tag is defined
 
 - import_playbook: ../pre/config.yml
   # These vars a meant to exclude oo_nodes from plays that would otherwise include
@@ -35,21 +49,72 @@
     l_upgrade_verify_targets_hosts: "oo_masters_to_config"
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
+  when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
 
-- import_playbook: validator.yml
-
-- name: Flag pre-upgrade checks complete for hosts without errors
+- name: Flag pre-upgrade checks complete for hosts without errors 3.8
   hosts: oo_masters_to_config:oo_etcd_to_config
   tasks:
   - set_fact:
       pre_upgrade_complete: True
+    when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
 
 # Pre-upgrade completed
+- import_playbook: ../upgrade_control_plane.yml
+  vars:
+    openshift_release: '3.8'
+  when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')
+
+## 3.8 upgrade complete we should now be able to upgrade to 3.9
+
+- name: Configure the upgrade target for the common upgrade tasks 3.9
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tasks:
+  - meta: clear_facts
+  - set_fact:
+      openshift_upgrade_target: '3.9'
+      openshift_upgrade_min: '3.8'
+      openshift_release: '3.9'
+      openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}"
+  # Set the user's specified image_tag for 3.9 upgrade if it was provided.
+  - set_fact:
+      openshift_image_tag: "{{ _requested_image_tag }}"
+      l_force_image_tag_to_version: False
+    when: _requested_image_tag is defined
+  # If the user didn't specify an image_tag, we need to force update image_tag
+  # because it will have already been set during 3.8.  If we aren't running
+  # a double upgrade, then we can preserve image_tag because it will still
+  # be the user provided value.
+  - set_fact:
+      l_force_image_tag_to_version: True
+    when:
+    - l_double_upgrade_cp is defined and l_double_upgrade_cp
+    - _requested_image_tag is not defined
+
+- import_playbook: ../pre/config.yml
+  # These vars a meant to exclude oo_nodes from plays that would otherwise include
+  # them by default.
+  vars:
+    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master"
+    l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master"
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
+    openshift_protect_installed_version: False
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - set_fact:
+      pre_upgrade_complete: True
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:
-    master_config_hook: "v3_7/master_config_upgrade.yml"
+    openshift_release: '3.9'
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
@@ -58,13 +123,19 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift.common.service_type }}-master-controllers
+  - name: Stop {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: stopped
-  - name: Start {{ openshift.common.service_type }}-master-controllers
+  - name: Start {{ openshift_service_type }}-master-controllers
     systemd:
-      name: "{{ openshift.common.service_type }}-master-controllers"
+      name: "{{ openshift_service_type }}-master-controllers"
       state: started
 
 - import_playbook: ../post_control_plane.yml
+
+- hosts: oo_masters
+  tasks:
+  - import_role:
+      name: openshift_web_console
+      tasks_from: remove_old_asset_config
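
The rewritten v3_9 control-plane play above performs a double hop: when the first master still reports a version below 3.8 it first drives a 3.7 to 3.8 control-plane upgrade (setting openshift_upgrade_target, openshift_release and the _requested_* facts), then clears facts and repeats the pre-upgrade and upgrade steps for 3.9. A condensed probe of the gate it keys on, meant to be run against the upgrade inventory, might look like this (debug only, no changes made):

---
# Sketch: report whether the intermediate 3.8 control-plane leg would run.
- name: Inspect the double-upgrade gate
  hosts: oo_first_master
  gather_facts: no
  tasks:
  - debug:
      msg: >-
        First master reports
        {{ hostvars[groups.oo_first_master.0].openshift_currently_installed_version | default('unknown') }};
        3.8 leg required:
        {{ hostvars[groups.oo_first_master.0].openshift_currently_installed_version | default('0.0') | version_compare('3.8', '<') }}
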
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
index 1d1b255c1..859b1d88b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
@@ -12,6 +12,7 @@
   - set_fact:
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.7'
+      openshift_release: '3.9'
 
 - import_playbook: ../pre/config.yml
   vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
index 4bd2d87b1..d8540abfb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml
@@ -1,5 +1,5 @@
 ---
-- name: Verify 3.9 specific upgrade checks
+- name: Verify 3.8 specific upgrade checks
   hosts: oo_first_master
   roles:
   - { role: lib_openshift }
diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml
new file mode 100644
index 000000000..089645d07
--- /dev/null
+++ b/playbooks/common/private/components.yml
@@ -0,0 +1,38 @@
+---
+# These are the core component plays that configure the layers above the control
+# plane. A component is generally considered any part of OpenShift that runs on
+# top of the cluster and may be considered optional. Over time, much of OpenShift
+# above the Kubernetes apiserver and masters may be considered components.
+#
+# Preconditions:
+#
+# 1. The control plane is configured and reachable from nodes inside the cluster
+# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can
+#    perform root level actions against the cluster
+# 3. On cloud providers, persistent volume provisioners are configured
+# 4. A subset of nodes is available to allow components to schedule - this must
+#    include the masters and usually includes infra nodes.
+# 5. The init/main.yml playbook has been invoked
+
+- import_playbook: ../../openshift-glusterfs/private/config.yml
+  when: groups.oo_glusterfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-hosted/private/config.yml
+
+- import_playbook: ../../openshift-web-console/private/config.yml
+  when: openshift_web_console_install | default(true) | bool
+
+- import_playbook: ../../openshift-metrics/private/config.yml
+  when: openshift_metrics_install_metrics | default(false) | bool
+
+- import_playbook: ../../openshift-logging/private/config.yml
+  when: openshift_logging_install_logging | default(false) | bool
+
+- import_playbook: ../../openshift-prometheus/private/config.yml
+  when: openshift_hosted_prometheus_deploy | default(false) | bool
+
+- import_playbook: ../../openshift-service-catalog/private/config.yml
+  when: openshift_enable_service_catalog | default(true) | bool
+
+- import_playbook: ../../openshift-management/private/config.yml
+  when: openshift_management_install_management | default(false) | bool
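
The when: guards in the new components.yml double as the inventory switches for each component, and their default() filters spell out the out-of-the-box behavior (the GlusterFS play is gated on membership in the oo_glusterfs_to_config group rather than a variable). Written out explicitly, for example in an inventory group_vars file, the toggles and their implied defaults are:

---
# Component toggles implied by components.yml, shown with their default values.
openshift_web_console_install: true
openshift_metrics_install_metrics: false
openshift_logging_install_logging: false
openshift_hosted_prometheus_deploy: false
openshift_enable_service_catalog: true
openshift_management_install_management: false
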
diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml
new file mode 100644
index 000000000..0a5f1142b
--- /dev/null
+++ b/playbooks/common/private/control_plane.yml
@@ -0,0 +1,34 @@
+---
+# These are the control plane plays that configure a control plane on top of hosts
+# identified as masters. Over time, some of the pieces of the current control plane
+# may be moved to the components list.
+#
+# It is not required for any nodes to be configured, or passed to be configured,
+# when this playbook is invoked.
+#
+# Preconditions:
+#
+# 1. A set of machines have been identified to act as masters
+# 2. On cloud providers, a load balancer has been configured to point to the masters
+#    and that load balancer has a DNS name
+# 3. The init/main.yml playbook has been invoked
+#
+# Postconditions:
+#
+# 1. The control plane is reachable from the outside of the cluster
+# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin
+#    access.
+
+- import_playbook: ../../openshift-checks/private/install.yml
+
+- import_playbook: ../../openshift-etcd/private/config.yml
+
+- import_playbook: ../../openshift-nfs/private/config.yml
+  when: groups.oo_nfs_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-loadbalancer/private/config.yml
+  when: groups.oo_lb_to_config | default([]) | count > 0
+
+- import_playbook: ../../openshift-master/private/config.yml
+
+- import_playbook: ../../openshift-master/private/additional_config.yml
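
Taken together, control_plane.yml and components.yml split the old monolithic configuration flow into two entry points that a top-level playbook can chain. The wiring below is illustrative only; the node configuration path is assumed, and the repository's real deploy playbook may order things differently:

---
# Illustrative top-level wiring of the two new entry points.
- import_playbook: init/main.yml

- import_playbook: common/private/control_plane.yml

# Nodes must be available before components can schedule (see the preconditions
# in components.yml); path assumed for illustration.
- import_playbook: openshift-node/private/config.yml

- import_playbook: common/private/components.yml
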
