Diffstat (limited to 'playbooks/common/openshift-cluster')
84 files changed, 1583 insertions, 739 deletions
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
deleted file mode 100644
index c0ea93d2c..000000000
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Additional master configuration
-  hosts: oo_first_master
-  vars:
-    cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
-    etcd_urls: "{{ openshift.master.etcd_urls }}"
-    openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
-    omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
-  roles:
-  - role: openshift_master_cluster
-    when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
-  - role: openshift_examples
-    registry_url: "{{ openshift.master.registry_url }}"
-    when: openshift.common.install_examples | bool
-  - role: openshift_hosted_templates
-    registry_url: "{{ openshift.master.registry_url }}"
-  - role: openshift_manageiq
-    when: openshift.common.use_manageiq | bool
-  - role: cockpit
-    when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
-      (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
-  - role: flannel_register
-    when: openshift.common.use_flannel | bool
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
new file mode 100644
index 000000000..5ddafdb07
--- /dev/null
+++ b/playbooks/common/openshift-cluster/cockpit-ui.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - cockpit-ui
+  hosts: oo_first_master
+  roles:
+  - role: cockpit-ui
+    when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 1482b3a3f..244787985 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,56 +1,41 @@
 ---
-- include: initialize_oo_option_facts.yml
-  tags:
-  - always
-
-- name: Disable excluders
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  tags:
-  - always
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: disable
-    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+- include: ../openshift-checks/install.yml

 - include: ../openshift-etcd/config.yml
-  tags:
-  - etcd

 - include: ../openshift-nfs/config.yml
-  tags:
-  - nfs
+  when: groups.oo_nfs_to_config | default([]) | count > 0

 - include: ../openshift-loadbalancer/config.yml
-  tags:
-  - loadbalancer
+  when: groups.oo_lb_to_config | default([]) | count > 0

 - include: ../openshift-master/config.yml
-  tags:
-  - master

-- include: additional_config.yml
-  tags:
-  - master
+- include: ../openshift-master/additional_config.yml

 - include: ../openshift-node/config.yml
-  tags:
-  - node

 - include: ../openshift-glusterfs/config.yml
-  tags:
-  - glusterfs
+  when: groups.oo_glusterfs_to_config | default([]) | count > 0

 - include: openshift_hosted.yml
-  tags:
-  - hosted

-- name: Re-enable excluder if it was previously enabled
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  tags:
-  - always
+- include: openshift_metrics.yml
+  when: openshift_metrics_install_metrics | default(false) | bool
+
+- include: openshift_logging.yml
+  when: openshift_logging_install_logging | default(false) | bool
+
+- include: service_catalog.yml
+  when: openshift_enable_service_catalog | default(false) | bool
+
+- include: ../openshift-management/config.yml
+  when: openshift_management_install_management | default(false) | bool
+
+- name: Print deprecated variable warning message if necessary
+  hosts: oo_first_master
   gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: enable
-    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+  tasks:
+  - debug: msg="{{__deprecation_message}}"
+    when:
+    - __deprecation_message | default ('') | length > 0
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
new file mode 100644
index 000000000..ec6f2c52c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -0,0 +1,18 @@
+---
+- name: Create persistent volumes
+  hosts: oo_first_master
+  vars:
+    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+  tasks:
+  - debug: var=persistent_volumes
+  - debug: var=persistent_volume_claims
+
+- name: Create Hosted Resources - persistent volumes
+  hosts: oo_first_master
+  vars:
+    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+  roles:
+  - role: openshift_persistent_volumes
+    when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
index 5425f448f..be14b06f0 100644
--- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml
@@ -27,9 +27,6 @@
       role: "{{ item.role }}"
       local_facts: "{{ item.local_facts }}"
     with_items:
-    - role: common
-      local_facts:
-        use_dnsmasq: True
     - role: master
       local_facts:
         dns_port: '8053'
@@ -37,7 +34,7 @@
       dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
       yaml_key: dnsConfig.bindAddress
       yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}"
-    notify: restart master
+    notify: restart master api
   - meta: flush_handlers

 - name: Configure nodes for dnsmasq
@@ -50,9 +47,6 @@
       role: "{{ item.role }}"
       local_facts: "{{ item.local_facts }}"
     with_items:
-    - role: common
-      local_facts:
-        use_dnsmasq: True
     - role: node
       local_facts:
         dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c28ce4c14..78b552279 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -5,20 +5,20 @@
   become: no
   gather_facts: no
   tasks:
-  - name: Evaluate groups - g_etcd_hosts required
+  - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required
     fail:
-      msg: This playbook requires g_etcd_hosts to be set
-    when: g_etcd_hosts is not defined
+      msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set
+    when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined

   - name: Evaluate groups - g_master_hosts or g_new_master_hosts required
     fail:
       msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
-    when: g_master_hosts is not defined or g_new_master_hosts is not defined
+    when: g_master_hosts is not defined and g_new_master_hosts is not defined

   - name: Evaluate groups - g_node_hosts or g_new_node_hosts required
     fail:
       msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
-    when: g_node_hosts is not defined or g_new_node_hosts is not defined
+    when: g_node_hosts is not defined and g_new_node_hosts is not defined

   - name: Evaluate groups - g_lb_hosts required
     fail:
@@ -33,13 +33,26 @@
   - name: Evaluate groups - g_nfs_hosts is single host
     fail:
       msg: The nfs group must be limited to one host
-    when: (groups[g_nfs_hosts] | default([])) | length > 1
+    when: g_nfs_hosts | default([]) | length > 1

   - name: Evaluate groups - g_glusterfs_hosts required
     fail:
       msg: This playbook requires g_glusterfs_hosts to be set
     when: g_glusterfs_hosts is not defined

+  - name: Evaluate groups - Fail if no etcd hosts group is defined
+    fail:
+      msg: >
+        Running etcd as an embedded service is no longer supported. If this is a
+        new install please define an 'etcd' group with either one or three
+        hosts. These hosts may be the same hosts as your masters. If this is an
+        upgrade you may set openshift_master_unsupported_embedded_etcd=true
+        until a migration playbook becomes available.
+    when:
+    - g_etcd_hosts | default([]) | length not in [3,1]
+    - not openshift_master_unsupported_embedded_etcd | default(False)
+    - not (openshift_node_bootstrap | default(False))
+
   - name: Evaluate oo_all_hosts
     add_host:
       name: "{{ item }}"
@@ -67,6 +80,15 @@
     when: g_master_hosts|length > 0
     changed_when: no

+  - name: Evaluate oo_new_etcd_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_new_etcd_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_new_etcd_hosts | default([]) }}"
+    changed_when: no
+
   - name: Evaluate oo_masters_to_config
     add_host:
       name: "{{ item }}"
@@ -108,7 +130,7 @@
     add_host:
       name: "{{ item }}"
       groups: oo_etcd_hosts_to_backup
-    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
+    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}"
     changed_when: False

   - name: Evaluate oo_nodes_to_config
@@ -157,3 +179,12 @@
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"
     changed_when: no
+
+  - name: Evaluate oo_etcd_to_migrate
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_to_migrate
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_become: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}"
+    changed_when: no
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index 9cebecd68..91223d368 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -6,12 +6,164 @@

 - name: Initialize host facts
   hosts: oo_all_hosts
-  roles:
-  - openshift_facts
   tasks:
-  - openshift_facts:
+  - name: load openshift_facts module
+    include_role:
+      name: openshift_facts
+    static: yes
+
+  # TODO: Should this role be refactored into health_checks??
+  - name: Run openshift_sanitize_inventory to set variables
+    include_role:
+      name: openshift_sanitize_inventory
+
+  - name: Detecting Operating System from ostree_booted
+    stat:
+      path: /run/ostree-booted
+    register: ostree_booted
+
+  # Locally setup containerized facts for now
+  - name: initialize_facts set fact l_is_atomic
+    set_fact:
+      l_is_atomic: "{{ ostree_booted.stat.exists }}"
+
+  - name: initialize_facts set fact for containerized and l_is_*_system_container
+    set_fact:
+      l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
+      l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+      l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+      l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+      l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
+  - name: initialize_facts set facts for l_any_system_container
+    set_fact:
+      l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}"
+
+  - name: initialize_facts set fact for l_etcd_runtime
+    set_fact:
+      l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
+
+  # TODO: Should this be moved into health checks??
+  # Seems as though any check that happens with a corresponding fail should move into health_checks
+  - name: Validate python version - ans_dist is fedora and python is v3
+    fail:
+      msg: |
+        openshift-ansible requires Python 3 for {{ ansible_distribution }};
+        For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html
+    when:
+    - ansible_distribution == 'Fedora'
+    - ansible_python['version']['major'] != 3
+
+  # TODO: Should this be moved into health checks??
+  # Seems as though any check that happens with a corresponding fail should move into health_checks
+  - name: Validate python version - ans_dist not Fedora and python must be v2
+    fail:
+      msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}"
+    when:
+    - ansible_distribution != 'Fedora'
+    - ansible_python['version']['major'] != 2
+
+  # TODO: Should this be moved into health checks??
+  # Seems as though any check that happens with a corresponding fail should move into health_checks
+  # Fail as early as possible if Atomic and old version of Docker
+  - when:
+    - l_is_atomic | bool
+    block:
+
+    # See https://access.redhat.com/articles/2317361
+    # and https://github.com/ansible/ansible/issues/15892
+    # NOTE: the "'s can not be removed at this level else the docker command will fail
+    # NOTE: When ansible >2.2.1.x is used this can be updated per
+    # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121
+    - name: Determine Atomic Host Docker Version
+      shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
+      register: l_atomic_docker_version
+
+    - name: assert atomic host docker version is 1.12 or later
+      assert:
+        that:
+        - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+        msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
+
+  - when:
+    - not l_is_atomic | bool
+    block:
+    - name: Ensure openshift-ansible installer package deps are installed
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items:
+      - iproute
+      - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
+      - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
+      - yum-utils
+
+    - name: Ensure various deps for running system containers are installed
+      package:
+        name: "{{ item }}"
+        state: present
+      with_items:
+      - atomic
+      - ostree
+      - runc
+      when:
+      - l_any_system_container | bool
+
+    - name: Default system_images_registry to a enterprise registry
+      set_fact:
+        system_images_registry: "registry.access.redhat.com"
+      when:
+      - system_images_registry is not defined
+      - openshift_deployment_type == "openshift-enterprise"
+
+    - name: Default system_images_registry to community registry
+      set_fact:
+        system_images_registry: "docker.io"
+      when:
+      - system_images_registry is not defined
+      - openshift_deployment_type == "origin"
+
+  - name: Gather Cluster facts and set is_containerized if needed
+    openshift_facts:
       role: common
       local_facts:
+        deployment_type: "{{ openshift_deployment_type }}"
+        deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}"
+        cli_image: "{{ osm_image | default(None) }}"
         hostname: "{{ openshift_hostname | default(None) }}"
-  - set_fact:
-      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+        ip: "{{ openshift_ip | default(None) }}"
+        is_containerized: "{{ l_is_containerized | default(None) }}"
+        is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}"
+        is_node_system_container: "{{ l_is_node_system_container | default(false) }}"
+        is_master_system_container: "{{ l_is_master_system_container | default(false) }}"
+        is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}"
+        etcd_runtime: "{{ l_etcd_runtime }}"
+        system_images_registry: "{{ system_images_registry }}"
+        public_hostname: "{{ openshift_public_hostname | default(None) }}"
+        public_ip: "{{ openshift_public_ip | default(None) }}"
+        portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
+        http_proxy: "{{ openshift_http_proxy | default(None) }}"
+        https_proxy: "{{ openshift_https_proxy | default(None) }}"
+        no_proxy: "{{ openshift_no_proxy | default(None) }}"
+        generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
+
+  - name: Set fact of no_proxy_internal_hostnames
+    openshift_facts:
+      role: common
+      local_facts:
+        no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                             | union(groups['oo_masters_to_config'])
+                                             | union(groups['oo_etcd_to_config'] | default([])))
+                                         | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                         }}"
+    when:
+    - openshift_http_proxy is defined or openshift_https_proxy is defined
+    - openshift_generate_no_proxy_hosts | default(True) | bool
+
+  - name: initialize_facts set_fact repoquery command
+    set_fact:
+      repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
+  - name: initialize_facts set_fact on openshift_docker_hosted_registry_network
+    set_fact:
+      openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
deleted file mode 100644
index ac3c702a0..000000000
--- a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Set oo_option facts
-  hosts: oo_all_hosts
-  tags:
-  - always
-  tasks:
-  - set_fact:
-      openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
-    when: openshift_docker_additional_registries is not defined
-  - set_fact:
-      openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}"
-    when: openshift_docker_insecure_registries is not defined
-  - set_fact:
-      openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}"
-    when: openshift_docker_blocked_registries is not defined
-  - set_fact:
-      openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}"
-    when: openshift_docker_options is not defined
-  - set_fact:
-      openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}"
-    when: openshift_docker_log_driver is not defined
-  - set_fact:
-      openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
-    when: openshift_docker_log_options is not defined
-  - set_fact:
-      openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}"
-    when: openshift_docker_selinux_enabled is not defined
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
new file mode 100644
index 000000000..a7114fc80
--- /dev/null
+++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml
@@ -0,0 +1,8 @@
+---
+- name: Setup yum repositories for all hosts
+  hosts: oo_all_hosts
+  gather_facts: no
+  tasks:
+  - name: initialize openshift repos
+    include_role:
+      name: openshift_repos
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index f4e52869e..37a5284d5 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,24 +1,5 @@
 ---
 # NOTE: requires openshift_facts be run
-- name: Verify compatible yum/subscription-manager combination
-  hosts: oo_all_hosts
-  gather_facts: no
-  tasks:
-  # See:
-  # https://bugzilla.redhat.com/show_bug.cgi?id=1395047
-  # https://bugzilla.redhat.com/show_bug.cgi?id=1282961
-  # https://github.com/openshift/openshift-ansible/issues/1138
-  # Consider the repoquery module for this work
-  - name: Check for bad combinations of yum and subscription-manager
-    command: >
-      {{ repoquery_cmd }} --installed --qf '%{version}' "yum"
-    register: yum_ver_test
-    changed_when: false
-    when: not openshift.common.is_atomic | bool
-  - fail:
-      msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
-    when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
-
 - name: Determine openshift_version to configure on first master
   hosts: oo_first_master
   roles:
@@ -27,9 +8,14 @@
 # NOTE: We set this even on etcd hosts as they may also later run as masters,
 # and we don't want to install wrong version of docker and have to downgrade
 # later.
-- name: Set openshift_version for all hosts
-  hosts: oo_all_hosts:!oo_first_master
+- name: Set openshift_version for etcd, node, and master hosts
+  hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
   vars:
     openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+  pre_tasks:
+  - set_fact:
+      openshift_pkg_version: -{{ openshift_version }}
+    when: openshift_pkg_version is not defined
+  - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}"
   roles:
   - openshift_version
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
new file mode 100644
index 000000000..4b4f19690
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
@@ -0,0 +1,6 @@
+---
+- name: Create Hosted Resources - openshift_default_storage_class
+  hosts: oo_first_master
+  roles:
+  - role: openshift_default_storage_class
+    when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 8d94b6509..c1536eb36 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,72 +1,35 @@
 ---
-- name: Create persistent volumes
-  hosts: oo_first_master
-  tags:
-  - hosted
-  vars:
-    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
-    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
-  roles:
-  - role: openshift_persistent_volumes
-    when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
+- name: Hosted Install Checkpoint Start
+  hosts: oo_all_hosts
+  gather_facts: false
+  tasks:
+  - name: Set Hosted install 'In Progress'
+    set_stats:
+      data:
+        installer_phase_hosted: "In Progress"
+      aggregate: false

-- name: Create Hosted Resources
-  hosts: oo_first_master
-  tags:
-  - hosted
-  pre_tasks:
-  - set_fact:
-      openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
-      openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
-    when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
-  - set_fact:
-      logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
-      logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
-      logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
-      logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
-      logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
-  roles:
-  - role: openshift_hosted
-  - role: openshift_metrics
-    when: openshift_hosted_metrics_deploy | default(false) | bool
-  - role: openshift_logging
-    when: openshift_hosted_logging_deploy | default(false) | bool
-    openshift_hosted_logging_hostname: "{{ logging_hostname }}"
-    openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
-    openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
-    openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
-    openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
-    openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
-    openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"
-    openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
-    openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
-    openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else '' }}"
-    openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_hosted_loggingops_storage_kind | default(none) =='dynamic' else '' }}"
+- include: create_persistent_volumes.yml

-  - role: cockpit-ui
-    when: ( openshift.common.version_gte_3_3_or_1_3 | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) and not (openshift.docker.hosted_registry_insecure | default(false) | bool)
-  - role: openshift_default_storage_class
-    when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
+- include: openshift_default_storage_class.yml

-- name: Update master-config for publicLoggingURL
-  hosts: oo_masters_to_config:!oo_first_master
-  tags:
-  - hosted
-  pre_tasks:
-  - set_fact:
-      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
-  - set_fact:
-      openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
-  tasks:
+- include: openshift_hosted_create_projects.yml
+
+- include: openshift_hosted_router.yml

-  - block:
-    - include_role:
-        name: openshift_logging
-        tasks_from: update_master_config
-    when: openshift_hosted_logging_deploy | default(false) | bool
+- include: openshift_hosted_registry.yml

-  - block:
-    - include_role:
-        name: openshift_metrics
-        tasks_from: update_master_config
-    when: openshift_hosted_metrics_deploy | default(false) | bool
+- include: cockpit-ui.yml
+
+- include: openshift_prometheus.yml
+  when: openshift_hosted_prometheus_deploy | default(False) | bool
+
+- name: Hosted Install Checkpoint End
+  hosts: oo_all_hosts
+  gather_facts: false
+  tasks:
+  - name: Set Hosted install 'Complete'
+    set_stats:
+      data:
+        installer_phase_hosted: "Complete"
+      aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
new file mode 100644
index 000000000..d5ca5185c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
@@ -0,0 +1,7 @@
+---
+- name: Create Hosted Resources - openshift projects
+  hosts: oo_first_master
+  tasks:
+  - include_role:
+      name: openshift_hosted
+      tasks_from: create_projects.yml
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
new file mode 100644
index 000000000..2a91a827c
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - registry
+  hosts: oo_first_master
+  tasks:
+  - set_fact:
+      openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+    when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+  - include_role:
+      name: openshift_hosted
+      tasks_from: registry.yml
+    when:
+    - openshift_hosted_manage_registry | default(True) | bool
+    - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_router.yml b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
new file mode 100644
index 000000000..bcb5a34a4
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
@@ -0,0 +1,13 @@
+---
+- name: Create Hosted Resources - router
+  hosts: oo_first_master
+  tasks:
+  - set_fact:
+      openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+    when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+  - include_role:
+      name: openshift_hosted
+      tasks_from: router.yml
+    when:
+    - openshift_hosted_manage_router | default(True) | bool
+    - openshift_hosted_router_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 57580406c..529a4c939 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,5 +1,13 @@
 ---
-- include: evaluate_groups.yml
+- name: Logging Install Checkpoint Start
+  hosts: oo_all_hosts
+  gather_facts: false
+  tasks:
+  - name: Set Logging install 'In Progress'
+    set_stats:
+      data:
+        installer_phase_logging: "In Progress"
+      aggregate: false

 - name: OpenShift Aggregated Logging
   hosts: oo_first_master
@@ -13,4 +21,13 @@
   - include_role:
       name: openshift_logging
       tasks_from: update_master_config
-  when: openshift_logging_install_logging | default(false) | bool
+
+- name: Logging Install Checkpoint End
+  hosts: oo_all_hosts
+  gather_facts: false
+  tasks:
+  - name: Set Logging install 'Complete'
+    set_stats:
+      data:
+        installer_phase_logging: "Complete"
+      aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index bcff4a1a1..9c0bd489b 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,7 +1,34 @@
 ---
-- include: evaluate_groups.yml
+- name: Metrics Install Checkpoint Start
+  hosts: oo_all_hosts
+  gather_facts: false
+  tasks:
+  - name: Set Metrics install 'In Progress'
+    set_stats:
+      data:
+        installer_phase_metrics: "In Progress"
+      aggregate: false

 - name: OpenShift Metrics
   hosts: oo_first_master
   roles:
-  - openshift_metrics
+  - role: openshift_metrics
+
+- name: OpenShift Metrics
+  hosts: oo_masters:!oo_first_master
+  serial: 1
+  tasks:
+  - name: Setup the non-first masters configs
+    include_role:
+      name: openshift_metrics
+      tasks_from: update_master_config.yaml
+
+- name: Metrics Install Checkpoint End
+  hosts: oo_all_hosts
+  gather_facts: false
+  tasks:
+  - name: Set Metrics install 'Complete'
+    set_stats:
+      data:
+        installer_phase_metrics: "Complete"
+      aggregate: false
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
new file mode 100644
index 000000000..a73b294a5
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -0,0 +1,25 @@
+---
+- name: Prometheus Install Checkpoint Start
+  hosts: oo_all_hosts
+  gather_facts: false
+  tasks:
+  - name: Set Prometheus install 'In Progress'
+    set_stats:
+      data:
+        installer_phase_prometheus: "In Progress"
+      aggregate: false
+
+- name: Create Hosted Resources - openshift_prometheus
+  hosts: oo_first_master
+  roles:
+  - role: openshift_prometheus
+
+- name: Prometheus Install Checkpoint End
+  hosts: oo_all_hosts
+  gather_facts: false
+  tasks:
+  - name: Set Prometheus install 'Complete'
+    set_stats:
+      data:
+        installer_phase_prometheus: "Complete"
+      aggregate: false
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
new file mode 100644
index 000000000..4a9fbf7eb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/check-expiry.yml
@@ -0,0 +1,12 @@
+---
+- name: Check cert expirys
+  hosts: "{{ g_check_expiry_hosts }}"
+  vars:
+    openshift_certificate_expiry_show_all: yes
+  roles:
+  # Sets 'check_results' per host which contains health status for
+  # etcd, master and node certificates. We will use 'check_results'
+  # to determine if any certificates were expired prior to running
+  # this playbook. Service restarts will be skipped if any
+  # certificates were previously expired.
+  - role: openshift_certificate_expiry
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
new file mode 100644
index 000000000..d738c8207
--- /dev/null
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-backup.yml
@@ -0,0 +1,19 @@
+---
+- name: Backup and remove generated etcd certificates
+  hosts: oo_first_etcd
+  any_errors_fatal: true
+  tasks:
+  - include_role:
+      name: etcd
+      tasks_from: backup_generated_certificates
+  - include_role:
+      name: etcd
+      tasks_from: remove_generated_certificates
+
+- name: Backup deployed etcd certificates
+  hosts: oo_etcd_to_config
+  any_errors_fatal: true
+  tasks:
+  - include_role:
+      name: etcd
+      tasks_from: backup_server_certificates
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
index 6964e8567..044875d1c 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -13,34 +13,15 @@

 - name: Backup existing etcd CA certificate directories
   hosts: oo_etcd_to_config
-  roles:
-  - role: etcd_common
-    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
   tasks:
-  - name: Determine if CA certificate directory exists
-    stat:
-      path: "{{ etcd_ca_dir }}"
-    register: etcd_ca_certs_dir_stat
-  - name: Backup generated etcd certificates
-    command: >
-      tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz
-      {{ etcd_ca_dir }}
-    args:
-      warn: no
-    when: etcd_ca_certs_dir_stat.stat.exists | bool
-  - name: Remove CA certificate directory
-    file:
-      path: "{{ etcd_ca_dir }}"
-      state: absent
-    when: etcd_ca_certs_dir_stat.stat.exists | bool
+  - include_role:
+      name: etcd
+      tasks_from: backup_ca_certificates
+  - include_role:
+      name: etcd
+      tasks_from: remove_ca_certificates

-- name: Generate new etcd CA
-  hosts: oo_first_etcd
-  roles:
-  - role: openshift_etcd_ca
-    etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
-    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-    etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+- include: ../../openshift-etcd/ca.yml

 - name: Create temp directory for syncing certs
   hosts: localhost
@@ -55,52 +36,14 @@

 - name: Distribute etcd CA to etcd hosts
   hosts: oo_etcd_to_config
-  vars:
-    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-  roles:
-  - role: etcd_common
-    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
   tasks:
-  - name: Create a tarball of the etcd ca certs
-    command: >
-      tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz
-        -C {{ etcd_ca_dir }} .
-    args:
-      creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
-      warn: no
-    delegate_to: "{{ etcd_ca_host }}"
-    run_once: true
-  - name: Retrieve etcd ca cert tarball
-    fetch:
-      src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz"
-      dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
-      flat: yes
-      fail_on_missing: yes
-      validate_checksum: yes
-    delegate_to: "{{ etcd_ca_host }}"
-    run_once: true
-  - name: Ensure ca directory exists
-    file:
-      path: "{{ etcd_ca_dir }}"
-      state: directory
-  - name: Unarchive etcd ca cert tarballs
-    unarchive:
-      src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
-      dest: "{{ etcd_ca_dir }}"
-  - name: Read current etcd CA
-    slurp:
-      src: "{{ etcd_conf_dir }}/ca.crt"
-    register: g_current_etcd_ca_output
-  - name: Read new etcd CA
-    slurp:
-      src: "{{ etcd_ca_dir }}/ca.crt"
-    register: g_new_etcd_ca_output
-  - copy:
-      content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}"
-      dest: "{{ item }}/ca.crt"
-    with_items:
-    - "{{ etcd_conf_dir }}"
-    - "{{ etcd_ca_dir }}"
+  - include_role:
+      name: etcd
+      tasks_from: distribute_ca
+    vars:
+      r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+      etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
+      etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"

 - include: ../../openshift-etcd/restart.yml
   # Do not restart etcd when etcd certificates were previously expired.
@@ -111,17 +54,13 @@

 - name: Retrieve etcd CA certificate
   hosts: oo_first_etcd
-  roles:
-  - role: etcd_common
-    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
   tasks:
-  - name: Retrieve etcd CA certificate
-    fetch:
-      src: "{{ etcd_conf_dir }}/ca.crt"
-      dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/"
-      flat: yes
-      fail_on_missing: yes
-      validate_checksum: yes
+  - include_role:
+      name: etcd
+      tasks_from: retrieve_ca_certificates
+    vars:
+      etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}"
+      r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"

 - name: Distribute etcd CA to masters
   hosts: oo_masters_to_config
@@ -146,13 +85,19 @@
     changed_when: false

 - include: ../../openshift-master/restart.yml
-  # Do not restart masters when master certificates were previously expired.
-  when: ('expired' not in hostvars
-         | oo_select_keys(groups['oo_masters_to_config'])
-         | oo_collect('check_results.check_results.ocp_certs')
-         | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
-        and
-        ('expired' not in hostvars
-         | oo_select_keys(groups['oo_masters_to_config'])
-         | oo_collect('check_results.check_results.ocp_certs')
-         | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+  # Do not restart masters when master or etcd certificates were previously expired.
+  when:
+  # masters
+  - ('expired' not in hostvars
+     | oo_select_keys(groups['oo_masters_to_config'])
+     | oo_collect('check_results.check_results.ocp_certs')
+     | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+  - ('expired' not in hostvars
+     | oo_select_keys(groups['oo_masters_to_config'])
+     | oo_collect('check_results.check_results.ocp_certs')
+     | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
+  # etcd
+  - ('expired' not in (hostvars
+     | oo_select_keys(groups['etcd'])
+     | oo_collect('check_results.check_results.etcd')
+     | oo_collect('health')))
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
deleted file mode 100644
index 6b5c805e6..000000000
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-- name: Backup and remove generated etcd certificates
-  hosts: oo_first_etcd
-  any_errors_fatal: true
-  roles:
-  - role: etcd_common
-    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-  post_tasks:
-  - name: Determine if generated etcd certificates exist
-    stat:
-      path: "{{ etcd_conf_dir }}/generated_certs"
-    register: etcd_generated_certs_dir_stat
-  - name: Backup generated etcd certificates
-    command: >
-      tar -czf {{ etcd_conf_dir }}/etcd-generated-certificate-backup-{{ ansible_date_time.epoch }}.tgz
-      {{ etcd_conf_dir }}/generated_certs
-    args:
-      warn: no
-    when: etcd_generated_certs_dir_stat.stat.exists | bool
-  - name: Remove generated etcd certificates
-    file:
-      path: "{{ item }}"
-      state: absent
-    with_items:
-    - "{{ etcd_conf_dir }}/generated_certs"
-
-- name: Backup and removed deployed etcd certificates
-  hosts: oo_etcd_to_config
-  any_errors_fatal: true
-  roles:
-  - role: etcd_common
-    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-  post_tasks:
-  - name: Backup etcd certificates
-    command: >
-      tar -czvf /etc/etcd/etcd-server-certificate-backup-{{ ansible_date_time.epoch }}.tgz
-      {{ etcd_conf_dir }}/ca.crt
-      {{ etcd_conf_dir }}/server.crt
-      {{ etcd_conf_dir }}/server.key
-      {{ etcd_conf_dir }}/peer.crt
-      {{ etcd_conf_dir }}/peer.key
-    args:
-      warn: no
-
-- name: Redeploy etcd certificates
-  hosts: oo_etcd_to_config
-  any_errors_fatal: true
-  roles:
-  - role: openshift_etcd_server_certificates
-    etcd_certificates_redeploy: true
-    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-    etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
-    etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
-    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
-    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-
-- name: Redeploy etcd client certificates for masters
-  hosts: oo_masters_to_config
-  any_errors_fatal: true
-  roles:
-  - role: openshift_etcd_client_certificates
-    etcd_certificates_redeploy: true
-    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
-    etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
-    etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
-    etcd_cert_prefix: "master.etcd-"
-    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
-    openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
-    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
-  when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
index c30889d64..4dbc041b0 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters-backup.yml
@@ -1,5 +1,5 @@
 ---
-- name: Redeploy master certificates
+- name: Backup and remove master certificates
   hosts: oo_masters_to_config
   any_errors_fatal: true
   vars:
@@ -7,12 +7,12 @@
     openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
   pre_tasks:
   - stat:
-      path: "{{ openshift_generated_configs_dir }}"
+      path: "{{ openshift.common.config_base }}/generated-configs"
     register: openshift_generated_configs_dir_stat
   - name: Backup generated certificate and config directories
     command: >
       tar -czvf /etc/origin/master-node-cert-config-backup-{{ ansible_date_time.epoch }}.tgz
-      {{ openshift_generated_configs_dir }}
+      {{ openshift.common.config_base }}/generated-configs
       {{ openshift.common.config_base }}/master
     when: openshift_generated_configs_dir_stat.stat.exists
     delegate_to: "{{ openshift_ca_host }}"
@@ -22,7 +22,7 @@
       path: "{{ item }}"
       state: absent
     with_items:
-    - "{{ openshift_generated_configs_dir }}"
+    - "{{ openshift.common.config_base }}/generated-configs"
   - name: Remove generated certificates
     file:
      path: "{{ openshift.common.config_base }}/master/{{ item }}"
@@ -36,18 +36,3 @@
     - "openshift-master.crt"
     - "openshift-master.key"
     - "openshift-master.kubeconfig"
-  - name: Remove generated etcd client certificates
-    file:
-      path: "{{ openshift.common.config_base }}/master/{{ item }}"
-      state: absent
-    with_items:
-    - "master.etcd-client.crt"
-    - "master.etcd-client.key"
-    when: groups.oo_etcd_to_config | default([]) | length == 0
-  roles:
-  - role: openshift_master_certificates
-    openshift_master_etcd_hosts: "{{ hostvars
-                                     | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
-                                     | oo_collect('openshift.common.hostname')
-                                     | default(none, true) }}"
-    openshift_certificates_redeploy: true
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
index 4990a03f2..2ad84b3b9 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/nodes.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/nodes-backup.yml
@@ -22,8 +22,3 @@
       state: absent
     with_items:
     - "{{ openshift.common.config_base }}/node/ca.crt"
-  roles:
-  - role: openshift_node_certificates
-    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
-    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
-    openshift_certificates_redeploy: true
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 089ae6bbc..2068ed199 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -7,7 +7,7 @@
     when: not openshift.common.version_gte_3_2_or_1_2 | bool

 - name: Check cert expirys
-  hosts: oo_nodes_to_config:oo_masters_to_config
+  hosts: oo_nodes_to_config:oo_masters_to_config:oo_etcd_to_config
   vars:
     openshift_certificate_expiry_show_all: yes
   roles:
@@ -44,8 +44,8 @@
   - modify_yaml:
       dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
       yaml_key: servingInfo.clientCA
servingInfo.clientCA - yaml_value: ca-bundle.crt - when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca-bundle.crt' + yaml_value: ca.crt + when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt' - modify_yaml: dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: etcdClientInfo.ca @@ -105,25 +105,27 @@ - "ca.serial.txt" - "ca-bundle.crt" -- name: Generate new OpenShift CA certificate +- name: Create temporary directory for creating new CA certificate hosts: oo_first_master - pre_tasks: + tasks: - name: Create temporary directory for creating new CA certificate command: > mktemp -d /tmp/openshift-ansible-XXXXXXX register: g_new_openshift_ca_mktemp changed_when: false - roles: - - role: openshift_ca + +- name: Create OpenShift CA + hosts: oo_first_master + vars: # Set openshift_ca_config_dir to a temporary directory where CA # will be created. We'll replace the existing CA with the CA # created in the temporary directory. - openshift_ca_config_dir: "{{ g_new_openshift_ca_mktemp.stdout }}" + openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}" + roles: + - role: openshift_master_facts + - role: openshift_named_certificates + - role: openshift_ca openshift_ca_host: "{{ groups.oo_first_master.0 }}" - openshift_master_hostnames: "{{ hostvars - | oo_select_keys(groups['oo_masters_to_config'] | default([])) - | oo_collect('openshift.common.all_hostnames') - | oo_flatten | unique }}" - name: Create temp directory for syncing certs hosts: localhost @@ -209,16 +211,22 @@ with_items: "{{ client_users }}" - include: ../../openshift-master/restart.yml - # Do not restart masters when master certificates were previously expired. - when: ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) - and - ('expired' not in hostvars - | oo_select_keys(groups['oo_masters_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # Do not restart masters when master or etcd certificates were previously expired. + when: + # masters + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # etcd + - ('expired' not in (hostvars + | oo_select_keys(groups['etcd']) + | oo_collect('check_results.check_results.etcd') + | oo_collect('health'))) - name: Distribute OpenShift CA certificate to nodes hosts: oo_nodes_to_config @@ -268,13 +276,28 @@ changed_when: false - include: ../../openshift-node/restart.yml - # Do not restart nodes when node certificates were previously expired. 
- when: ('expired' not in hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"})) - and - ('expired' not in hostvars - | oo_select_keys(groups['oo_nodes_to_config']) - | oo_collect('check_results.check_results.ocp_certs') - | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"})) + # Do not restart nodes when node, master or etcd certificates were previously expired. + when: + # nodes + - ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"})) + - ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"})) + # masters + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + - ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) + # etcd + - ('expired' not in (hostvars + | oo_select_keys(groups['etcd']) + | oo_collect('check_results.check_results.etcd') + | oo_collect('health'))) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml index 8c8062585..afd5463b2 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml @@ -66,6 +66,7 @@ --signer-cert={{ openshift.common.config_base }}/master/ca.crt --signer-key={{ openshift.common.config_base }}/master/ca.key --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt + --config={{ mktemp.stdout }}/admin.kubeconfig --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" --cert={{ openshift.common.config_base }}/master/registry.crt --key={{ openshift.common.config_base }}/master/registry.key diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml index 9f14f2d69..2116c745c 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml @@ -7,23 +7,34 @@ tasks: - name: Create temp directory for kubeconfig command: mktemp -d /tmp/openshift-ansible-XXXXXX - register: mktemp + register: router_cert_redeploy_tempdir changed_when: false + - name: Copy admin client config(s) command: > - cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig + cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig 
changed_when: false - name: Determine if router exists command: > {{ openshift.common.client_binary }} get dc/router -o json - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default register: l_router_dc failed_when: false changed_when: false - - set_fact: + - name: Determine if router service exists + command: > + {{ openshift.common.client_binary }} get svc/router -o json + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig + -n default + register: l_router_svc + failed_when: false + changed_when: false + + - name: Collect router environment variables and secrets + set_fact: router_env_vars: "{{ ((l_router_dc.stdout | from_json)['spec']['template']['spec']['containers'][0]['env'] | oo_collect('name')) | default([]) }}" @@ -34,20 +45,32 @@ changed_when: false when: l_router_dc.rc == 0 + - name: Collect router service annotations + set_fact: + router_service_annotations: "{{ (l_router_svc.stdout | from_json)['metadata']['annotations'] if 'annotations' in (l_router_svc.stdout | from_json)['metadata'] else [] }}" + when: l_router_svc.rc == 0 + - name: Update router environment variables shell: > {{ openshift.common.client_binary }} env dc/router OPENSHIFT_CA_DATA="$(cat /etc/origin/master/ca.crt)" OPENSHIFT_CERT_DATA="$(cat /etc/origin/master/openshift-router.crt)" OPENSHIFT_KEY_DATA="$(cat /etc/origin/master/openshift-router.key)" - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default - when: l_router_dc.rc == 0 and 'OPENSHIFT_CA_DATA' in router_env_vars and 'OPENSHIFT_CERT_DATA' in router_env_vars and 'OPENSHIFT_KEY_DATA' in router_env_vars + when: + - l_router_dc.rc == 0 + - ('OPENSHIFT_CA_DATA' in router_env_vars) + - ('OPENSHIFT_CERT_DATA' in router_env_vars) + - ('OPENSHIFT_KEY_DATA' in router_env_vars) + # When the router service contains service signer annotations we + # will delete the existing certificate secret and allow OpenShift to + # replace the secret. 
- block: - name: Delete existing router certificate secret oc_secret: - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig" + kubeconfig: "{{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig" name: router-certs namespace: default state: absent @@ -58,85 +81,61 @@ {{ openshift.common.client_binary }} annotate service/router service.alpha.openshift.io/serving-cert-secret-name- service.alpha.openshift.io/serving-cert-signed-by- - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default - name: Add serving-cert-secret annotation to router service command: > {{ openshift.common.client_binary }} annotate service/router service.alpha.openshift.io/serving-cert-secret-name=router-certs - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default - when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined + when: + - l_router_dc.rc == 0 + - l_router_svc.rc == 0 + - ('router-certs' in router_secrets) + - openshift_hosted_router_certificate is undefined + - ('service.alpha.openshift.io/serving-cert-secret-name') in router_service_annotations + - ('service.alpha.openshift.io/serving-cert-signed-by') in router_service_annotations - - block: - - assert: - that: - - "'certfile' in openshift_hosted_router_certificate" - - "'keyfile' in openshift_hosted_router_certificate" - - "'cafile' in openshift_hosted_router_certificate" - msg: |- - openshift_hosted_router_certificate has been set in the inventory but is - missing one or more required keys. Ensure that 'certfile', 'keyfile', - and 'cafile' keys have been specified for the openshift_hosted_router_certificate - inventory variable. - - - name: Read router certificate and key - become: no - local_action: - module: slurp - src: "{{ item }}" - register: openshift_router_certificate_output - # Defaulting dictionary keys to none to avoid deprecation warnings - # (future fatal errors) during template evaluation. Dictionary keys - # won't be accessed unless openshift_hosted_router_certificate is - # defined and has all keys (certfile, keyfile, cafile) which we - # check above. 
- with_items: - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}" - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}" - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}" - - - name: Write temporary router certificate file - copy: - content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}" - dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" - mode: 0600 - - - name: Write temporary router key file - copy: - content: "{{ (openshift_router_certificate_output.results - | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}" - dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" - mode: 0600 - - - name: Replace router-certs secret - shell: > - {{ openshift.common.client_binary }} secrets new router-certs - tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" - tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" - --type=kubernetes.io/tls - --confirm - -o json | {{ openshift.common.client_binary }} replace -f - + # When there are no annotations on the router service we will allow + # the openshift_hosted role to either create a new wildcard + # certificate (since we deleted the original) or reapply a custom + # openshift_hosted_router_certificate. + - file: + path: "{{ item }}" + state: absent + with_items: + - /etc/origin/master/openshift-router.crt + - /etc/origin/master/openshift-router.key + when: + - l_router_dc.rc == 0 + - l_router_svc.rc == 0 + - ('router-certs' in router_secrets) + - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations + - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations - - name: Remove temporary router certificate and key files - file: - path: "{{ item }}" - state: absent - with_items: - - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" - - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" - when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined + - include_role: + name: openshift_hosted + tasks_from: main + vars: + openshift_hosted_manage_registry: false + when: + - l_router_dc.rc == 0 + - l_router_svc.rc == 0 + - ('router-certs' in router_secrets) + - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations + - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations - name: Redeploy router command: > {{ openshift.common.client_binary }} deploy dc/router --latest - --config={{ mktemp.stdout }}/admin.kubeconfig + --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default - name: Delete temp directory file: - name: "{{ mktemp.stdout }}" + name: "{{ router_cert_redeploy_tempdir.stdout }}" state: absent changed_when: False diff --git a/playbooks/common/openshift-cluster/sanity_checks.yml b/playbooks/common/openshift-cluster/sanity_checks.yml new file mode 100644 index 000000000..26716a92d --- /dev/null +++ b/playbooks/common/openshift-cluster/sanity_checks.yml @@ -0,0 +1,51 @@ +--- +- name: Verify Requirements + hosts: oo_all_hosts + tasks: + - fail: + msg: Flannel cannot be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel + when: openshift_use_openshift_sdn | default(true) |
bool and openshift_use_flannel | default(false) | bool + + - fail: + msg: Nuage SDN cannot be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool + + - fail: + msg: Nuage SDN cannot be used with flannel + when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool + + - fail: + msg: Contiv cannot be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: Contiv cannot be used with flannel + when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: Contiv cannot be used with nuage + when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: Calico cannot be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico + when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool + + - fail: + msg: The Calico playbook does not yet integrate with the Flannel playbook in OpenShift. Set either openshift_use_calico or openshift_use_flannel, but not both. + when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool + + - fail: + msg: Calico cannot be used with Nuage in OpenShift. Set either openshift_use_calico or openshift_use_nuage, but not both + when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool + + - fail: + msg: Calico cannot be used with Contiv in OpenShift.
Set either openshift_use_calico or openshift_use_contiv, but not both + when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool + + - fail: + msg: openshift_hostname must be 63 characters or less + when: openshift_hostname is defined and openshift_hostname | length > 63 + + - fail: + msg: openshift_public_hostname must be 63 characters or less + when: openshift_public_hostname is defined and openshift_public_hostname | length > 63 diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml new file mode 100644 index 000000000..bd964b2ce --- /dev/null +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -0,0 +1,29 @@ +--- +- name: Service Catalog Install Checkpoint Start + hosts: oo_all_hosts + gather_facts: false + tasks: + - name: Set Service Catalog install 'In Progress' + set_stats: + data: + installer_phase_servicecatalog: "In Progress" + aggregate: false + +- name: Service Catalog + hosts: oo_first_master + roles: + - openshift_service_catalog + - ansible_service_broker + - template_service_broker + vars: + first_master: "{{ groups.oo_first_master[0] }}" + +- name: Service Catalog Install Checkpoint End + hosts: oo_all_hosts + gather_facts: false + tasks: + - name: Set Service Catalog install 'Complete' + set_stats: + data: + installer_phase_servicecatalog: "Complete" + aggregate: false diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 6ed31a644..45b34c8bd 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -1,4 +1,16 @@ --- +- name: Initialization Checkpoint Start + hosts: oo_all_hosts + gather_facts: false + roles: + - installer_checkpoint + tasks: + - name: Set install initialization 'In Progress' + set_stats: + data: + installer_phase_initialize: "In Progress" + aggregate: false + - include: evaluate_groups.yml tags: - always @@ -7,10 +19,28 @@ tags: - always +- include: sanity_checks.yml + tags: + - always + - include: validate_hostnames.yml tags: - node +- include: initialize_openshift_repos.yml + tags: + - always + - include: initialize_openshift_version.yml tags: - always + +- name: Initialization Checkpoint End + hosts: oo_all_hosts + gather_facts: false + tasks: + - name: Set install initialization 'Complete' + set_stats: + data: + installer_phase_initialize: "Complete" + aggregate: false diff --git a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml index 1a6580795..eb118365a 100644 --- a/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml +++ b/playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml @@ -3,7 +3,7 @@ - name: Generate etcd instance name(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" register: etcd_names_output with_sequence: count={{ num_etcd }} diff --git a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml index 36d7b7870..783f70f50 100644 --- a/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml +++ b/playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml @@ -3,7 +3,7 @@ - name: Generate master
instance name(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}" register: master_names_output with_sequence: count={{ num_masters }} diff --git a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml index 278942f8b..c103e40a9 100644 --- a/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml +++ b/playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml @@ -5,7 +5,7 @@ - name: Generate node instance name(s) set_fact: - scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" + scratch_name: "{{ openshift_cluster_id | default('default') }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}" register: node_names_output with_sequence: count={{ number_nodes }} diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml deleted file mode 100644 index be956fca5..000000000 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- include: evaluate_groups.yml - -- name: Subscribe hosts, update repos and update OS packages - hosts: oo_hosts_to_update - roles: - # Explicitly calling openshift_facts because it appears that when - # rhel_subscribe is skipped that the openshift_facts dependency for - # openshift_repos is also skipped (this is the case at least for Ansible - # 2.0.2) - - openshift_facts - - role: rhel_subscribe - when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - - openshift_repos - - os_update_latest diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml deleted file mode 100644 index 9f7961614..000000000 --- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# This is a hack to allow us to use systemd_units.yml, but skip the handlers which -# restart services. We will unconditionally restart all containerized services -# because we have to unconditionally restart Docker: -- set_fact: - skip_node_svc_handlers: True - -- name: Update systemd units - include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }} - -# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of -# play when the node has already been marked schedulable again. (this would look strange -# in logs otherwise) -- meta: flush_handlers diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 07db071ce..98953f72e 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -4,7 +4,6 @@ # Do not allow adding hosts during upgrade.
g_new_master_hosts: [] g_new_node_hosts: [] - openshift_cluster_id: "{{ cluster_id | default('default') }}" - include: ../initialize_nodes_to_upgrade.yml @@ -52,11 +51,15 @@ - name: Drain Node for Kubelet upgrade command: > - {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets + {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade + register: l_docker_upgrade_drain_result + until: not l_docker_upgrade_drain_result | failed + retries: 60 + delay: 60 - - include: upgrade.yml + - include: tasks/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool - name: Set node schedulability diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml index 1b418920f..83f16ac0d 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml @@ -1,6 +1,10 @@ --- - name: Restart docker service: name=docker state=restarted + register: l_docker_restart_docker_in_upgrade_result + until: not l_docker_restart_docker_in_upgrade_result | failed + retries: 3 + delay: 30 - name: Update docker facts openshift_facts: @@ -11,7 +15,6 @@ with_items: - etcd_container - openvswitch - - "{{ openshift.common.service_type }}-master" - "{{ openshift.common.service_type }}-master-api" - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" @@ -24,4 +27,5 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: inventory_hostname in groups.oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml index 17f8fc6e9..808cc562c 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml @@ -4,7 +4,6 @@ - name: Stop containerized services service: name={{ item }} state=stopped with_items: - - "{{ openshift.common.service_type }}-master" - "{{ openshift.common.service_type }}-master-api" - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" @@ -32,7 +31,13 @@ - debug: var=docker_image_count.stdout when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool -- service: name=docker state=stopped +- service: + name: docker + state: stopped + register: l_pb_docker_upgrade_stop_result + until: not l_pb_docker_upgrade_stop_result | failed + retries: 3 + delay: 30 - name: Upgrade Docker package: name=docker{{ '-' + docker_version }} state=present diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml index b2a2eac9a..52345a9ba 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml @@ -18,12 +18,16 @@ - name: Get current version of Docker command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker" register: curr_docker_version + retries: 4 + 
until: curr_docker_version | succeeded changed_when: false - name: Get latest available version of Docker command: > {{ repoquery_cmd }} --qf '%{version}' "docker" register: avail_docker_version + retries: 4 + until: avail_docker_version | succeeded # Don't expect docker rpm to be available on hosts that don't already have it installed: when: pkg_check.rc == 0 failed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index 616ba04f8..d086cad00 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -2,13 +2,16 @@ - name: Backup etcd hosts: oo_etcd_hosts_to_backup roles: - - role: openshift_facts - - role: etcd_common - r_etcd_common_action: backup - r_etcd_common_backup_tag: etcd_backup_tag - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + - role: openshift_etcd_facts + post_tasks: + - include_role: + name: etcd + tasks_from: backup + vars: + r_etcd_common_backup_tag: "{{ etcd_backup_tag }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - name: Gate on etcd backup hosts: localhost @@ -20,7 +23,7 @@ | oo_select_keys(groups.oo_etcd_hosts_to_backup) | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" - set_fact: - etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) | list }}" - fail: msg: "Upgrade cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index 64abc54e7..5b8ba3bb2 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -15,10 +15,15 @@ hosts: oo_etcd_hosts_to_upgrade tasks: - include_role: - name: etcd_common - vars: - r_etcd_common_action: drop_etcdctl + name: etcd + tasks_from: drop_etcdctl - name: Perform etcd upgrade include: ./upgrade.yml when: openshift_etcd_upgrade | default(true) | bool + +- name: Backup etcd + include: backup.yml + vars: + etcd_backup_tag: "post-3.0-" + when: openshift_etcd_backup | default(true) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index 0431c1ce0..d71c96cd7 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -36,7 +36,7 @@ - not openshift.common.is_etcd_system_container | bool - name: Record containerized etcd version (runc) - command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\* + command: runc exec etcd rpm -qa --qf '%{version}' etcd\* register: etcd_container_version_runc failed_when: false # AUDIT:changed_when: `false` because we are only inspecting @@ -98,13 +98,11 @@ serial: 1 tasks: - include_role: - name: etcd_upgrade + name: etcd + tasks_from: upgrade_image + vars: + r_etcd_common_etcd_runtime: "host" + etcd_peer: "{{ openshift.common.hostname }}" when: - ansible_distribution == 'Fedora' - not openshift.common.is_containerized | bool - -- name: Backup etcd - include: backup.yml - vars: - etcd_backup_tag: "post-3.0-" - when: openshift_etcd_backup | default(true) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml index 831ca8f57..e5e895775 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml @@ -5,13 +5,14 @@ - name: Upgrade containerized hosts to {{ etcd_upgrade_version }} hosts: oo_etcd_hosts_to_upgrade serial: 1 - roles: - - role: etcd_upgrade - r_etcd_upgrade_action: upgrade - r_etcd_upgrade_mechanism: image - r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" - r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - etcd_peer: "{{ openshift.common.hostname }}" + tasks: + - include_role: + name: etcd + tasks_from: upgrade_image + vars: + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + etcd_peer: "{{ openshift.common.hostname }}" when: - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<') - openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml index 2e79451e0..a2a26bad4 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml @@ -5,13 +5,14 @@ - name: Upgrade to {{ etcd_upgrade_version }} hosts: oo_etcd_hosts_to_upgrade serial: 1 - roles: - - 
role: etcd_upgrade - r_etcd_upgrade_action: upgrade - r_etcd_upgrade_mechanism: rpm - r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" - r_etcd_common_etcd_runtime: "host" - etcd_peer: "{{ openshift.common.hostname }}" + tasks: + - include_role: + name: etcd + tasks_from: upgrade_rpm + vars: + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "host" + etcd_peer: "{{ openshift.common.hostname }}" when: - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<') - ansible_distribution == 'RedHat' diff --git a/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml new file mode 100644 index 000000000..9c9c260fb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: Role +metadata: + name: shared-resource-viewer + namespace: openshift +rules: +- apiGroups: + - "" + - template.openshift.io + attributeRestrictions: null + resources: + - templates + verbs: + - get + - list + - watch +- apiGroups: + - "" + - image.openshift.io + attributeRestrictions: null + resources: + - imagestreamimages + - imagestreams + - imagestreamtags + verbs: + - get + - list + - watch +- apiGroups: + - "" + - image.openshift.io + attributeRestrictions: null + resources: + - imagestreams/layers + verbs: + - get diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 0f421928b..2826951e6 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -4,9 +4,6 @@ # Do not allow adding hosts during upgrade. g_new_master_hosts: [] g_new_node_hosts: [] - openshift_cluster_id: "{{ cluster_id | default('default') }}" - -- include: ../initialize_oo_option_facts.yml - include: ../initialize_facts.yml diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index 72de63070..fc1cbf32a 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -30,6 +30,7 @@ ansible_become: "{{ g_sudo | default(omit) }}" with_items: " {{ groups['oo_nodes_to_config'] }}" when: + - hostvars[item].openshift is defined - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list changed_when: false diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker deleted file mode 120000 index 6aeca2842..000000000 --- a/playbooks/common/openshift-cluster/upgrades/master_docker +++ /dev/null @@ -1 +0,0 @@ -../../../../roles/openshift_master/templates/master_docker
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index d9ddf3860..122066955 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -90,10 +90,12 @@ # openshift_examples from failing when trying to replace templates that do # not already exist. We could have potentially done a replace --force to # create and update in one step. - - openshift_examples + - role: openshift_examples + when: openshift_install_examples | default(true,true) | bool - openshift_hosted_templates # Update the existing templates - role: openshift_examples + when: openshift_install_examples | default(true,true) | bool registry_url: "{{ openshift.master.registry_url }}" openshift_examples_import_command: replace - role: openshift_hosted_templates @@ -101,9 +103,16 @@ openshift_hosted_templates_import_command: replace # Check for warnings to be printed at the end of the upgrade: -- name: Check for warnings +- name: Clean up and display warnings hosts: oo_masters_to_config - tasks: + tags: + - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + post_tasks: # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond: - name: grep pluginOrderOverride command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml @@ -119,12 +128,8 @@ - not grep_plugin_order_override | skipped - grep_plugin_order_override.rc == 0 -- name: Re-enable excluder if it was previously enabled - hosts: oo_masters_to_config - tags: - - always - gather_facts: no - roles: - - role: openshift_excluder - r_openshift_excluder_action: enable - r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + - name: Warn if shared-resource-viewer could not be updated + debug: + msg: "WARNING: the shared-resource-viewer role could not be upgraded to the 3.6 spec because it is marked protected; please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213" + when: + - __shared_resource_viewer_protected | default(false) diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml index 9d8b73cff..6d8503879 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml @@ -1,8 +1,10 @@ --- # Only check if docker upgrade is required if docker_upgrade is not # already set to False.
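+# Note: Ansible implicitly ANDs the entries of a list-form `when`, so the include below runs only when (docker_upgrade is not defined or docker_upgrade | bool) and the host is not Atomic.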
-- include: ../docker/upgrade_check.yml - when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool +- include: ../../docker/upgrade_check.yml + when: + - docker_upgrade is not defined or (docker_upgrade | bool) + - not (openshift.common.is_atomic | bool) # Additional checks for Atomic hosts: diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml index 06eb5f936..6a5bc24f7 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml @@ -9,23 +9,29 @@ local_facts: ha: "{{ groups.oo_masters_to_config | length > 1 }}" - - name: Ensure Master is running - service: - name: "{{ openshift.common.service_type }}-master" - state: started - enabled: yes - when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool + - when: openshift.common.is_containerized | bool + block: + - set_fact: + master_services: + - "{{ openshift.common.service_type }}-master" - - name: Ensure HA Master is running - service: - name: "{{ openshift.common.service_type }}-master-api" - state: started - enabled: yes - when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool + # In case of a non-HA to HA upgrade. + - name: Check if the {{ openshift.common.service_type }}-master-api.service exists + command: > + systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend + register: master_api_service_status - - name: Ensure HA Master is running - service: - name: "{{ openshift.common.service_type }}-master-controllers" - state: started - enabled: yes - when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool + - set_fact: + master_services: + - "{{ openshift.common.service_type }}-master-api" + - "{{ openshift.common.service_type }}-master-controllers" + when: + - master_api_service_status.stdout_lines | length > 0 + - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0] + + - name: Ensure Master is running + service: + name: "{{ item }}" + state: started + enabled: yes + with_items: "{{ master_services }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml new file mode 100644 index 000000000..f75ae3b15 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml @@ -0,0 +1,22 @@ +--- +- name: Verify all masters have etcd3 storage backend set + hosts: oo_masters_to_config + gather_facts: no + roles: + - lib_utils + tasks: + - name: Read master storage backend setting + yedit: + state: list + src: /etc/origin/master/master-config.yaml + key: kubernetesMasterConfig.apiServerArguments.storage-backend + register: _storage_backend + + - fail: + msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue" + when: + # assuming the master-config.yaml is properly configured, i.e.
the value is a list + - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3" + + - debug: + msg: "Storage backend is set to etcd3" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml new file mode 100644 index 000000000..2a8de50a2 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml @@ -0,0 +1,16 @@ +--- +- name: OpenShift Health Checks + hosts: oo_all_hosts + any_errors_fatal: true + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: upgrade + post_tasks: + - name: Run health checks (upgrade) + action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - docker_image_availability diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml index 9a959a959..3c0017891 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml @@ -5,9 +5,9 @@ tasks: - fail: msg: > - This upgrade is only supported for origin, openshift-enterprise, and online + This upgrade is only supported for origin and openshift-enterprise deployment types - when: deployment_type not in ['origin','openshift-enterprise', 'online'] + when: deployment_type not in ['origin','openshift-enterprise'] # Error out in situations where the user has older versions specified in their # inventory in any of the openshift_release, openshift_image_tag, and diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml deleted file mode 100644 index 354af3cde..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: Verify node processes - hosts: oo_nodes_to_config - roles: - - openshift_facts - - openshift_docker_facts - tasks: - - name: Ensure Node is running - service: - name: "{{ openshift.common.service_type }}-node" - state: started - enabled: yes - when: openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 9b4a8e413..13fa37b09 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -4,6 +4,12 @@ msg: Verify OpenShift is already installed when: openshift.common.version is not defined +- name: Update oreg_auth docker login credentials if necessary + include_role: + name: docker + tasks_from: registry_auth.yml + when: oreg_auth_user is defined + - name: Verify containers are available for upgrade command: > docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} @@ -27,13 +33,17 @@ - name: Set fact avail_openshift_version set_fact: - avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}" + avail_openshift_version: "{{ repoquery_out.results.versions.available_versions_full.0 }}" + - name: Set openshift_pkg_version when not specified + set_fact: + openshift_pkg_version: "-{{ repoquery_out.results.versions.available_versions_full.0 }}" + when: openshift_pkg_version | default('') 
== '' - name: Verify OpenShift RPMs are available for upgrade fail: msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required" when: - - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<') + - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<') - name: Fail when openshift version does not meet minimum requirement for Origin upgrade fail: diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index 164baca81..8cc46ab68 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -8,7 +8,6 @@ # TODO: If the sdn package isn't already installed this will install it, we # should fix that - - name: Upgrade master packages package: name={{ master_pkgs | join(',') }} state=present vars: @@ -16,7 +15,7 @@ - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" - PyYAML diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 6738ce11f..a5e2f7940 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -5,13 +5,19 @@ # oc adm migrate storage should be run prior to etcd v3 upgrade # See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 -- name: Pre master upgrade - Upgrade job storage +- name: Pre master upgrade - Upgrade all storage hosts: oo_first_master tasks: - - name: Upgrade job storage + - name: Upgrade all storage command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm + migrate storage --include=* --confirm + register: l_pb_upgrade_control_plane_pre_upgrade_storage + when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 + - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool # If the facts cache was for some reason deleted, this fact may not be set, and if not set # it will always default to true.
This causes problems for the etcd data dir fact detection @@ -25,7 +31,6 @@ role: master local_facts: embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}" - name: Upgrade and backup etcd include: ./etcd/main.yml @@ -85,7 +90,10 @@ - include_vars: ../../../../roles/openshift_master/vars/main.yml - - name: Update systemd units + - name: Update journald config + include: ../../../../roles/openshift_master/tasks/journald.yml + + - name: Remove any legacy systemd units and update systemd units include: ../../../../roles/openshift_master/tasks/systemd_units.yml - name: Check for ca-bundle.crt @@ -140,16 +148,21 @@ - include: "{{ openshift_master_upgrade_post_hook }}" when: openshift_master_upgrade_post_hook is defined - - set_fact: - master_update_complete: True - -- name: Post master upgrade - Upgrade job storage - hosts: oo_first_master - tasks: - - name: Upgrade job storage + - name: Post master upgrade - Upgrade clusterpolicies storage command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm + migrate storage --include=clusterpolicies --confirm + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false) | bool + run_once: true + delegate_to: "{{ groups.oo_first_master.0 }}" + + - set_fact: + master_update_complete: True ############################################################################## # Gate on master update complete @@ -164,7 +177,7 @@ | oo_select_keys(groups.oo_masters_to_config) | oo_collect('inventory_hostname', {'master_update_complete': true}) }}" - set_fact: - master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}" + master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) | list }}" - fail: msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}" when: master_update_failed | length > 0 @@ -178,18 +191,18 @@ roles: - { role: openshift_cli } vars: - origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}" - ent_reconcile_bindings: true openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe # restart. 
skip_docker_role: True + __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml" tasks: - name: Reconcile Cluster Roles command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-roles --additive-only=true --confirm -o name register: reconcile_cluster_role_result + when: openshift_version | version_compare('3.7','<') changed_when: - reconcile_cluster_role_result.stdout != '' - reconcile_cluster_role_result.rc == 0 @@ -204,7 +217,7 @@ --exclude-groups=system:unauthenticated --exclude-users=system:anonymous --additive-only=true --confirm -o name - when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + when: openshift_version | version_compare('3.7','<') register: reconcile_bindings_result changed_when: - reconcile_bindings_result.stdout != '' @@ -219,17 +232,74 @@ changed_when: - reconcile_jenkins_role_binding_result.stdout != '' - reconcile_jenkins_role_binding_result.rc == 0 - when: openshift.common.version_gte_3_4_or_1_4 | bool + when: + - openshift_version | version_compare('3.7','<') + - openshift_version | version_compare('3.4','>=') + + - when: openshift_upgrade_target | version_compare('3.7','<') + block: + - name: Retrieve shared-resource-viewer + oc_obj: + state: list + kind: role + name: "shared-resource-viewer" + namespace: "openshift" + register: objout + + - name: Determine if shared-resource-viewer is protected + set_fact: + __shared_resource_viewer_protected: true + when: + - "'results' in objout" + - "'results' in objout['results']" + - "'annotations' in objout['results']['results'][0]['metadata']" + - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']" + - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'" + - copy: + src: "{{ item }}" + dest: "/tmp/{{ item }}" + with_items: + - "{{ __master_shared_resource_viewer_file }}" + when: __shared_resource_viewer_protected is not defined + + - name: Fixup shared-resource-viewer role + oc_obj: + state: present + kind: role + name: "shared-resource-viewer" + namespace: "openshift" + files: + - "/tmp/{{ __master_shared_resource_viewer_file }}" + delete_after: true + when: __shared_resource_viewer_protected is not defined + register: result + retries: 3 + delay: 5 + until: result.rc == 0 + ignore_errors: true + - name: Reconcile Security Context Constraints command: > - {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true -o name + {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name register: reconcile_scc_result changed_when: - reconcile_scc_result.stdout != '' - reconcile_scc_result.rc == 0 run_once: true + - name: Migrate storage post policy reconciliation + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=* --confirm + run_once: true + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false) | bool + - set_fact: reconcile_complete: True @@ -246,7 +316,7 @@ | 
oo_select_keys(groups.oo_masters_to_config) | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}" - set_fact: - reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}" + reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) | list }}" - fail: msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}" when: reconcile_failed | length > 0 @@ -258,7 +328,7 @@ roles: - openshift_facts tasks: - - include: docker/upgrade.yml + - include: docker/tasks/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool - name: Drain and upgrade master nodes @@ -288,15 +358,19 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_control_plane_drain_result + until: not l_upgrade_control_plane_drain_result | failed + retries: 60 + delay: 60 roles: - lib_openshift - openshift_facts - docker - - openshift_node_upgrade - openshift_node_dnsmasq + - openshift_node_upgrade post_tasks: - name: Set node schedulability diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 35a50cf4e..c93a5d89c 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -26,15 +26,19 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_nodes_drain_result + until: not l_upgrade_nodes_drain_result | failed + retries: 60 + delay: 60 roles: - lib_openshift - openshift_facts - docker - - openshift_node_upgrade - openshift_node_dnsmasq + - openshift_node_upgrade - role: openshift_excluder r_openshift_excluder_action: enable r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml index 83d2cec81..8558bf3e9 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml @@ -74,18 +74,21 @@ - block: - debug: msg: "WARNING: existing scheduler config does not match previous known defaults; automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and -
openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}" + when: + - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates + - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] - set_fact: openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}" + when: + - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates + - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] - set_fact: openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}" - when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}" + when: + - openshift_master_scheduler_current_predicates != default_predicates_no_region + - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] when: openshift_master_scheduler_predicates | default(none) is none @@ -131,18 +134,21 @@ - block: - debug: msg: "WARNING: existing scheduler config does not match previous known defaults; automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}" + when: + - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities + - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] - set_fact: openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}" + when: + - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities + - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] - set_fact: openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}" - when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}" + when: + - openshift_master_scheduler_current_priorities != default_priorities_no_zone + - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] when: openshift_master_scheduler_priorities | default(none) is none @@ -162,5 +168,6 @@ content: "{{ scheduler_config | to_nice_json }}" dest: "{{ openshift_master_scheduler_conf }}" backup: true - when: "{{ openshift_upgrade_scheduler_predicates is defined or - openshift_upgrade_scheduler_priorities is defined }}" + when: > + openshift_upgrade_scheduler_predicates is defined or + openshift_upgrade_scheduler_priorities is defined diff --git
a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml index d69472fad..5e7a66171 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml @@ -41,12 +41,12 @@ - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.servicesServingCert.signer.certFile' + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' yaml_value: service-signer.crt - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile' + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' yaml_value: service-signer.key - modify_yaml: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml index f1245aa2e..a241ef039 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -39,8 +39,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -88,7 +89,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index b693ab55c..54c85f0fb 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -47,8 +47,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -92,7 +93,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index 4fd029107..cee4e9087 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -40,8 +40,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or 
openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -89,7 +90,7 @@ - name: Verify docker upgrade targets hosts: oo_nodes_to_upgrade tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml index ed89dbe8d..52458e03c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml @@ -1,16 +1,10 @@ --- - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml index 965e39482..ae217ba2e 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -39,8 +39,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -88,7 +89,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index 7830f462c..d7cb38d03 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -47,8 +47,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -92,7 +93,7 
@@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml index 4364ff8e3..8531e6045 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -40,8 +40,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -89,7 +90,7 @@ - name: Verify docker upgrade targets hosts: oo_nodes_to_upgrade tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml index ed89dbe8d..52458e03c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml @@ -1,16 +1,10 @@ --- - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml index 4e7c14e94..bda245fe1 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -39,13 +39,18 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: - pre_upgrade +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -70,10 +75,6 @@ # docker is configured and running. 
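# The recurring change in these hunks replaces a Jinja-wrapped `when` string
# with a list of bare conditions; Ansible evaluates every list entry as an
# expression and requires all of them to hold, so the {{ }} wrapping (which
# newer Ansible warns about) is unnecessary. A minimal sketch of the new form
# with the same conditions (the debug task is illustrative only):
- debug:
    msg: internal no_proxy hostnames will be generated
  when:
    - openshift_http_proxy is defined or openshift_https_proxy is defined
    - openshift_generate_no_proxy_hosts | default(True) | bool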
skip_docker_role: True -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade @@ -88,7 +89,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade @@ -111,6 +112,8 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_5/master_config_upgrade.yml" - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 45b664d06..6cdea7b84 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -47,13 +47,18 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: - pre_upgrade +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -74,10 +79,6 @@ # docker is configured and running. skip_docker_role: True -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade @@ -92,7 +93,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml index 036d3fcf5..e29d0f8e6 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -40,8 +40,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: @@ -89,7 +90,7 @@ - name: Verify docker upgrade targets hosts: oo_nodes_to_upgrade tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml index ed89dbe8d..db0c8f886 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml @@ -1,16 +1,15 @@ --- - modify_yaml: dest: 
"{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginConfig' - yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "'admission_plugin_config' in openshift.master" + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'admissionConfig.pluginOrderOverride' - yaml_value: + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key - modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'kubernetesMasterConfig.admissionConfig' - yaml_value: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 5b9ac9e8f..dd109cfa9 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -39,13 +39,22 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -70,10 +79,6 @@ # docker is configured and running. 
skip_docker_role: True -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade @@ -88,7 +93,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade @@ -111,6 +116,8 @@ - include: ../cleanup_unused_images.yml - include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_6/master_config_upgrade.yml" - include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index a470c7595..8ab68002d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -47,13 +47,22 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + - include: ../disable_master_excluders.yml tags: - pre_upgrade @@ -74,10 +83,6 @@ # docker is configured and running. skip_docker_role: True -- include: ../pre/verify_control_plane_running.yml - tags: - - pre_upgrade - - include: ../../../openshift-master/validate_restart.yml tags: - pre_upgrade @@ -92,7 +97,7 @@ - name: Verify docker upgrade targets hosts: oo_masters_to_config:oo_etcd_to_config tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 25eceaf90..ba6fcc3f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -40,13 +40,18 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool - include: ../pre/verify_inventory_vars.yml tags: - pre_upgrade +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + - include: ../disable_node_excluders.yml tags: - pre_upgrade @@ -89,7 +94,7 @@ - name: Verify docker upgrade targets hosts: oo_nodes_to_upgrade tasks: - - include: ../pre/verify_docker_upgrade_targets.yml + - include: ../pre/tasks/verify_docker_upgrade_targets.yml tags: - pre_upgrade diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins new file mode 120000 index 
000000000..7de3c1dd7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml new file mode 100644 index 000000000..1d4d1919c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml @@ -0,0 +1,20 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.election.lockName' + yaml_value: 'openshift-master-controllers' + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' + yaml_value: service-signer.crt + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' + yaml_value: service-signer.key + +- modify_yaml: + dest: "{{ openshift.common.config_base }}/master/master-config.yaml" + yaml_key: servingInfo.clientCA + yaml_value: ca.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/roles b/playbooks/common/openshift-cluster/upgrades/v3_7/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml new file mode 100644 index 000000000..f4862e321 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -0,0 +1,142 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
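# The Set openshift_no_proxy_internal_hostnames plays condense inventory data
# into one comma-separated string: oo_select_keys and oo_collect are filters
# shipped in this repo's filter_plugins directory (hence the symlinks added
# for v3_7), selecting the hostvars of the relevant groups and collecting
# their openshift.common.hostname values. Conceptually, stripped of defaults:
- set_fact:
    openshift_no_proxy_internal_hostnames: "{{ hostvars
      | oo_select_keys(groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']))
      | oo_collect('openshift.common.hostname')
      | join(',') }}"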
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml new file mode 100644 index 000000000..d5a8379d7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -0,0 +1,144 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- include: ../pre/verify_etcd3_backend.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades 
which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +# All controllers must be stopped at the same time then restarted +- name: Cycle all controller services to force new leader election mode + hosts: oo_masters_to_config + gather_facts: no + tasks: + - name: Stop {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: stopped + - name: Start {{ openshift.common.service_type }}-master-controllers + systemd: + name: "{{ openshift.common.service_type }}-master-controllers" + state: started + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml new file mode 100644 index 000000000..bc080f9a3 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -0,0 +1,115 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
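# Both v3_7 entry points above hand their config edits to the shared
# control-plane playbook through the master_config_hook variable; the common
# playbook includes whatever task file the caller names (presumably while each
# master is being upgraded), so a release only has to ship its own
# master_config_upgrade.yml. The wiring is just an include with vars:
- include: ../upgrade_control_plane.yml
  vars:
    master_config_hook: "v3_7/master_config_upgrade.yml"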
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
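# Each pre-upgrade flow pins the version the same way: request the bare
# release and let the openshift_version role expand it to a concrete version,
# with openshift_protect_installed_version disabled so the role may move past
# what is already installed. Stripped to its essentials:
- include: ../../initialize_openshift_version.yml
  vars:
    openshift_release: "{{ openshift_upgrade_target }}"
    openshift_protect_installed_version: False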
+- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml new file mode 100644 index 000000000..8e4f99c91 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -0,0 +1,23 @@ +--- +############################################################################### +# Pre upgrade checks for known data problems, if this playbook fails you should +# contact support. If you're not supported contact users@lists.openshift.com +############################################################################### +- name: Verify 3.7 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + + tasks: + - name: Check for invalid namespaces and SDN errors + oc_objectvalidator: + + - name: Confirm OpenShift authorization objects are in sync + command: > + {{ openshift.common.client_binary }} adm migrate authorization + when: openshift_version | version_compare('3.7','<') + changed_when: false + register: l_oc_result + until: l_oc_result.rc == 0 + retries: 4 + delay: 15 diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml index 33fc5630f..be2e6a15a 100644 --- a/playbooks/common/openshift-cluster/validate_hostnames.yml +++ b/playbooks/common/openshift-cluster/validate_hostnames.yml @@ -1,17 +1,22 @@ --- -- name: Gather and set facts for node hosts +- name: Validate node hostnames hosts: oo_nodes_to_config - roles: - - openshift_facts tasks: - - shell: + - name: Query DNS for IP address of {{ openshift.common.hostname }} + shell: getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }' register: lookupip changed_when: false failed_when: false - name: Warn user about bad openshift_hostname values pause: - prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort." + prompt: + The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }} + doesn't resolve to an IP address owned by this host. Please set + openshift_hostname variable to a hostname that when resolved on the host + in question resolves to an IP address matching an interface on this + host. This host will fail liveness checks for pods utilizing hostPorts, + press ENTER to continue or CTRL-C to abort. seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}" when: - lookupip.stdout != '127.0.0.1' |
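# The validate_hostnames change above also reworks the pause: with the omit
# placeholder, seconds is only passed when openshift_override_hostname_check
# is true (a ten-second warning), otherwise the parameter is dropped and pause
# falls back to waiting for ENTER. The conditional-omit idiom on its own
# (prompt text shortened for illustration):
- pause:
    prompt: "Hostname does not resolve to a local IP, press ENTER to continue"
    seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"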