Diffstat (limited to 'playbooks')
188 files changed, 2158 insertions, 789 deletions
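A recurring change throughout this diff is the conversion of include_role to import_role. As a hedged sketch of the difference (the play, role, and variable names below are placeholders, not from this repository): import_role is resolved statically when the playbook is parsed, while include_role is evaluated dynamically at runtime.

```
---
# Hypothetical playbook contrasting the two forms; example_role and some_flag
# are placeholders, not names from this changeset.
- hosts: localhost
  gather_facts: no
  tasks:
    # Static: resolved at parse time, so the role's tasks appear in
    # --list-tasks and tags/conditions apply to each imported task.
    - import_role:
        name: example_role
        tasks_from: setup.yml

    # Dynamic: evaluated only when this task executes, so it can depend on
    # facts computed earlier in the run.
    - include_role:
        name: example_role
        tasks_from: setup.yml
      when: some_flag | default(false) | bool
```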
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml deleted file mode 100644 index 69b2541bb..000000000 --- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- hosts: masters[0] - roles: - - role: openshift_logging - openshift_hosted_logging_cleanup: no - -- name: Update master-config for publicLoggingURL - hosts: masters:!masters[0] - pre_tasks: - - set_fact: - openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}" - tasks: - - include_role: - name: openshift_logging - tasks_from: update_master_config - when: openshift_hosted_logging_deploy | default(false) | bool diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index d203b9cda..cf811ca84 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -198,3 +198,21 @@ At this point your cluster should be ready for workloads. Proceed to deploy app ### Still to come There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities. + +## Uninstall / Deprovisioning + +To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You will need to remove any other objects (i.e. ELBs, instances, etc.) before attempting this. You should use the same inventory file and provisioning_vars.yml file that were used during provisioning. + +``` +ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml +``` + +This should result in removal of the security groups and VPC that were created. + +Cleaning up the S3 bucket contents can be accomplished with: + +``` +ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml +``` + +NOTE: If you want to also remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account** so we don't remove these by default) then you should add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
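Assuming the variable semantics described in the note above, a minimal provisioning_vars.yml fragment enabling that cleanup might look like this (hedged example; only the variable name comes from this changeset):

```
# provisioning_vars.yml (fragment)
# Opt in to deleting the uploaded ssh keys during uninstall. This is off by
# default because the keys may be shared by other clusters in the account.
openshift_aws_enable_uninstall_shared_objects: True
```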
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml index e7bed4f6e..46c453333 100755 --- a/playbooks/aws/openshift-cluster/accept.yml +++ b/playbooks/aws/openshift-cluster/accept.yml @@ -1,8 +1,7 @@ #!/usr/bin/ansible-playbook --- -- name: Setup the vpc and the master node group +- name: Accept nodes hosts: localhost - remote_user: root gather_facts: no tasks: - name: Alert user to variables needed - clusterid @@ -17,37 +16,7 @@ import_role: name: lib_openshift - - name: fetch masters - ec2_instance_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - filters: - "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" - "tag:host-type": master - instance-state-name: running - register: mastersout - retries: 20 - delay: 3 - until: "'instances' in mastersout and mastersout.instances|length > 0" - - - name: fetch new node instances - ec2_instance_facts: - region: "{{ openshift_aws_region | default('us-east-1') }}" - filters: - "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}" - "tag:host-type": node - instance-state-name: running - register: instancesout - retries: 20 - delay: 3 - until: "'instances' in instancesout and instancesout.instances|length > 0" - - - debug: - msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" - - - name: approve nodes - oc_adm_csr: - #approve_all: True - nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}" - timeout: 60 - register: nodeout - delegate_to: "{{ mastersout.instances[0].public_ip_address }}" + - name: accept nodes + import_role: + name: openshift_aws + tasks_from: accept_nodes.yml diff --git a/playbooks/aws/openshift-cluster/hosted.yml b/playbooks/aws/openshift-cluster/hosted.yml deleted file mode 100644 index 9d9ed29de..000000000 --- a/playbooks/aws/openshift-cluster/hosted.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- import_playbook: ../../openshift-hosted/private/config.yml - -- import_playbook: ../../openshift-metrics/private/config.yml - when: openshift_metrics_install_metrics | default(false) | bool - -- import_playbook: ../../openshift-logging/private/config.yml - when: openshift_logging_install_logging | default(false) | bool - -- import_playbook: ../../openshift-prometheus/private/config.yml - when: openshift_hosted_prometheus_deploy | default(false) | bool - -- import_playbook: ../../openshift-service-catalog/private/config.yml - when: openshift_enable_service_catalog | default(false) | bool - -- import_playbook: ../../openshift-management/private/config.yml - when: openshift_management_install_management | default(false) | bool - -- name: Print deprecated variable warning message if necessary - hosts: oo_first_master - gather_facts: no - tasks: - - debug: msg="{{__deprecation_message}}" - when: - - __deprecation_message | default ('') | length > 0 diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml index b03fb0b7f..938e83f5e 100644 --- a/playbooks/aws/openshift-cluster/install.yml +++ b/playbooks/aws/openshift-cluster/install.yml @@ -2,7 +2,7 @@ - name: Setup the master node group hosts: localhost tasks: - - include_role: + - import_role: name: openshift_aws tasks_from: setup_master_group.yml @@ -11,36 +11,15 @@ gather_facts: no remote_user: root tasks: - - include_role: + - import_role: name: openshift_aws tasks_from: master_facts.yml - name: run the init import_playbook: ../../init/main.yml -- name: perform the installer openshift-checks - 
import_playbook: ../../openshift-checks/private/install.yml +- name: configure the control plane + import_playbook: ../../common/private/control_plane.yml -- name: etcd install - import_playbook: ../../openshift-etcd/private/config.yml - -- name: include nfs - import_playbook: ../../openshift-nfs/private/config.yml - when: groups.oo_nfs_to_config | default([]) | count > 0 - -- name: include loadbalancer - import_playbook: ../../openshift-loadbalancer/private/config.yml - when: groups.oo_lb_to_config | default([]) | count > 0 - -- name: include openshift-master config - import_playbook: ../../openshift-master/private/config.yml - -- name: include master additional config - import_playbook: ../../openshift-master/private/additional_config.yml - -- name: include master additional config +- name: ensure the masters are configured as nodes import_playbook: ../../openshift-node/private/config.yml - -- name: include openshift-glusterfs - import_playbook: ../../openshift-glusterfs/private/config.yml - when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml index 4b5bd22ea..d538b862d 100644 --- a/playbooks/aws/openshift-cluster/provision.yml +++ b/playbooks/aws/openshift-cluster/provision.yml @@ -1,8 +1,7 @@ --- -- name: Setup the elb and the master node group +- name: Alert user to variables needed hosts: localhost tasks: - - name: Alert user to variables needed - clusterid debug: msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" @@ -11,7 +10,14 @@ debug: msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" +- import_playbook: provision_s3.yml + +- import_playbook: provision_elb.yml + +- name: Create the master node group + hosts: localhost + tasks: - name: provision cluster - include_role: + import_role: name: openshift_aws tasks_from: provision.yml diff --git a/playbooks/aws/openshift-cluster/provision_elb.yml b/playbooks/aws/openshift-cluster/provision_elb.yml new file mode 100644 index 000000000..9f27dca3b --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_elb.yml @@ -0,0 +1,9 @@ +--- +- name: Create elb + hosts: localhost + connection: local + tasks: + - name: provision elb + include_role: + name: openshift_aws + tasks_from: provision_elb.yml diff --git a/playbooks/aws/openshift-cluster/provision_install.yml b/playbooks/aws/openshift-cluster/provision_install.yml index f98f5be9a..bd154fa83 100644 --- a/playbooks/aws/openshift-cluster/provision_install.yml +++ b/playbooks/aws/openshift-cluster/provision_install.yml @@ -15,5 +15,5 @@ - name: Include the accept.yml playbook to accept nodes into the cluster import_playbook: accept.yml -- name: Include the hosted.yml playbook to finish the hosted configuration - import_playbook: hosted.yml +- name: Include the components playbook to finish the hosted configuration + import_playbook: ../../common/private/components.yml diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml index 6e843453c..6c7c1f069 100644 --- a/playbooks/aws/openshift-cluster/provision_instance.yml +++ b/playbooks/aws/openshift-cluster/provision_instance.yml @@ -7,6 +7,6 @@ gather_facts: no tasks: - name: create an instance and prepare for ami - include_role: + import_role: name: openshift_aws tasks_from: provision_instance.yml diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml 
b/playbooks/aws/openshift-cluster/provision_nodes.yml index 44c686e08..82f147865 100644 --- a/playbooks/aws/openshift-cluster/provision_nodes.yml +++ b/playbooks/aws/openshift-cluster/provision_nodes.yml @@ -13,6 +13,6 @@ msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" - name: create the node groups - include_role: + import_role: name: openshift_aws tasks_from: provision_nodes.yml diff --git a/playbooks/aws/openshift-cluster/provision_s3.yml b/playbooks/aws/openshift-cluster/provision_s3.yml new file mode 100644 index 000000000..45b439083 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_s3.yml @@ -0,0 +1,10 @@ +--- +- name: Create s3 bucket + hosts: localhost + connection: local + tasks: + - name: create s3 bucket + include_role: + name: openshift_aws + tasks_from: s3.yml + when: openshift_aws_create_s3 | default(true) | bool diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml index 7d74a691a..a0d4ec728 100644 --- a/playbooks/aws/openshift-cluster/provision_sec_group.yml +++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml @@ -7,7 +7,7 @@ gather_facts: no tasks: - name: create security groups - include_role: + import_role: name: openshift_aws tasks_from: security_group.yml when: openshift_aws_create_security_groups | default(True) | bool diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml index 3ec683958..d86ff9f9b 100644 --- a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml +++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml @@ -4,7 +4,7 @@ gather_facts: no tasks: - name: create an instance and prepare for ami - include_role: + import_role: name: openshift_aws tasks_from: ssh_keys.yml vars: diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml index 0a23a6d32..cf72f6c87 100644 --- a/playbooks/aws/openshift-cluster/provision_vpc.yml +++ b/playbooks/aws/openshift-cluster/provision_vpc.yml @@ -4,7 +4,7 @@ gather_facts: no tasks: - name: create a vpc - include_role: + import_role: name: openshift_aws tasks_from: vpc.yml when: openshift_aws_create_vpc | default(True) | bool diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml index 8239a64fb..f315db604 100644 --- a/playbooks/aws/openshift-cluster/seal_ami.yml +++ b/playbooks/aws/openshift-cluster/seal_ami.yml @@ -7,6 +7,6 @@ become: no tasks: - name: seal the ami - include_role: + import_role: name: openshift_aws tasks_from: seal_ami.yml diff --git a/playbooks/aws/openshift-cluster/uninstall_elb.yml b/playbooks/aws/openshift-cluster/uninstall_elb.yml new file mode 100644 index 000000000..c1b724f0c --- /dev/null +++ b/playbooks/aws/openshift-cluster/uninstall_elb.yml @@ -0,0 +1,9 @@ +--- +- name: Delete elb + hosts: localhost + connection: local + tasks: + - name: deprovision elb + include_role: + name: openshift_aws + tasks_from: uninstall_elb.yml diff --git a/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml new file mode 100644 index 000000000..180c2281a --- /dev/null +++ b/playbooks/aws/openshift-cluster/uninstall_prerequisites.yml @@ -0,0 +1,6 @@ +--- +- import_playbook: uninstall_sec_group.yml + +- import_playbook: uninstall_vpc.yml + +- import_playbook: uninstall_ssh_keypair.yml diff --git 
a/playbooks/aws/openshift-cluster/uninstall_s3.yml b/playbooks/aws/openshift-cluster/uninstall_s3.yml new file mode 100644 index 000000000..448b47aee --- /dev/null +++ b/playbooks/aws/openshift-cluster/uninstall_s3.yml @@ -0,0 +1,10 @@ +--- +- name: Empty/delete s3 bucket + hosts: localhost + connection: local + tasks: + - name: empty/delete s3 bucket + include_role: + name: openshift_aws + tasks_from: uninstall_s3.yml + when: openshift_aws_create_s3 | default(true) | bool diff --git a/playbooks/aws/openshift-cluster/uninstall_sec_group.yml b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml new file mode 100644 index 000000000..642e5b169 --- /dev/null +++ b/playbooks/aws/openshift-cluster/uninstall_sec_group.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: delete security groups + include_role: + name: openshift_aws + tasks_from: uninstall_security_group.yml + when: openshift_aws_create_security_groups | default(True) | bool diff --git a/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml new file mode 100644 index 000000000..ec9caa51b --- /dev/null +++ b/playbooks/aws/openshift-cluster/uninstall_ssh_keypair.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: remove ssh keypair(s) + include_role: + name: openshift_aws + tasks_from: uninstall_ssh_keys.yml + when: openshift_aws_users | default([]) | length > 0 diff --git a/playbooks/aws/openshift-cluster/uninstall_vpc.yml b/playbooks/aws/openshift-cluster/uninstall_vpc.yml new file mode 100644 index 000000000..4c988bcc5 --- /dev/null +++ b/playbooks/aws/openshift-cluster/uninstall_vpc.yml @@ -0,0 +1,10 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: delete vpc + include_role: + name: openshift_aws + tasks_from: uninstall_vpc.yml + when: openshift_aws_create_vpc | default(True) | bool diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example index 1491fb868..78484fdbd 100644 --- a/playbooks/aws/provisioning_vars.yml.example +++ b/playbooks/aws/provisioning_vars.yml.example @@ -21,6 +21,12 @@ openshift_release: # v3.7 # This will be dependent on the version provided by the yum repository openshift_pkg_version: # -3.7.0 +# OpenShift api port +# Fulfills a chicken/egg scenario with how Ansible treats host inventory file +# and extra_vars. This is used for SecurityGroups, ELB Listeners as well as +# an override to installer inventory openshift_master_api_port key +# openshift_master_api_port: 8443 + # specify a clusterid # This value is also used as the default value for many other components. #openshift_aws_clusterid: default @@ -41,12 +47,28 @@ openshift_pkg_version: # -3.7.0 # a vpc, set this to false. #openshift_aws_create_vpc: true +# when openshift_aws_create_vpc is true (the default), the VPC defined in +# openshift_aws_vpc will be created +#openshift_aws_vpc: +# name: "{{ openshift_aws_vpc_name }}" +# cidr: 172.31.0.0/16 +# subnets: +# us-east-1: +# - cidr: 172.31.48.0/20 +# az: "us-east-1c" +# default_az: true +# - cidr: 172.31.32.0/20 +# az: "us-east-1e" +# - cidr: 172.31.16.0/20 +# az: "us-east-1a" + # Name of the vpc. Needs to be set if using a pre-existing vpc. #openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}" # Name of the subnet in the vpc to use. Needs to be set if using a pre-existing -# vpc + subnet. -#openshift_aws_subnet_name: +# vpc + subnet. 
Otherwise will use the subnet with 'default_az' set (see above +# example VPC structure) +#openshift_aws_subnet_az: # -------------- # # Security Group # @@ -93,6 +115,11 @@ openshift_aws_ssh_key_name: # myuser_key # --------- # # Variables in this section apply to building a node AMI for use in your # openshift cluster. +# openshift-ansible will perform the container runtime storage setup when specified +# The current storage setup will require a drive if using a separate storage device +# for the container runtime. +container_runtime_docker_storage_type: overlay2 +container_runtime_docker_storage_setup_device: /dev/xvdb # must specify a base_ami when building an AMI openshift_aws_base_ami: # ami-12345678 diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md new file mode 100644 index 000000000..7ede3a28c --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/README.md @@ -0,0 +1,20 @@ +# v3.10 Major and Minor Upgrade Playbook + +## Overview +This playbook currently performs the following steps. + + * Upgrade and restart master services + * Unschedule node + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration that is necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml new file mode 100644 index 000000000..977b4f381 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade.yml @@ -0,0 +1,5 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml new file mode 100644 index 000000000..8b76bf4ff --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml @@ -0,0 +1,16 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml + +- import_playbook: ../../../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml new file mode 100644 index 000000000..b4353edc2 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml @@ -0,0 +1,7 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml index 869e185af..c8f397186 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -12,3 +12,5 @@ # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # - import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml + +- import_playbook: ../../../../openshift-master/private/restart.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml index 23a3fcbb5..23a3fcbb5 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index dc9d0a139..f70f05bac 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -6,7 +6,7 @@ roles: - role: rhel_subscribe when: - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - ansible_distribution == "RedHat" - rhsub_user is defined - rhsub_pass is defined diff --git a/playbooks/cluster-operator/aws/components.yml b/playbooks/cluster-operator/aws/components.yml new file mode 100644 index 000000000..8587aac45 --- /dev/null +++ b/playbooks/cluster-operator/aws/components.yml @@ -0,0 +1,24 @@ +--- +- name: Alert user to variables needed + hosts: localhost + tasks: + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" + +- name: Setup the master node group + hosts: localhost + tasks: + - import_role: + name: openshift_aws + tasks_from: setup_master_group.yml + +- name: run the init + import_playbook: ../../init/main.yml + +- name: Include the components playbook to finish the hosted configuration + import_playbook: ../../common/private/components.yml diff --git a/playbooks/cluster-operator/aws/infrastructure.yml b/playbooks/cluster-operator/aws/infrastructure.yml new file mode 100644 index 000000000..9669820fb --- /dev/null +++ b/playbooks/cluster-operator/aws/infrastructure.yml @@ -0,0 +1,21 @@ +--- +- name: Alert user to variables needed + hosts: localhost + tasks: + - name: Alert user to variables needed - clusterid + debug: + msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}" + + - name: Alert user to variables needed - region + debug: + msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}" + +- import_playbook: ../../aws/openshift-cluster/provision_vpc.yml + +- import_playbook: ../../aws/openshift-cluster/provision_ssh_keypair.yml + +- import_playbook: ../../aws/openshift-cluster/provision_sec_group.yml + +- import_playbook: ../../aws/openshift-cluster/provision_s3.yml + +- import_playbook: ../../aws/openshift-cluster/provision_elb.yml diff --git a/playbooks/cluster-operator/aws/roles b/playbooks/cluster-operator/aws/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ 
b/playbooks/cluster-operator/aws/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml index 372a39e74..6d82fa928 100644 --- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml +++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml @@ -2,7 +2,6 @@ - name: Create local temp directory for syncing certs hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -11,8 +10,15 @@ changed_when: false when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool) + - name: Chmod local temp directory + local_action: command chmod 777 "{{ local_cert_sync_tmpdir.stdout }}" + changed_when: false + when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool) + - name: Create service signer certificate hosts: oo_first_master + roles: + - openshift_facts tasks: - name: Create remote temp directory for creating certs command: mktemp -d /tmp/openshift-ansible-XXXXXXX @@ -65,7 +71,6 @@ - name: Delete local temp directory hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Delete local temp directory diff --git a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 5b8746f2a..8392e21ee 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -12,14 +12,11 @@ roles: - openshift_facts tasks: - - set_fact: - repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" - - fail: msg: Cannot upgrade Docker on Atomic operating systems. 
when: openshift_is_atomic | bool - - include_role: + - import_role: name: container_runtime tasks_from: docker_upgrade_check.yml when: docker_upgrade is not defined or docker_upgrade | bool @@ -54,13 +51,19 @@ - name: Drain Node for Kubelet upgrade command: > - {{ openshift_client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + --force --delete-local-data --ignore-daemonsets + --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade register: l_docker_upgrade_drain_result until: not (l_docker_upgrade_drain_result is failed) - retries: 60 - delay: 60 + retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}" + delay: 5 + failed_when: + - l_docker_upgrade_drain_result is failed + - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0 - include_tasks: tasks/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index 8ee83819e..a9a35b028 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -5,7 +5,9 @@ g_new_master_hosts: [] g_new_node_hosts: [] -- import_playbook: ../../../init/facts.yml +- import_playbook: ../../../init/basic_facts.yml +- import_playbook: ../../../init/base_packages.yml +- import_playbook: ../../../init/cluster_facts.yml - name: Ensure firewall is not switched during upgrade hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}" diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index fc1cbf32a..07be0b0d4 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -31,7 +31,7 @@ with_items: " {{ groups['oo_nodes_to_config'] }}" when: - hostvars[item].openshift is defined - - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list + - hostvars[item].openshift.common.hostname | lower in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list changed_when: false # Build up the oo_nodes_to_upgrade group, use the list filtered by label if diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 1b57521df..edc5aa9c5 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -1,7 +1,15 @@ --- -############################################################################### -# Post upgrade - Upgrade default router, default registry and examples -############################################################################### +#################################################################################### +# 
Post upgrade - Upgrade web console, default router, default registry, and examples +#################################################################################### +- name: Upgrade web console + hosts: oo_first_master + roles: + - role: openshift_web_console + when: + - openshift_web_console_install | default(true) | bool + - openshift_upgrade_target is version_compare('3.9','>=') + - name: Upgrade default router and default registry hosts: oo_first_master vars: @@ -105,6 +113,29 @@ registry_url: "{{ openshift.master.registry_url }}" openshift_hosted_templates_import_command: replace + post_tasks: + # Do not perform these tasks when the registry is insecure. The default registry is insecure in openshift_hosted/defaults/main.yml + - when: not (openshift_docker_hosted_registry_insecure | default(True)) + block: + # we need to migrate customers to the new pattern of pushing to the registry via dns + # Step 1: verify the certificates have the docker registry service name + - name: shell command to determine if the docker-registry.default.svc is found in the registry certificate + shell: > + echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000 | openssl x509 -text | grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)' + register: cert_output + changed_when: false + failed_when: + - cert_output.rc not in [0, 1] + + # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs + - name: set a fact to include the registry certs playbook if needed + set_fact: + openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0 }}" + +# Run the redeploy certs based upon the certificates. Defaults to False for insecure registries +- when: (hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry | default(False)) | bool + import_playbook: ../../../openshift-hosted/private/redeploy-registry-certificates.yml + # Check for warnings to be printed at the end of the upgrade: - name: Clean up and display warnings hosts: oo_masters_to_config @@ -134,3 +165,10 @@ msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213" when: - __shared_resource_viewer_protected | default(false) + +- name: Upgrade Service Catalog + hosts: oo_first_master + roles: + - role: openshift_service_catalog + when: + - openshift_enable_service_catalog | default(true) | bool
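The certificate check added above can also be run by hand. A hedged, standalone equivalent of the task's shell pipeline (it assumes network access to the docker-registry.default.svc service on port 5000):

```
# The exit status of the final grep decides the outcome: 0 means the SAN
# already lists the registry service DNS name, 1 means it does not.
echo -n | openssl s_client -showcerts -servername docker-registry.default.svc \
    -connect docker-registry.default.svc:5000 \
  | openssl x509 -text \
  | grep -A1 'X509v3 Subject Alternative Name:' \
  | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)' \
  && echo 'SAN already lists docker-registry.default.svc' \
  || echo 'SAN does not list docker-registry.default.svc'
```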
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml index de74c8ab8..44af37b2d 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml @@ -1,10 +1,10 @@ --- +# for control-plane upgrade, several variables may be passed in to this play +# which may affect the tasks here and in imported playbooks. # Pre-upgrade - import_playbook: ../initialize_nodes_to_upgrade.yml -- import_playbook: verify_cluster.yml - - name: Update repos on upgrade hosts hosts: "{{ l_upgrade_repo_hosts }}" roles: @@ -47,7 +47,11 @@ # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if # defined, and overriding the normal behavior of protecting the installed version openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False + # openshift_protect_installed_version is passed in via upgrade_control_plane.yml + # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml + # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml + +- import_playbook: verify_cluster.yml # If we're only upgrading nodes, we need to ensure masters are already upgraded - name: Verify masters are already upgraded hosts: oo_masters_to_config tasks: - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." when: - l_upgrade_nodes_only | default(False) | bool - - openshift.common.version != openshift_version + - not openshift.common.version | match(openshift_version) # If we're only upgrading nodes, skip this. - import_playbook: ../../../../openshift-master/private/validate_restart.yml @@ -72,6 +76,6 @@ - name: Verify docker upgrade targets hosts: "{{ l_upgrade_docker_target_hosts }}" tasks: - - include_role: + - import_role: name: container_runtime tasks_from: docker_upgrade_check.yml diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml index 4713f8633..4902b9ecd 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml @@ -5,11 +5,6 @@ hosts: oo_first_master gather_facts: no tasks: - - fail: - msg: > - This upgrade is only supported for origin and openshift-enterprise - deployment types - when: deployment_type not in ['origin','openshift-enterprise'] # Error out in situations where the user has older versions specified in their # inventory in any of the openshift_release, openshift_image_tag, and @@ -22,6 +17,7 @@ valid version for a {{ openshift_upgrade_target }} upgrade when: - openshift_pkg_version is defined + - openshift_pkg_version != "" - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<') - fail: @@ -30,6 +26,7 @@ valid version for a {{ openshift_upgrade_target }} upgrade when: - openshift_image_tag is defined + - openshift_image_tag != "" - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<') - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index 95c37c38c..45ddf7eea 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -5,7 +5,7 @@ when: openshift.common.version is not defined - name: Update oreg_auth docker login credentials if necessary - include_role: + import_role: name: container_runtime tasks_from: registry_auth.yml when: oreg_auth_user is defined @@ -21,7 +21,7 @@ block: - name: Check latest available OpenShift RPM version repoquery: - name: "{{ openshift_service_type }}" + name: "{{ openshift_service_type }}{{ '-' ~ openshift_release ~ '*' if openshift_release is 
defined else '' }}" ignore_excluders: true register: repoquery_out @@ -49,5 +49,5 @@ fail: msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" when: - - deployment_type == 'origin' + - openshift_deployment_type == 'origin' - openshift.common.version is version_compare(openshift_upgrade_min,'<') diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index 0263e721d..3c0b72832 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -2,6 +2,7 @@ ############################################################################### # Upgrade Masters ############################################################################### + - name: Backup and upgrade etcd import_playbook: ../../../openshift-etcd/private/upgrade_main.yml @@ -22,6 +23,8 @@ # See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 - name: Pre master upgrade - Upgrade all storage hosts: oo_first_master + roles: + - openshift_facts tasks: - name: Upgrade all storage command: > @@ -30,7 +33,6 @@ register: l_pb_upgrade_control_plane_pre_upgrade_storage when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool failed_when: - - openshift_upgrade_pre_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool @@ -46,13 +48,10 @@ # support for optional hooks to be defined. - name: Upgrade master hosts: oo_masters_to_config - vars: - openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 + roles: + - openshift_facts tasks: - - include_role: - name: openshift_facts - # Run the pre-upgrade hook if defined: - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}" when: openshift_master_upgrade_pre_hook is defined @@ -60,7 +59,7 @@ - include_tasks: "{{ openshift_master_upgrade_pre_hook }}" when: openshift_master_upgrade_pre_hook is defined - - include_role: + - import_role: name: openshift_master tasks_from: upgrade.yml @@ -71,6 +70,12 @@ - include_tasks: "{{ openshift_master_upgrade_hook }}" when: openshift_master_upgrade_hook is defined + - name: Disable master controller + service: + name: "{{ openshift_service_type }}-master-controllers" + enabled: false + when: openshift.common.rolling_restart_mode == 'system' + - include_tasks: ../../../openshift-master/private/tasks/restart_hosts.yml when: openshift.common.rolling_restart_mode == 'system' @@ -93,7 +98,6 @@ - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - openshift_version is version_compare('3.7','<') failed_when: - - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 - openshift_upgrade_post_storage_migration_fatal | default(false) | bool run_once: true @@ -108,7 +112,6 @@ - name: Gate on master update hosts: localhost connection: local - become: no tasks: - set_fact: master_update_completed: "{{ hostvars @@ -128,6 +131,7 @@ hosts: oo_masters_to_config roles: - { role: openshift_cli } + - { role: openshift_facts } vars: __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml" tasks: @@ -229,7 +233,6 @@ register: l_pb_upgrade_control_plane_post_upgrade_storage when: openshift_upgrade_post_storage_migration_enabled | 
default(true) | bool failed_when: - - openshift_upgrade_post_storage_migration_enabled | default(true) | bool - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 - openshift_upgrade_post_storage_migration_fatal | default(false) | bool @@ -242,7 +245,6 @@ - name: Gate on reconcile hosts: localhost connection: local - become: no tasks: - set_fact: reconcile_completed: "{{ hostvars @@ -291,28 +293,31 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + --force --delete-local-data --ignore-daemonsets + --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_control_plane_drain_result until: not (l_upgrade_control_plane_drain_result is failed) - retries: 60 - delay: 60 + retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}" + delay: 5 + failed_when: + - l_upgrade_control_plane_drain_result is failed + - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0 roles: - openshift_facts post_tasks: - - include_role: + - import_role: + name: openshift_node + tasks_from: upgrade_pre.yml + - import_role: name: openshift_node tasks_from: upgrade.yml + - import_role: + name: openshift_manage_node + tasks_from: config.yml vars: - openshift_node_upgrade_in_progress: True - - name: Set node schedulability - oc_adm_manage_node: - node: "{{ openshift.node.nodename | lower }}" - schedulable: True - delegate_to: "{{ groups.oo_first_master.0 }}" - retries: 10 - delay: 5 - register: node_schedulable - until: node_schedulable is succeeded - when: node_unschedulable is changed + openshift_master_host: "{{ groups.oo_first_master.0 }}" + openshift_manage_node_is_master: true diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index ece69a3d5..915fae9fd 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -4,11 +4,9 @@ roles: - role: openshift_facts tasks: - - include_role: + - import_role: name: openshift_node tasks_from: upgrade_pre.yml - vars: - openshift_node_upgrade_in_progress: True - name: Drain and upgrade nodes hosts: oo_nodes_to_upgrade:!oo_masters_to_config @@ -35,34 +33,33 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }} + --config={{ openshift.common.config_base }}/master/admin.kubeconfig + --force --delete-local-data --ignore-daemonsets + --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_nodes_drain_result until: not (l_upgrade_nodes_drain_result is failed) - retries: 60 - delay: 60 + retries: "{{ 1 if ( 
openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}" + delay: 5 + failed_when: + - l_upgrade_nodes_drain_result is failed + - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0 post_tasks: - - include_role: + - import_role: name: openshift_node tasks_from: upgrade.yml + - import_role: + name: openshift_manage_node + tasks_from: config.yml vars: - openshift_node_upgrade_in_progress: True - - name: Set node schedulability - oc_adm_manage_node: - node: "{{ openshift.node.nodename | lower }}" - schedulable: True - delegate_to: "{{ groups.oo_first_master.0 }}" - retries: 10 - delay: 5 - register: node_schedulable - until: node_schedulable is succeeded - when: node_unschedulable is changed + openshift_master_host: "{{ groups.oo_first_master.0 }}" - name: Re-enable excluders hosts: oo_nodes_to_upgrade:!oo_masters_to_config tasks: - - include_role: + - import_role: name: openshift_excluder vars: r_openshift_excluder_action: enable diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml index a90082760..e259b5d09 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml @@ -3,7 +3,7 @@ hosts: localhost tasks: - name: build upgrade scale groups - include_role: + import_role: name: openshift_aws tasks_from: upgrade_node_group.yml @@ -50,17 +50,17 @@ delegate_to: "{{ groups.oo_first_master.0 }}" register: l_upgrade_nodes_drain_result until: not (l_upgrade_nodes_drain_result is failed) - retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0 | int }}" + retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}" delay: 5 failed_when: - l_upgrade_nodes_drain_result is failed - - openshift_upgrade_nodes_drain_timeout | default(0) == '0' + - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0 # Alright, let's clean up! - name: clean up the old scale group hosts: localhost tasks: - name: clean up scale group - include_role: + import_role: name: openshift_aws tasks_from: remove_scale_group.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/master_config_upgrade.yml @@ -0,0 +1 @@ +--- diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/roles b/playbooks/common/openshift-cluster/upgrades/v3_10/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml new file mode 100644 index 000000000..ec1da6d39 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade.yml @@ -0,0 +1,7 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- import_playbook: upgrade_control_plane.yml + +- import_playbook: upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml new file mode 100644 index 000000000..64ee03562 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml @@ -0,0 +1,58 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- import_playbook: ../init.yml + vars: + l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + +- name: Configure the upgrade target for the common upgrade tasks 3.10 + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tasks: + - meta: clear_facts + - set_fact: + openshift_upgrade_target: '3.10' + openshift_upgrade_min: '3.9' + openshift_release: '3.10' + +- import_playbook: ../pre/config.yml + # These vars are meant to exclude oo_nodes from plays that would otherwise include + # them by default. + vars: + l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" + l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_masters_to_config" + openshift_protect_installed_version: False + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - set_fact: + pre_upgrade_complete: True + +- import_playbook: ../upgrade_control_plane.yml + vars: + openshift_release: '3.10' + +- import_playbook: ../post_control_plane.yml + +- hosts: oo_masters + tasks: + - import_role: + name: openshift_web_console + tasks_from: remove_old_asset_config
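Combined with the byo wrapper playbooks added earlier in this diff, a staged 3.10 upgrade could be driven as follows (hedged example: the inventory path is a placeholder, and openshift_upgrade_nodes_drain_timeout is the variable the drain tasks above pass to `oc adm drain --timeout`, where the default of 0 waits indefinitely):

```
# Upgrade the control plane first, then the nodes, bounding each node drain
# at 600 seconds instead of the default unbounded wait.
ansible-playbook -i ~/ansible-inventory \
    openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
ansible-playbook -i ~/ansible-inventory \
    -e openshift_upgrade_nodes_drain_timeout=600 \
    openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml
```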
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml new file mode 100644 index 000000000..eea1b250e --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_nodes.yml @@ -0,0 +1,35 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- import_playbook: ../init.yml + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tasks: + - set_fact: + openshift_upgrade_target: '3.10' + openshift_upgrade_min: '3.9' + openshift_release: '3.10' + +- import_playbook: ../pre/config.yml + vars: + l_upgrade_repo_hosts: "oo_nodes_to_config" + l_upgrade_no_proxy_hosts: "oo_all_hosts" + l_upgrade_health_check_hosts: "oo_nodes_to_config" + l_upgrade_verify_targets_hosts: "oo_nodes_to_config" + l_upgrade_docker_target_hosts: "oo_nodes_to_config" + l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config" + l_upgrade_nodes_only: True + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - set_fact: + pre_upgrade_complete: True + +# Pre-upgrade completed + +- import_playbook: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml new file mode 100644 index 000000000..d8540abfb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_10/validator.yml @@ -0,0 +1,7 @@ +--- +- name: Verify 3.8 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - debug: msg="noop" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index a5ad3801d..a2d21b69f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -13,7 +13,7 @@ tasks: - set_fact: openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}" - import_playbook: ../pre/config.yml vars: @@ -23,6 +23,7 @@ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config" + openshift_protect_installed_version: False - import_playbook: validator.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index 1498db4c5..9aa5a3b64 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -14,22 +14,28 @@ - import_playbook: ../init.yml vars: l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" - name: Configure the upgrade target for the common upgrade tasks hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tasks: - set_fact: openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}" - import_playbook: ../pre/config.yml + # These vars are meant to exclude oo_nodes from plays that would otherwise include + # them by default. 
vars: + l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" l_upgrade_no_proxy_hosts: "oo_masters_to_config" l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" l_upgrade_verify_targets_hosts: "oo_masters_to_config" l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" l_upgrade_excluder_hosts: "oo_masters_to_config" + openshift_protect_installed_version: False - import_playbook: validator.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml index 6958652d8..4febe76ee 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -15,7 +15,7 @@ tasks: - set_fact: openshift_upgrade_target: '3.6' - openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.5' if openshift_deployment_type == 'origin' else '3.5' }}" - import_playbook: ../pre/config.yml vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml index 4daa9e490..cc2ec2709 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -23,6 +23,7 @@ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config" + openshift_protect_installed_version: False - import_playbook: validator.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml index 1750148d4..b1ecc75d3 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -14,6 +14,7 @@ - import_playbook: ../init.yml vars: l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" - name: Configure the upgrade target for the common upgrade tasks hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config @@ -23,13 +24,18 @@ openshift_upgrade_min: '3.6' - import_playbook: ../pre/config.yml + # These vars are meant to exclude oo_nodes from plays that would otherwise include + # them by default. 
vars: + l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" l_upgrade_no_proxy_hosts: "oo_masters_to_config" l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" l_upgrade_verify_targets_hosts: "oo_masters_to_config" l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" l_upgrade_excluder_hosts: "oo_masters_to_config" + openshift_protect_installed_version: False - import_playbook: validator.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml index 49e691352..9c7688981 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -7,6 +7,7 @@ hosts: oo_first_master roles: - { role: lib_openshift } + - { role: openshift_facts } tasks: - name: Check for invalid namespaces and SDN errors diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml index 0f74e0137..a73b7d63a 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml @@ -23,6 +23,7 @@ l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config" + openshift_protect_installed_version: False - import_playbook: validator.yml @@ -35,8 +36,6 @@ # Pre-upgrade completed - import_playbook: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_7/master_config_upgrade.yml" # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml index 08bfd239f..723b2e533 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml @@ -14,6 +14,8 @@ - import_playbook: ../init.yml vars: l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + when: not skip_version_info | default(false) - name: Configure the upgrade target for the common upgrade tasks hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config @@ -23,13 +25,18 @@ openshift_upgrade_min: '3.7' - import_playbook: ../pre/config.yml + # These vars are meant to exclude oo_nodes from plays that would otherwise include + # them by default. 
vars: + l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" l_upgrade_no_proxy_hosts: "oo_masters_to_config" l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" l_upgrade_verify_targets_hosts: "oo_masters_to_config" l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" l_upgrade_excluder_hosts: "oo_masters_to_config" + openshift_protect_installed_version: False - import_playbook: validator.yml @@ -42,8 +49,6 @@ # Pre-upgrade completed - import_playbook: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_7/master_config_upgrade.yml" # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml index 1d4d1919c..ed97d539c 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/master_config_upgrade.yml @@ -1,20 +1 @@ --- -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.election.lockName' - yaml_value: 'openshift-master-controllers' - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.certFile' - yaml_value: service-signer.crt - -- modify_yaml: - dest: "{{ openshift.common.config_base}}/master/master-config.yaml" - yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile' - yaml_value: service-signer.key - -- modify_yaml: - dest: "{{ openshift.common.config_base }}/master/master-config.yaml" - yaml_key: servingInfo.clientCA - yaml_value: ca.crt diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml index 0aea5069d..ec1da6d39 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml @@ -2,54 +2,6 @@ # # Full Control Plane + Nodes Upgrade # -- import_playbook: ../init.yml +- import_playbook: upgrade_control_plane.yml -- name: Configure the upgrade target for the common upgrade tasks - hosts: oo_all_hosts - tasks: - - set_fact: - openshift_upgrade_target: '3.9' - openshift_upgrade_min: '3.7' - -- import_playbook: ../pre/config.yml - vars: - l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config" - l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" - l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" - l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade" - l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config" - l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config" - -- import_playbook: validator.yml - -- name: Flag pre-upgrade checks complete for hosts without errors - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - set_fact: - pre_upgrade_complete: True - -# Pre-upgrade completed - -- import_playbook: ../upgrade_control_plane.yml - vars: - master_config_hook: "v3_7/master_config_upgrade.yml" - -# All 
controllers must be stopped at the same time then restarted -- name: Cycle all controller services to force new leader election mode - hosts: oo_masters_to_config - gather_facts: no - roles: - - role: openshift_facts - tasks: - - name: Stop {{ openshift.common.service_type }}-master-controllers - systemd: - name: "{{ openshift.common.service_type }}-master-controllers" - state: stopped - - name: Start {{ openshift.common.service_type }}-master-controllers - systemd: - name: "{{ openshift.common.service_type }}-master-controllers" - state: started - -- import_playbook: ../upgrade_nodes.yml - -- import_playbook: ../post_control_plane.yml +- import_playbook: upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml index 05aa737c6..9c7677f1b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -14,37 +14,107 @@ - import_playbook: ../init.yml vars: l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_base_packages_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" -- name: Configure the upgrade target for the common upgrade tasks +## Check to see if they're running 3.7 and, if so, upgrade them to 3.8 on the control plane ## If they've specified pkg_version or image_tag, preserve that for later use +- name: Configure the upgrade target for the common upgrade tasks 3.8 hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config tasks: - set_fact: - openshift_upgrade_target: '3.9' + openshift_upgrade_target: '3.8' openshift_upgrade_min: '3.7' + openshift_release: '3.8' + _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}" + openshift_pkg_version: '' + _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}" + l_double_upgrade_cp: True + when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<') + + - name: set l_force_image_tag_to_version = True + set_fact: + # Need to set this during 3.8 upgrade to ensure image_tag is set correctly + # to match 3.8 version + l_force_image_tag_to_version: True + when: _requested_image_tag is defined - import_playbook: ../pre/config.yml + # These vars are meant to exclude oo_nodes from plays that would otherwise include + # them by default.
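The `version_compare` filter in the `when:` clauses above is what decides whether the intermediate 3.8 hop runs at all. A standalone sketch of that gate, with a hard-coded version string standing in for the discovered `openshift_currently_installed_version` (illustrative only):

```
# Sketch only: a 3.7.x control plane compares as less than '3.8', so the
# intermediate 3.8 plays run; a cluster already on 3.8.x skips them.
- name: Demonstrate the 3.7 -> 3.8 -> 3.9 double-upgrade gate
  hosts: localhost
  gather_facts: no
  tasks:
    - debug:
        msg: "intermediate 3.8 upgrade required"
      when: "'3.7.42' | version_compare('3.8', '<')"
```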
vars: + l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" l_upgrade_no_proxy_hosts: "oo_masters_to_config" l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" l_upgrade_verify_targets_hosts: "oo_masters_to_config" l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" l_upgrade_excluder_hosts: "oo_masters_to_config" + openshift_protect_installed_version: False + when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<') -- import_playbook: validator.yml - -- name: Flag pre-upgrade checks complete for hosts without errors +- name: Flag pre-upgrade checks complete for hosts without errors 3.8 hosts: oo_masters_to_config:oo_etcd_to_config tasks: - set_fact: pre_upgrade_complete: True + when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<') # Pre-upgrade completed +- name: Intermediate 3.8 Upgrade + import_playbook: ../upgrade_control_plane.yml + when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<') -- import_playbook: ../upgrade_control_plane.yml +## 3.8 upgrade complete; we should now be able to upgrade to 3.9 + +- name: Configure the upgrade target for the common upgrade tasks 3.9 + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tasks: + - meta: clear_facts + - set_fact: + openshift_upgrade_target: '3.9' + openshift_upgrade_min: '3.8' + openshift_release: '3.9' + openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}" + # Set the user's specified image_tag for 3.9 upgrade if it was provided. + - set_fact: + openshift_image_tag: "{{ _requested_image_tag }}" + l_force_image_tag_to_version: False + when: _requested_image_tag is defined + # If the user didn't specify an image_tag, we need to force update image_tag + # because it will have already been set during 3.8. If we aren't running + # a double upgrade, then we can preserve image_tag because it will still + # be the user-provided value. + - set_fact: + l_force_image_tag_to_version: True + when: + - l_double_upgrade_cp is defined and l_double_upgrade_cp + - _requested_image_tag is not defined + +- import_playbook: ../pre/config.yml + # These vars are meant to exclude oo_nodes from plays that would otherwise include + # them by default.
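Because the 3.8 stage blanks `openshift_pkg_version` and pins the image tag to 3.8, any user-supplied values are stashed in `_requested_pkg_version` / `_requested_image_tag` and restored for the 3.9 stage above. A hedged example of what a user might pass in; the values are illustrative, not recommendations:

```
# Sketch only: extra-vars a user might pin for a 3.9 upgrade. These survive
# the intermediate 3.8 hop via the _requested_* stash-and-restore above.
openshift_pkg_version: '-3.9.0'
openshift_image_tag: 'v3.9.0'
```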
vars: - master_config_hook: "v3_7/master_config_upgrade.yml" + l_openshift_version_set_hosts: "oo_etcd_to_config:oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "oo_masters_to_config:!oo_first_master" + l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_no_proxy_hosts: "oo_masters_to_config" + l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_upgrade_verify_targets_hosts: "oo_masters_to_config" + l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config" + l_upgrade_excluder_hosts: "oo_masters_to_config" + openshift_protect_installed_version: False + openshift_version_reinit: True + +- name: Flag pre-upgrade checks complete for hosts without errors + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - set_fact: + pre_upgrade_complete: True + +- import_playbook: ../upgrade_control_plane.yml # All controllers must be stopped at the same time then restarted - name: Cycle all controller services to force new leader election mode @@ -53,13 +123,21 @@ roles: - role: openshift_facts tasks: - - name: Stop {{ openshift.common.service_type }}-master-controllers - systemd: - name: "{{ openshift.common.service_type }}-master-controllers" - state: stopped - - name: Start {{ openshift.common.service_type }}-master-controllers - systemd: - name: "{{ openshift.common.service_type }}-master-controllers" - state: started + - name: Restart master controllers to force new leader election mode + service: + name: "{{ openshift_service_type }}-master-controllers" + state: restarted + when: openshift.common.rolling_restart_mode == 'services' + - name: Re-enable master controllers to force new leader election mode + service: + name: "{{ openshift_service_type }}-master-controllers" + enabled: true + when: openshift.common.rolling_restart_mode == 'system' - import_playbook: ../post_control_plane.yml + +- hosts: oo_masters + tasks: + - import_role: + name: openshift_web_console + tasks_from: remove_old_asset_config diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml index 1d1b255c1..859b1d88b 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml @@ -12,6 +12,7 @@ - set_fact: openshift_upgrade_target: '3.9' openshift_upgrade_min: '3.7' + openshift_release: '3.9' - import_playbook: ../pre/config.yml vars: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml index 4bd2d87b1..d8540abfb 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/validator.yml @@ -1,5 +1,5 @@ --- -- name: Verify 3.9 specific upgrade checks +- name: Verify 3.8 specific upgrade checks hosts: oo_first_master roles: - { role: lib_openshift } diff --git a/playbooks/common/private/components.yml b/playbooks/common/private/components.yml new file mode 100644 index 000000000..739be93c5 --- /dev/null +++ b/playbooks/common/private/components.yml @@ -0,0 +1,40 @@ +--- +# These are the core component plays that configure the layers above the control +# plane. A component is generally considered any part of OpenShift that runs on +# top of the cluster and may be considered optional. 
Over time, much of OpenShift +# above the Kubernetes apiserver and masters may be considered components. +# +# Preconditions: +# +# 1. The control plane is configured and reachable from nodes inside the cluster +# 2. An admin kubeconfig file in /etc/origin/master/admin.kubeconfig that can +# perform root level actions against the cluster +# 3. On cloud providers, persistent volume provisioners are configured +# 4. A subset of nodes is available to allow components to schedule - this must +# include the masters and usually includes infra nodes. +# 5. The init/main.yml playbook has been invoked + +- import_playbook: ../../openshift-glusterfs/private/config.yml + when: groups.oo_glusterfs_to_config | default([]) | count > 0 + +- import_playbook: ../../openshift-hosted/private/config.yml + +- import_playbook: ../../openshift-web-console/private/config.yml + when: + - openshift_web_console_install | default(true) | bool + - openshift.common.version_gte_3_9 + +- import_playbook: ../../openshift-metrics/private/config.yml + when: openshift_metrics_install_metrics | default(false) | bool + +- import_playbook: ../../openshift-logging/private/config.yml + when: openshift_logging_install_logging | default(false) | bool + +- import_playbook: ../../openshift-prometheus/private/config.yml + when: openshift_hosted_prometheus_deploy | default(false) | bool + +- import_playbook: ../../openshift-service-catalog/private/config.yml + when: openshift_enable_service_catalog | default(true) | bool + +- import_playbook: ../../openshift-management/private/config.yml + when: openshift_management_install_management | default(false) | bool diff --git a/playbooks/common/private/control_plane.yml b/playbooks/common/private/control_plane.yml new file mode 100644 index 000000000..0a5f1142b --- /dev/null +++ b/playbooks/common/private/control_plane.yml @@ -0,0 +1,34 @@ +--- +# These are the control plane plays that configure a control plane on top of hosts +# identified as masters. Over time, some of the pieces of the current control plane +# may be moved to the components list. +# +# It is not required for any nodes to be configured, or passed to be configured, +# when this playbook is invoked. +# +# Preconditions: +# +# 1. A set of machines have been identified to act as masters +# 2. On cloud providers, a load balancer has been configured to point to the masters +# and that load balancer has a DNS name +# 3. The init/main.yml playbook has been invoked +# +# Postconditions: +# +# 1. The control plane is reachable from the outside of the cluster +# 2. The master has an /etc/origin/master/admin.kubeconfig file that gives cluster-admin +# access. 
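Each optional layer in components.yml above is switched by a single inventory boolean, so a cluster's component set can be captured in one small vars file. The variable names below are the ones the component plays check; the values are examples only:

```
# Sketch only: toggles for the optional components wired up in components.yml.
openshift_web_console_install: true
openshift_metrics_install_metrics: false
openshift_logging_install_logging: true
openshift_hosted_prometheus_deploy: false
openshift_enable_service_catalog: true
openshift_management_install_management: false
```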
+ +- import_playbook: ../../openshift-checks/private/install.yml + +- import_playbook: ../../openshift-etcd/private/config.yml + +- import_playbook: ../../openshift-nfs/private/config.yml + when: groups.oo_nfs_to_config | default([]) | count > 0 + +- import_playbook: ../../openshift-loadbalancer/private/config.yml + when: groups.oo_lb_to_config | default([]) | count > 0 + +- import_playbook: ../../openshift-master/private/config.yml + +- import_playbook: ../../openshift-master/private/additional_config.yml diff --git a/playbooks/container-runtime/config.yml b/playbooks/container-runtime/config.yml index f15aa771f..d7f3634ec 100644 --- a/playbooks/container-runtime/config.yml +++ b/playbooks/container-runtime/config.yml @@ -1,6 +1,8 @@ --- - import_playbook: ../init/main.yml vars: - skip_verison: True + skip_version: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" - import_playbook: private/config.yml diff --git a/playbooks/container-runtime/private/build_container_groups.yml b/playbooks/container-runtime/private/build_container_groups.yml new file mode 100644 index 000000000..8fb7b63e8 --- /dev/null +++ b/playbooks/container-runtime/private/build_container_groups.yml @@ -0,0 +1,8 @@ +--- +# l_build_container_groups_hosts is passed in via prerequisites.yml during +# etcd scaleup plays. +- name: create oo_hosts_containerized_managed_true host group + hosts: "{{ l_build_container_groups_hosts | default('oo_all_hosts:!oo_nodes_to_config') }}" + tasks: + - group_by: + key: oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true','false') }} diff --git a/playbooks/container-runtime/private/config.yml b/playbooks/container-runtime/private/config.yml index 67445edeb..d5312de15 100644 --- a/playbooks/container-runtime/private/config.yml +++ b/playbooks/container-runtime/private/config.yml @@ -1,26 +1,35 @@ --- -- hosts: "{{ l_containerized_host_groups }}" +# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays. +# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd +# scaleup plays. + +- import_playbook: build_container_groups.yml + +- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_runtime_hosts) }}" vars: - l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}" - l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}" - # role: container_runtime is necessary here to bring role default variables - # into the play scope. 
+ l_default_container_runtime_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true" roles: - role: container_runtime tasks: - - include_role: + - import_role: + name: openshift_excluder + tasks_from: enable.yml + vars: + r_openshift_excluder_action: enable + r_openshift_excluder_enable_openshift_excluder: false + - import_role: name: container_runtime tasks_from: package_docker.yml when: - not openshift_docker_use_system_container | bool - not openshift_use_crio_only | bool - - include_role: + - import_role: name: container_runtime tasks_from: systemcontainer_docker.yml when: - openshift_docker_use_system_container | bool - not openshift_use_crio_only | bool - - include_role: + - import_role: name: container_runtime tasks_from: systemcontainer_crio.yml when: diff --git a/playbooks/container-runtime/private/setup_storage.yml b/playbooks/container-runtime/private/setup_storage.yml index 97226d6b2..586149b1d 100644 --- a/playbooks/container-runtime/private/setup_storage.yml +++ b/playbooks/container-runtime/private/setup_storage.yml @@ -1,14 +1,21 @@ --- -- hosts: "{{ l_containerized_host_groups }}" +# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays. +# l_etcd_scale_up_hosts may be passed in via prerequisites.yml during etcd +# scaleup plays. + +- import_playbook: build_container_groups.yml + +- hosts: "{{ l_etcd_scale_up_hosts | default(l_scale_up_hosts) | default(l_default_container_storage_hosts) }}" vars: - l_chg_temp: "{{ openshift_containerized_host_groups | default([]) }}" + l_default_container_storage_hosts: "oo_nodes_to_config:oo_hosts_containerized_managed_true" + l_chg_temp: "{{ hostvars[groups['oo_first_master'][0]]['openshift_containerized_host_groups'] | default([]) }}" l_containerized_host_groups: "{{ (['oo_nodes_to_config'] | union(l_chg_temp)) | join(':') }}" # role: container_runtime is necessary here to bring role default variables # into the play scope. 
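The `oo_hosts_containerized_managed_true` group referenced in these defaults is built at runtime by build_container_groups.yml using `group_by`. A self-contained sketch of that pattern, assuming `openshift_is_containerized` has already been set as a fact:

```
# Sketch only: bucket hosts into dynamic groups keyed on a boolean fact, as
# build_container_groups.yml does for containerized-managed hosts.
- name: Split hosts by containerized management
  hosts: all
  gather_facts: no
  tasks:
    - group_by:
        key: "oo_hosts_containerized_managed_{{ (openshift_is_containerized | default(False)) | ternary('true', 'false') }}"
```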
roles: - role: container_runtime tasks: - - include_role: + - import_role: name: container_runtime tasks_from: docker_storage_setup_overlay.yml when: diff --git a/playbooks/container-runtime/setup_storage.yml b/playbooks/container-runtime/setup_storage.yml index 98e876b2c..17ff11cfd 100644 --- a/playbooks/container-runtime/setup_storage.yml +++ b/playbooks/container-runtime/setup_storage.yml @@ -1,6 +1,8 @@ --- - import_playbook: ../init/main.yml vars: - skip_verison: True + skip_version: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" - import_playbook: private/setup_storage.yml diff --git a/playbooks/deploy_cluster.yml b/playbooks/deploy_cluster.yml index 0e6bde09a..c8e30ddbc 100644 --- a/playbooks/deploy_cluster.yml +++ b/playbooks/deploy_cluster.yml @@ -1,46 +1,8 @@ --- - import_playbook: init/main.yml -- import_playbook: openshift-checks/private/install.yml - -- import_playbook: openshift-etcd/private/config.yml - -- import_playbook: openshift-nfs/private/config.yml - when: groups.oo_nfs_to_config | default([]) | count > 0 - -- import_playbook: openshift-loadbalancer/private/config.yml - when: groups.oo_lb_to_config | default([]) | count > 0 - -- import_playbook: openshift-master/private/config.yml - -- import_playbook: openshift-master/private/additional_config.yml +- import_playbook: common/private/control_plane.yml - import_playbook: openshift-node/private/config.yml -- import_playbook: openshift-glusterfs/private/config.yml - when: groups.oo_glusterfs_to_config | default([]) | count > 0 - -- import_playbook: openshift-hosted/private/config.yml - -- import_playbook: openshift-metrics/private/config.yml - when: openshift_metrics_install_metrics | default(false) | bool - -- import_playbook: openshift-logging/private/config.yml - when: openshift_logging_install_logging | default(false) | bool - -- import_playbook: openshift-prometheus/private/config.yml - when: openshift_hosted_prometheus_deploy | default(false) | bool - -- import_playbook: openshift-service-catalog/private/config.yml - when: openshift_enable_service_catalog | default(true) | bool - -- import_playbook: openshift-management/private/config.yml - when: openshift_management_install_management | default(false) | bool - -- name: Print deprecated variable warning message if necessary - hosts: oo_first_master - gather_facts: no - tasks: - - debug: msg="{{__deprecation_message}}" - when: - - __deprecation_message | default ('') | length > 0 +- import_playbook: common/private/components.yml diff --git a/playbooks/gcp/openshift-cluster/build_base_image.yml b/playbooks/gcp/openshift-cluster/build_base_image.yml new file mode 100644 index 000000000..8e9b0024a --- /dev/null +++ b/playbooks/gcp/openshift-cluster/build_base_image.yml @@ -0,0 +1,163 @@ +--- +# This playbook ensures that a base image is up to date with all of the required settings +- name: Launch image build instance + hosts: localhost + connection: local + gather_facts: no + tasks: + - name: Require openshift_gcp_root_image + fail: + msg: "A root OS image name or family is required for base image building. Please ensure `openshift_gcp_root_image` is defined." 
+ when: openshift_gcp_root_image is undefined + + - name: Create the image instance disk + gce_pd: + service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" + credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}" + project_id: "{{ openshift_gcp_project }}" + zone: "{{ openshift_gcp_zone }}" + name: "{{ openshift_gcp_prefix }}build-image-instance" + disk_type: pd-ssd + image: "{{ openshift_gcp_root_image }}" + size_gb: 10 + state: present + + - name: Launch the image build instance + gce: + service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" + credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}" + project_id: "{{ openshift_gcp_project }}" + zone: "{{ openshift_gcp_zone }}" + machine_type: n1-standard-1 + instance_names: "{{ openshift_gcp_prefix }}build-image-instance" + state: present + tags: + - build-image-instance + disk_auto_delete: false + disks: + - "{{ openshift_gcp_prefix }}build-image-instance" + register: gce + + - add_host: + hostname: "{{ item.public_ip }}" + groupname: build_instance_ips + with_items: "{{ gce.instance_data }}" + + - name: Wait for instance to respond to SSH + wait_for: + delay: 1 + host: "{{ item.public_ip }}" + port: 22 + state: started + timeout: 120 + with_items: "{{ gce.instance_data }}" + +- name: Prepare instance content sources + pre_tasks: + - set_fact: + allow_rhel_subscriptions: "{{ rhsub_skip | default('no', True) | lower in ['no', 'false'] }}" + - set_fact: + using_rhel_subscriptions: "{{ (deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise'] or ansible_distribution == 'RedHat') and allow_rhel_subscriptions }}" + hosts: build_instance_ips + roles: + - role: rhel_subscribe + when: using_rhel_subscriptions + - role: openshift_repos + vars: + openshift_additional_repos: [] + post_tasks: + - name: Add custom repositories + include_role: + name: openshift_gcp + tasks_from: add_custom_repositories.yml + - name: Add the Google Cloud repo + yum_repository: + name: google-cloud + description: Google Cloud Compute + baseurl: https://packages.cloud.google.com/yum/repos/google-cloud-compute-el7-x86_64 + gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + gpgcheck: yes + repo_gpgcheck: yes + state: present + when: ansible_os_family == "RedHat" + - name: Add the jdetiber-qemu-user-static copr repo + yum_repository: + name: jdetiber-qemu-user-static + description: QEMU user static COPR + baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/epel-7-$basearch/ + gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/qemu-user-static/pubkey.gpg + gpgcheck: yes + repo_gpgcheck: no + state: present + when: ansible_os_family == "RedHat" + - name: Accept GPG keys for the repos + command: yum -q makecache -y --disablerepo='*' --enablerepo='google-cloud,jdetiber-qemu-user-static' + - name: Install qemu-user-static + package: + name: qemu-user-static + state: present + - name: Start and enable systemd-binfmt service + systemd: + name: systemd-binfmt + state: started + enabled: yes + +- name: Build image + hosts: build_instance_ips + pre_tasks: + - name: Set up core host GCP configuration + include_role: + name: openshift_gcp + tasks_from: configure_gcp_base_image.yml + roles: + - role: os_update_latest + post_tasks: + - name: Disable all repos on RHEL + command: subscription-manager 
repos --disable="*" + when: using_rhel_subscriptions + - name: Enable repos for packages on RHEL + command: subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms" + when: using_rhel_subscriptions + - name: Install common image prerequisites + package: name={{ item }} state=latest + with_items: + # required by Ansible + - PyYAML + - google-compute-engine + - google-compute-engine-init + - google-config + - wget + - git + - net-tools + - bind-utils + - iptables-services + - bridge-utils + - bash-completion + - name: Clean yum metadata + command: yum clean all + args: + warn: no + when: ansible_os_family == "RedHat" + +- name: Commit image + hosts: localhost + connection: local + tasks: + - name: Terminate the image build instance + gce: + service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" + credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}" + project_id: "{{ openshift_gcp_project }}" + zone: "{{ openshift_gcp_zone }}" + instance_names: "{{ openshift_gcp_prefix }}build-image-instance" + state: absent + - name: Save the new image + command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_base_image_name | default(openshift_gcp_base_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_base_image }}" + - name: Remove the image instance disk + gce_pd: + service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" + credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}" + project_id: "{{ openshift_gcp_project }}" + zone: "{{ openshift_gcp_zone }}" + name: "{{ openshift_gcp_prefix }}build-image-instance" + state: absent diff --git a/playbooks/gcp/openshift-cluster/build_image.yml b/playbooks/gcp/openshift-cluster/build_image.yml new file mode 100644 index 000000000..0daf61122 --- /dev/null +++ b/playbooks/gcp/openshift-cluster/build_image.yml @@ -0,0 +1,112 @@ +--- +- name: Verify prerequisites for image build + hosts: localhost + connection: local + gather_facts: no + tasks: + - name: Require openshift_gcp_base_image + fail: + msg: "A base image name or family is required for image building. Please ensure `openshift_gcp_base_image` is defined." 
+ when: openshift_gcp_base_image is undefined + +- name: Launch image build instance + hosts: localhost + connection: local + gather_facts: no + tasks: + - name: Set facts + set_fact: + openshift_node_bootstrap: True + openshift_master_unsupported_embedded_etcd: True + + - name: Create the image instance disk + gce_pd: + service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" + credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}" + project_id: "{{ openshift_gcp_project }}" + zone: "{{ openshift_gcp_zone }}" + name: "{{ openshift_gcp_prefix }}build-image-instance" + disk_type: pd-ssd + image: "{{ openshift_gcp_base_image }}" + size_gb: 10 + state: present + + - name: Launch the image build instance + gce: + service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" + credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}" + project_id: "{{ openshift_gcp_project }}" + zone: "{{ openshift_gcp_zone }}" + machine_type: n1-standard-1 + instance_names: "{{ openshift_gcp_prefix }}build-image-instance" + state: present + tags: + - build-image-instance + disk_auto_delete: false + disks: + - "{{ openshift_gcp_prefix }}build-image-instance" + register: gce + + - name: add host to nodes + add_host: + hostname: "{{ item.public_ip }}" + groupname: nodes + with_items: "{{ gce.instance_data }}" + + - name: Wait for instance to respond to SSH + wait_for: + delay: 1 + host: "{{ item.public_ip }}" + port: 22 + state: started + timeout: 120 + with_items: "{{ gce.instance_data }}" + +- name: Wait for full SSH connection + hosts: nodes + gather_facts: no + tasks: + - wait_for_connection: + +- hosts: nodes + tasks: + - name: Set facts + set_fact: + openshift_node_bootstrap: True + +# This is the part that installs all of the software and configs for the instance +# to become a node. 
+- import_playbook: ../../openshift-node/private/image_prep.yml + +# Add additional GCP specific behavior +- hosts: nodes + tasks: + - include_role: + name: openshift_gcp + tasks_from: node_cloud_config.yml + - include_role: + name: openshift_gcp + tasks_from: frequent_log_rotation.yml + +- name: Commit image + hosts: localhost + connection: local + tasks: + - name: Terminate the image build instance + gce: + service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" + credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}" + project_id: "{{ openshift_gcp_project }}" + zone: "{{ openshift_gcp_zone }}" + instance_names: "{{ openshift_gcp_prefix }}build-image-instance" + state: absent + - name: Save the new image + command: gcloud --project "{{ openshift_gcp_project}}" compute images create "{{ openshift_gcp_image_name | default(openshift_gcp_image + '-' + lookup('pipe','date +%Y%m%d-%H%M%S')) }}" --source-disk "{{ openshift_gcp_prefix }}build-image-instance" --source-disk-zone "{{ openshift_gcp_zone }}" --family "{{ openshift_gcp_image }}" + - name: Remove the image instance disk + gce_pd: + service_account_email: "{{ (lookup('file', openshift_gcp_iam_service_account_keyfile ) | from_json ).client_email }}" + credentials_file: "{{ openshift_gcp_iam_service_account_keyfile }}" + project_id: "{{ openshift_gcp_project }}" + zone: "{{ openshift_gcp_zone }}" + name: "{{ openshift_gcp_prefix }}build-image-instance" + state: absent diff --git a/playbooks/gcp/openshift-cluster/deprovision.yml b/playbooks/gcp/openshift-cluster/deprovision.yml new file mode 100644 index 000000000..589fddd2f --- /dev/null +++ b/playbooks/gcp/openshift-cluster/deprovision.yml @@ -0,0 +1,10 @@ +# This playbook terminates a running cluster +--- +- name: Terminate running cluster and remove all supporting resources in GCE + hosts: localhost + connection: local + tasks: + - include_role: + name: openshift_gcp + vars: + state: absent diff --git a/playbooks/gcp/openshift-cluster/install.yml b/playbooks/gcp/openshift-cluster/install.yml new file mode 100644 index 000000000..fb35b4348 --- /dev/null +++ b/playbooks/gcp/openshift-cluster/install.yml @@ -0,0 +1,33 @@ +# This playbook installs onto a provisioned cluster +--- +- hosts: localhost + connection: local + tasks: + - name: place all scale groups into Ansible groups + include_role: + name: openshift_gcp + tasks_from: setup_scale_group_facts.yml + +- name: run the init + import_playbook: ../../init/main.yml + +- name: configure the control plane + import_playbook: ../../common/private/control_plane.yml + +- name: ensure the masters are configured as nodes + import_playbook: ../../openshift-node/private/config.yml + +- name: run the GCP specific post steps + import_playbook: install_gcp.yml + +- name: install components + import_playbook: ../../common/private/components.yml + +- hosts: primary_master + gather_facts: no + tasks: + - name: Retrieve cluster configuration + fetch: + src: "{{ openshift.common.config_base }}/master/admin.kubeconfig" + dest: "/tmp/" + flat: yes diff --git a/playbooks/gcp/openshift-cluster/install_gcp.yml b/playbooks/gcp/openshift-cluster/install_gcp.yml new file mode 100644 index 000000000..09db78971 --- /dev/null +++ b/playbooks/gcp/openshift-cluster/install_gcp.yml @@ -0,0 +1,21 @@ +--- +- hosts: masters + gather_facts: no + tasks: + - name: create master health check service + include_role: + name: openshift_gcp + tasks_from: configure_master_healthcheck.yml + - name: 
configure node bootstrapping + include_role: + name: openshift_gcp + tasks_from: configure_master_bootstrap.yml + when: + - openshift_master_bootstrap_enabled | default(False) + - name: configure node bootstrap autoapprover + include_role: + name: openshift_bootstrap_autoapprover + tasks_from: main + when: + - openshift_master_bootstrap_enabled | default(False) + - openshift_master_bootstrap_auto_approve | default(False) | bool diff --git a/playbooks/gcp/openshift-cluster/inventory.yml b/playbooks/gcp/openshift-cluster/inventory.yml new file mode 100644 index 000000000..96de6d6db --- /dev/null +++ b/playbooks/gcp/openshift-cluster/inventory.yml @@ -0,0 +1,10 @@ +--- +- name: Set up the connection variables for retrieving inventory from GCE + hosts: localhost + connection: local + gather_facts: no + tasks: + - name: materialize the inventory + include_role: + name: openshift_gcp + tasks_from: dynamic_inventory.yml diff --git a/playbooks/gcp/openshift-cluster/launch.yml b/playbooks/gcp/openshift-cluster/launch.yml new file mode 100644 index 000000000..02f00408a --- /dev/null +++ b/playbooks/gcp/openshift-cluster/launch.yml @@ -0,0 +1,12 @@ +# This playbook launches a new cluster or converges it if already launched +--- +- import_playbook: build_image.yml + when: openshift_gcp_build_image | default(False) | bool + +- import_playbook: provision.yml + +- hosts: localhost + tasks: + - meta: refresh_inventory + +- import_playbook: install.yml diff --git a/playbooks/gcp/provision.yml b/playbooks/gcp/openshift-cluster/provision.yml index 6016e6a78..293a195c9 100644 --- a/playbooks/gcp/provision.yml +++ b/playbooks/gcp/openshift-cluster/provision.yml @@ -3,11 +3,10 @@ hosts: localhost connection: local gather_facts: no + roles: + - openshift_gcp tasks: - - - name: provision a GCP cluster in the specified project - include_role: + - name: recalculate the dynamic inventory + import_role: name: openshift_gcp - -- name: run the cluster deploy - import_playbook: ../deploy_cluster.yml + tasks_from: dynamic_inventory.yml diff --git a/playbooks/gcp/openshift-cluster/publish_image.yml b/playbooks/gcp/openshift-cluster/publish_image.yml new file mode 100644 index 000000000..76fd49e9c --- /dev/null +++ b/playbooks/gcp/openshift-cluster/publish_image.yml @@ -0,0 +1,9 @@ +--- +- name: Publish the most recent image + hosts: localhost + connection: local + gather_facts: no + tasks: + - import_role: + name: openshift_gcp + tasks_from: publish_image.yml diff --git a/playbooks/gcp/openshift-cluster/roles b/playbooks/gcp/openshift-cluster/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/gcp/openshift-cluster/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml new file mode 100644 index 000000000..addb4f44d --- /dev/null +++ b/playbooks/init/base_packages.yml @@ -0,0 +1,42 @@ +--- +# l_base_packages_hosts may be passed in via prerequisites.yml during scaleup plays +# and upgrade_control_plane.yml upgrade plays. + +- name: Install packages necessary for installer + hosts: "{{ l_base_packages_hosts | default('oo_all_hosts') }}" + any_errors_fatal: true + tasks: + - when: + - not openshift_is_atomic | bool + block: + - name: Ensure openshift-ansible installer package deps are installed + package: + name: "{{ item }}" + state: present + with_items: + - iproute + - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" + - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" + - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}" + - yum-utils + when: item != '' + register: result + until: result is succeeded + + - name: Ensure various deps for running system containers are installed + package: + name: "{{ item }}" + state: present + with_items: + - atomic + - ostree + - runc + when: + - > + (openshift_use_system_containers | default(False)) | bool + or (openshift_use_etcd_system_container | default(False)) | bool + or (openshift_use_openvswitch_system_container | default(False)) | bool + or (openshift_use_node_system_container | default(False)) | bool + or (openshift_use_master_system_container | default(False)) | bool + register: result + until: result is succeeded diff --git a/playbooks/init/basic_facts.yml b/playbooks/init/basic_facts.yml new file mode 100644 index 000000000..a9bf06693 --- /dev/null +++ b/playbooks/init/basic_facts.yml @@ -0,0 +1,77 @@ +--- +- name: Ensure that all non-node hosts are accessible + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config + any_errors_fatal: true + tasks: + +- name: Initialize basic host facts + # l_init_fact_hosts is passed in via play during control-plane-only + # upgrades and scale-up plays; otherwise oo_all_hosts is used. + hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}" + roles: + - role: openshift_facts + tasks: + # TODO: Should this role be refactored into health_checks?? + - name: Run openshift_sanitize_inventory to set variables + import_role: + name: openshift_sanitize_inventory + + - name: Detecting Operating System from ostree_booted + stat: + path: /run/ostree-booted + register: ostree_booted + + # TODO(michaelgugino) remove this line once CI is updated. + - name: set openshift_deployment_type if unset + set_fact: + openshift_deployment_type: "{{ deployment_type }}" + when: + - openshift_deployment_type is undefined + - deployment_type is defined + + - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized + set_fact: + openshift_is_atomic: "{{ ostree_booted.stat.exists }}" + openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}" + + # TODO: Should this be moved into health checks?? 
+ # Seems as though any check that happens with a corresponding fail should move into health_checks + # Fail as early as possible if Atomic and old version of Docker + - when: + - openshift_is_atomic | bool + block: + + # See https://access.redhat.com/articles/2317361 + # and https://github.com/ansible/ansible/issues/15892 + # NOTE: the "'s can not be removed at this level else the docker command will fail + # NOTE: When ansible >2.2.1.x is used this can be updated per + # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121 + - name: Determine Atomic Host Docker Version + shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"' + register: l_atomic_docker_version + + - name: assert atomic host docker version is 1.12 or later + assert: + that: + - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=') + msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host. + +- name: Initialize special first-master variables + hosts: oo_first_master + roles: + - role: openshift_facts + tasks: + - set_fact: + # We need to setup openshift_client_binary here for special uses of delegate_to in + # later roles and plays. + first_master_client_binary: "{{ openshift_client_binary }}" + #Some roles may require this to be set for first master + openshift_client_binary: "{{ openshift_client_binary }}" + +- name: Disable web console if required + hosts: oo_masters_to_config + gather_facts: no + tasks: + - set_fact: + openshift_web_console_install: False + when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features ) diff --git a/playbooks/init/cluster_facts.yml b/playbooks/init/cluster_facts.yml new file mode 100644 index 000000000..636679e32 --- /dev/null +++ b/playbooks/init/cluster_facts.yml @@ -0,0 +1,42 @@ +--- +- name: Initialize cluster facts + # l_init_fact_hosts is passed in via play during control-plane-only + # upgrades and scale-up plays; otherwise oo_all_hosts is used. 
+ hosts: "{{ l_init_fact_hosts | default('oo_all_hosts') }}" + roles: + - role: openshift_facts + tasks: + - name: Gather Cluster facts + openshift_facts: + role: common + local_facts: + deployment_type: "{{ openshift_deployment_type }}" + deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}" + hostname: "{{ openshift_hostname | default(None) }}" + ip: "{{ openshift_ip | default(None) }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + public_ip: "{{ openshift_public_ip | default(None) }}" + portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" + http_proxy: "{{ openshift_http_proxy | default(None) }}" + https_proxy: "{{ openshift_https_proxy | default(None) }}" + no_proxy: "{{ openshift_no_proxy | default(None) }}" + generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" + + - name: Set fact of no_proxy_internal_hostnames + openshift_facts: + role: common + local_facts: + no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + + - name: Initialize openshift.node.sdn_mtu + openshift_facts: + role: node + local_facts: + sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" diff --git a/playbooks/init/evaluate_groups.yml b/playbooks/init/evaluate_groups.yml index 8087f6ffc..81d7d63ca 100644 --- a/playbooks/init/evaluate_groups.yml +++ b/playbooks/init/evaluate_groups.yml @@ -2,7 +2,6 @@ - name: Populate config host groups hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Load group name mapping variables @@ -46,9 +45,13 @@ - name: Evaluate groups - Fail if no etcd hosts group is defined fail: msg: > - Running etcd as an embedded service is no longer supported. + Running etcd as an embedded service is no longer supported. If this is a + new install please define an 'etcd' group with either one, three or five + hosts. These hosts may be the same hosts as your masters. If this is an + upgrade please see https://docs.openshift.com/container-platform/latest/install_config/upgrading/migrating_embedded_etcd.html + for documentation on how to migrate from embedded to external etcd. when: - - g_etcd_hosts | default([]) | length not in [3,1] + - g_etcd_hosts | default([]) | length == 0 - not (openshift_node_bootstrap | default(False)) - name: Evaluate oo_all_hosts diff --git a/playbooks/init/facts.yml b/playbooks/init/facts.yml deleted file mode 100644 index ac4429b23..000000000 --- a/playbooks/init/facts.yml +++ /dev/null @@ -1,152 +0,0 @@ ---- -- name: Ensure that all non-node hosts are accessible - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config - any_errors_fatal: true - tasks: - -- name: Initialize host facts - hosts: oo_all_hosts - tasks: - - name: load openshift_facts module - import_role: - name: openshift_facts - - # TODO: Should this role be refactored into health_checks?? 
- - name: Run openshift_sanitize_inventory to set variables - include_role: - name: openshift_sanitize_inventory - - - name: Detecting Operating System from ostree_booted - stat: - path: /run/ostree-booted - register: ostree_booted - - - name: initialize_facts set fact openshift_is_atomic and openshift_is_containerized - set_fact: - openshift_is_atomic: "{{ ostree_booted.stat.exists }}" - openshift_is_containerized: "{{ ostree_booted.stat.exists or (containerized | default(false) | bool) }}" - - # TODO: Should this be moved into health checks?? - # Seems as though any check that happens with a corresponding fail should move into health_checks - - name: Validate python version - ans_dist is fedora and python is v3 - fail: - msg: | - openshift-ansible requires Python 3 for {{ ansible_distribution }}; - For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html - when: - - ansible_distribution == 'Fedora' - - ansible_python['version']['major'] != 3 - - # TODO: Should this be moved into health checks?? - # Seems as though any check that happens with a corresponding fail should move into health_checks - - name: Validate python version - ans_dist not Fedora and python must be v2 - fail: - msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}" - when: - - ansible_distribution != 'Fedora' - - ansible_python['version']['major'] != 2 - - # TODO: Should this be moved into health checks?? - # Seems as though any check that happens with a corresponding fail should move into health_checks - # Fail as early as possible if Atomic and old version of Docker - - when: - - openshift_is_atomic | bool - block: - - # See https://access.redhat.com/articles/2317361 - # and https://github.com/ansible/ansible/issues/15892 - # NOTE: the "'s can not be removed at this level else the docker command will fail - # NOTE: When ansible >2.2.1.x is used this can be updated per - # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121 - - name: Determine Atomic Host Docker Version - shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"' - register: l_atomic_docker_version - - - name: assert atomic host docker version is 1.12 or later - assert: - that: - - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=') - msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host. 
- - - when: - - not openshift_is_atomic | bool - block: - - name: Ensure openshift-ansible installer package deps are installed - package: - name: "{{ item }}" - state: present - with_items: - - iproute - - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}" - - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}" - - yum-utils - register: result - until: result is succeeded - - - name: Ensure various deps for running system containers are installed - package: - name: "{{ item }}" - state: present - with_items: - - atomic - - ostree - - runc - when: - - > - (openshift_use_system_containers | default(False)) | bool - or (openshift_use_etcd_system_container | default(False)) | bool - or (openshift_use_openvswitch_system_container | default(False)) | bool - or (openshift_use_node_system_container | default(False)) | bool - or (openshift_use_master_system_container | default(False)) | bool - register: result - until: result is succeeded - - - name: Gather Cluster facts - openshift_facts: - role: common - local_facts: - deployment_type: "{{ openshift_deployment_type }}" - deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}" - hostname: "{{ openshift_hostname | default(None) }}" - ip: "{{ openshift_ip | default(None) }}" - public_hostname: "{{ openshift_public_hostname | default(None) }}" - public_ip: "{{ openshift_public_ip | default(None) }}" - portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" - http_proxy: "{{ openshift_http_proxy | default(None) }}" - https_proxy: "{{ openshift_https_proxy | default(None) }}" - no_proxy: "{{ openshift_no_proxy | default(None) }}" - generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" - - - name: Set fact of no_proxy_internal_hostnames - openshift_facts: - role: common - local_facts: - no_proxy_internal_hostnames: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | lib_utils_oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: - - openshift_http_proxy is defined or openshift_https_proxy is defined - - openshift_generate_no_proxy_hosts | default(True) | bool - - - name: Initialize openshift.node.sdn_mtu - openshift_facts: - role: node - local_facts: - sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}" - - - name: initialize_facts set_fact repoquery command - set_fact: - repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" - repoquery_installed: "{{ 'dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins --installed' }}" - -- name: Initialize special first-master variables - hosts: oo_first_master - roles: - - role: openshift_facts - tasks: - - set_fact: - # We need to setup openshift_client_binary here for special uses of delegate_to in - # later roles and plays. - first_master_client_binary: "{{ openshift_client_binary }}" diff --git a/playbooks/init/main.yml b/playbooks/init/main.yml index 06e8ba504..468d81fbe 100644 --- a/playbooks/init/main.yml +++ b/playbooks/init/main.yml @@ -1,4 +1,7 @@ --- +# skip_version and l_install_base_packages are passed in via prerequistes.yml. 
+# skip_sanity_checks is passed in via openshift-node/private/image_prep.yml + - name: Initialization Checkpoint Start hosts: all gather_facts: false @@ -15,16 +18,19 @@ - import_playbook: evaluate_groups.yml -- import_playbook: facts.yml +- import_playbook: basic_facts.yml -- import_playbook: sanity_checks.yml - when: not (skip_sanity_checks | default(False)) +# base_packages needs to be setup for openshift_facts.py to run correctly. +- import_playbook: base_packages.yml + when: l_install_base_packages | default(False) | bool -- import_playbook: validate_hostnames.yml - when: not (skip_validate_hostnames | default(False)) +- import_playbook: cluster_facts.yml - import_playbook: version.yml - when: not (skip_verison | default(False)) + when: not (skip_version | default(False)) + +- import_playbook: sanity_checks.yml + when: not (skip_sanity_checks | default(False)) - name: Initialization Checkpoint End hosts: all diff --git a/playbooks/init/repos.yml b/playbooks/init/repos.yml index 66786a41a..655a7e83a 100644 --- a/playbooks/init/repos.yml +++ b/playbooks/init/repos.yml @@ -1,16 +1,18 @@ --- +# l_scale_up_hosts may be passed in via prerequisites.yml during scaleup plays. + - name: Setup yum repositories for all hosts - hosts: oo_all_hosts + hosts: "{{ l_scale_up_hosts | default('oo_all_hosts') }}" gather_facts: no tasks: - name: subscribe instances to Red Hat Subscription Manager - include_role: + import_role: name: rhel_subscribe when: - ansible_distribution == 'RedHat' - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - rhsub_user is defined - rhsub_pass is defined - name: initialize openshift repos - include_role: + import_role: name: openshift_repos diff --git a/playbooks/init/sanity_checks.yml b/playbooks/init/sanity_checks.yml index 26716a92d..fbbb3f8fb 100644 --- a/playbooks/init/sanity_checks.yml +++ b/playbooks/init/sanity_checks.yml @@ -1,51 +1,16 @@ --- +# l_sanity_check_hosts may be passed in during scale-up plays - name: Verify Requirements - hosts: oo_all_hosts + hosts: oo_first_master + roles: + - role: lib_utils tasks: - - fail: - msg: Flannel can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use flannel - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool - - - fail: - msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool - - - fail: - msg: Nuage sdn can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool - - - fail: - msg: Contiv can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use contiv - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: Contiv can not be used with flannel - when: openshift_use_flannel | default(false) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: Contiv can not be used with nuage - when: openshift_use_nuage | default(false) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: Calico can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use Calico - when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_calico | default(false) | bool - - - fail: - msg: 
The Calico playbook does not yet integrate with the Flannel playbook in Openshift. Set either openshift_use_calico or openshift_use_flannel, but not both. - when: openshift_use_calico | default(false) | bool and openshift_use_flannel | default(false) | bool - - - fail: - msg: Calico can not be used with Nuage in Openshift. Set either openshift_use_calico or openshift_use_nuage, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_nuage | default(false) | bool - - - fail: - msg: Calico can not be used with Contiv in Openshift. Set either openshift_use_calico or openshift_use_contiv, but not both - when: openshift_use_calico | default(false) | bool and openshift_use_contiv | default(false) | bool - - - fail: - msg: openshift_hostname must be 63 characters or less - when: openshift_hostname is defined and openshift_hostname | length > 63 - - - fail: - msg: openshift_public_hostname must be 63 characters or less - when: openshift_public_hostname is defined and openshift_public_hostname | length > 63 + # sanity_checks is a custom action plugin defined in lib_utils. + # This module will loop through all the hostvars for each host + # specified in check_hosts. + # Since sanity_checks is an action_plugin, it executes on the control host. + # Thus, sanity_checks cannot gather new information about any hosts. + - name: Run variable sanity checks + sanity_checks: + check_hosts: "{{ l_sanity_check_hosts | default(groups['oo_all_hosts']) }}" + run_once: True diff --git a/playbooks/init/validate_hostnames.yml b/playbooks/init/validate_hostnames.yml index 86e0b2416..b49f7dd08 100644 --- a/playbooks/init/validate_hostnames.yml +++ b/playbooks/init/validate_hostnames.yml @@ -25,7 +25,7 @@ when: - lookupip.stdout != '127.0.0.1' - lookupip.stdout not in ansible_all_ipv4_addresses - - openshift_hostname_check | default(true) + - openshift_hostname_check | default(true) | bool - name: Validate openshift_ip exists on node when defined fail: @@ -40,4 +40,4 @@ when: - openshift_ip is defined - openshift_ip not in ansible_all_ipv4_addresses - - openshift_ip_check | default(true) + - openshift_ip_check | default(true) | bool diff --git a/playbooks/init/version.yml b/playbooks/init/version.yml index 37a5284d5..962ee7220 100644 --- a/playbooks/init/version.yml +++ b/playbooks/init/version.yml @@ -2,20 +2,32 @@ # NOTE: requires openshift_facts be run - name: Determine openshift_version to configure on first master hosts: oo_first_master - roles: - - openshift_version + tasks: + - include_role: + name: openshift_version + tasks_from: first_master.yml + - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version | default('') }}" # NOTE: We set this even on etcd hosts as they may also later run as masters, # and we don't want to install wrong version of docker and have to downgrade # later. 
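The NOTE just above explains why `openshift_version` is set even on etcd hosts; the mechanism, visible in the hunk that follows, is the usual first-master fan-out through `hostvars`. As a standalone sketch of that pattern (play name illustrative):

```
# Sketch only: copy a fact resolved on the first master to every other host.
- name: Propagate the resolved version from the first master
  hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master"
  gather_facts: no
  tasks:
    - set_fact:
        openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
```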
- name: Set openshift_version for etcd, node, and master hosts - hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master + hosts: "{{ l_openshift_version_set_hosts | default(l_default_version_set_hosts) }}" vars: - openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" - pre_tasks: + l_default_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master" + l_first_master_openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}" + l_first_master_openshift_pkg_version: "{{ hostvars[groups.oo_first_master.0].openshift_pkg_version | default('') }}" + l_first_master_openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag}}" + tasks: - set_fact: - openshift_pkg_version: -{{ openshift_version }} - when: openshift_pkg_version is not defined - - debug: msg="openshift_pkg_version set to {{ openshift_pkg_version }}" - roles: - - openshift_version + openshift_version: "{{ l_first_master_openshift_version }}" + openshift_pkg_version: "{{ l_first_master_openshift_pkg_version }}" + openshift_image_tag: "{{ l_first_master_openshift_image_tag }}" + +# NOTE: These steps should only be run against masters and nodes. +- name: Ensure the requested version packages are available. + hosts: "{{ l_openshift_version_check_hosts | default('oo_nodes_to_config:oo_masters_to_config:!oo_first_master') }}" + tasks: + - include_role: + name: openshift_version + tasks_from: masters_and_nodes.yml diff --git a/playbooks/openshift-checks/adhoc.yml b/playbooks/openshift-checks/adhoc.yml index 414090733..249222ae4 100644 --- a/playbooks/openshift-checks/adhoc.yml +++ b/playbooks/openshift-checks/adhoc.yml @@ -11,6 +11,7 @@ # usage. Running this play only in localhost speeds up execution. 
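The `gather_facts: false` being added to adhoc.yml just below follows from the comment above it: a play that only runs on localhost gains nothing from the fact-gathering setup step, so skipping it shaves seconds off every invocation. A minimal standalone sketch of the same pattern (hypothetical play, not part of the commit):

```
- hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    # No setup step runs before this task; facts like ansible_os_family
    # are simply unavailable, which is fine for plays that don't use them.
    - debug:
        msg: "started without gathering facts"
```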
hosts: localhost connection: local + gather_facts: false roles: - openshift_health_checker vars: diff --git a/playbooks/openshift-etcd/certificates.yml b/playbooks/openshift-etcd/certificates.yml index c06e3b575..86caba4e8 100644 --- a/playbooks/openshift-etcd/certificates.yml +++ b/playbooks/openshift-etcd/certificates.yml @@ -1,5 +1,11 @@ --- - import_playbook: ../init/main.yml + vars: + skip_version: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}" - import_playbook: private/ca.yml diff --git a/playbooks/openshift-etcd/config.yml b/playbooks/openshift-etcd/config.yml index c7814207c..378edce85 100644 --- a/playbooks/openshift-etcd/config.yml +++ b/playbooks/openshift-etcd/config.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + skip_version: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}" - import_playbook: private/config.yml diff --git a/playbooks/openshift-etcd/embedded2external.yml b/playbooks/openshift-etcd/embedded2external.yml index 7d090fa9b..34be38ac0 100644 --- a/playbooks/openshift-etcd/embedded2external.yml +++ b/playbooks/openshift-etcd/embedded2external.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + skip_version: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}" - import_playbook: private/embedded2external.yml diff --git a/playbooks/openshift-etcd/migrate.yml b/playbooks/openshift-etcd/migrate.yml index 0340b74a5..4e8238ebd 100644 --- a/playbooks/openshift-etcd/migrate.yml +++ b/playbooks/openshift-etcd/migrate.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + skip_version: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}" - import_playbook: private/migrate.yml diff --git a/playbooks/openshift-etcd/private/ca.yml b/playbooks/openshift-etcd/private/ca.yml index f3bb3c2d1..77e7b0ed0 100644 --- a/playbooks/openshift-etcd/private/ca.yml +++ b/playbooks/openshift-etcd/private/ca.yml @@ -5,12 +5,11 @@ - role: openshift_clock - role: openshift_etcd_facts tasks: - - include_role: + - import_role: name: etcd tasks_from: ca.yml vars: etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" when: - etcd_ca_setup | default(True) | bool diff --git a/playbooks/openshift-etcd/private/certificates-backup.yml b/playbooks/openshift-etcd/private/certificates-backup.yml index ce21a1f96..e1354de67 100644 --- a/playbooks/openshift-etcd/private/certificates-backup.yml +++ b/playbooks/openshift-etcd/private/certificates-backup.yml @@ -1,12 +1,12 @@ --- - name: Backup and remove generated etcd certificates - hosts: 
oo_first_etcd + hosts: oo_etcd_to_config any_errors_fatal: true tasks: - - include_role: + - import_role: name: etcd tasks_from: backup_generated_certificates.yml - - include_role: + - import_role: name: etcd tasks_from: remove_generated_certificates.yml @@ -14,6 +14,6 @@ hosts: oo_etcd_to_config any_errors_fatal: true tasks: - - include_role: + - import_role: name: etcd tasks_from: backup_server_certificates.yml diff --git a/playbooks/openshift-etcd/private/config.yml b/playbooks/openshift-etcd/private/config.yml index 35407969e..bbc952d8e 100644 --- a/playbooks/openshift-etcd/private/config.yml +++ b/playbooks/openshift-etcd/private/config.yml @@ -22,7 +22,6 @@ - role: openshift_clock - role: openshift_etcd etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - role: nickhammond.logrotate diff --git a/playbooks/openshift-etcd/private/embedded2external.yml b/playbooks/openshift-etcd/private/embedded2external.yml index be177b714..674bd5088 100644 --- a/playbooks/openshift-etcd/private/embedded2external.yml +++ b/playbooks/openshift-etcd/private/embedded2external.yml @@ -18,7 +18,7 @@ - role: openshift_facts tasks: - name: Check the master API is ready - include_role: + import_role: name: openshift_master tasks_from: check_master_api_is_ready.yml - set_fact: @@ -31,8 +31,8 @@ name: "{{ master_service }}" state: stopped # 2. backup embedded etcd - # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285 - - include_role: + # Can't use with_items with import_role: https://github.com/ansible/ansible/issues/21285 + - import_role: name: etcd tasks_from: backup.yml vars: @@ -40,7 +40,7 @@ r_etcd_common_embedded_etcd: "{{ true }}" r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}" - - include_role: + - import_role: name: etcd tasks_from: backup.archive.yml vars: @@ -56,7 +56,7 @@ - name: Backup etcd client certificates for master host hosts: oo_first_master tasks: - - include_role: + - import_role: name: etcd tasks_from: backup_master_etcd_certificates.yml @@ -73,10 +73,10 @@ hosts: oo_etcd_to_config[0] gather_facts: no pre_tasks: - - include_role: + - import_role: name: etcd tasks_from: disable_etcd.yml - - include_role: + - import_role: name: etcd tasks_from: clean_data.yml @@ -89,9 +89,12 @@ local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX register: g_etcd_client_mktemp changed_when: False - become: no - - include_role: + - name: Chmod local temp directory for syncing etcd backup + local_action: command chmod 777 "{{ g_etcd_client_mktemp.stdout }}" + changed_when: False + + - import_role: name: etcd tasks_from: backup.fetch.yml vars: @@ -101,7 +104,7 @@ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" delegate_to: "{{ groups.oo_first_master[0] }}" - - include_role: + - import_role: name: etcd tasks_from: backup.copy.yml vars: @@ -116,20 +119,19 @@ - name: Delete temporary directory local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent changed_when: False - become: no # 7. 
force new cluster from the backup - name: Force new etcd cluster hosts: oo_etcd_to_config[0] tasks: - - include_role: + - import_role: name: etcd tasks_from: backup.unarchive.yml vars: r_etcd_common_backup_tag: pre-migrate r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}" - - include_role: + - import_role: name: etcd tasks_from: backup.force_new_cluster.yml vars: @@ -143,7 +145,7 @@ - name: Configure master to use external etcd hosts: oo_first_master tasks: - - include_role: + - import_role: name: openshift_master tasks_from: configure_external_etcd.yml vars: diff --git a/playbooks/openshift-etcd/private/master_etcd_certificates.yml b/playbooks/openshift-etcd/private/master_etcd_certificates.yml index d98470db2..4e4972dba 100644 --- a/playbooks/openshift-etcd/private/master_etcd_certificates.yml +++ b/playbooks/openshift-etcd/private/master_etcd_certificates.yml @@ -5,9 +5,7 @@ roles: - role: openshift_etcd_facts - role: openshift_etcd_client_certificates - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config diff --git a/playbooks/openshift-etcd/private/migrate.yml b/playbooks/openshift-etcd/private/migrate.yml index cad0ebcaa..3f8b44032 100644 --- a/playbooks/openshift-etcd/private/migrate.yml +++ b/playbooks/openshift-etcd/private/migrate.yml @@ -2,7 +2,6 @@ - name: Check if the master has embedded etcd hosts: localhost connection: local - become: no gather_facts: no tags: - always @@ -15,7 +14,7 @@ - name: Run pre-checks hosts: oo_etcd_to_migrate tasks: - - include_role: + - import_role: name: etcd tasks_from: migrate.pre_check.yml vars: @@ -43,7 +42,7 @@ roles: - role: openshift_facts post_tasks: - - include_role: + - import_role: name: etcd tasks_from: backup.yml vars: @@ -53,7 +52,6 @@ - name: Gate on etcd backup hosts: localhost connection: local - become: no tasks: - set_fact: etcd_backup_completed: "{{ hostvars @@ -70,7 +68,7 @@ hosts: oo_etcd_to_migrate gather_facts: no pre_tasks: - - include_role: + - import_role: name: etcd tasks_from: disable_etcd.yml @@ -78,7 +76,7 @@ hosts: oo_etcd_to_migrate[0] gather_facts: no tasks: - - include_role: + - import_role: name: etcd tasks_from: migrate.yml vars: @@ -90,7 +88,7 @@ hosts: oo_etcd_to_migrate[1:] gather_facts: no tasks: - - include_role: + - import_role: name: etcd tasks_from: clean_data.yml vars: @@ -126,7 +124,7 @@ - name: Add TTLs on the first master hosts: oo_first_master[0] tasks: - - include_role: + - import_role: name: etcd tasks_from: migrate.add_ttls.yml vars: @@ -138,7 +136,7 @@ - name: Configure masters if etcd data migration is successful hosts: oo_masters_to_config tasks: - - include_role: + - import_role: name: etcd tasks_from: migrate.configure_master.yml when: etcd_migration_failed | length == 0 diff --git a/playbooks/openshift-etcd/private/redeploy-ca.yml b/playbooks/openshift-etcd/private/redeploy-ca.yml index 0995945cc..55409e503 100644 --- a/playbooks/openshift-etcd/private/redeploy-ca.yml +++ b/playbooks/openshift-etcd/private/redeploy-ca.yml @@ -14,10 +14,10 @@ - name: Backup existing etcd CA certificate directories hosts: oo_etcd_to_config tasks: - - include_role: + - import_role: name: etcd tasks_from: backup_ca_certificates.yml - - include_role: + - import_role: name:
etcd tasks_from: remove_ca_certificates.yml @@ -26,7 +26,6 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -34,15 +33,18 @@ register: g_etcd_mktemp changed_when: false + - name: Chmod local temp directory for syncing certs + local_action: command chmod 777 "{{ g_etcd_mktemp.stdout }}" + changed_when: false + - name: Distribute etcd CA to etcd hosts hosts: oo_etcd_to_config tasks: - - include_role: + - import_role: name: etcd tasks_from: distribute_ca.yml vars: etcd_sync_cert_dir: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - import_playbook: restart.yml # Do not restart etcd when etcd certificates were previously expired. @@ -54,7 +56,7 @@ - name: Retrieve etcd CA certificate hosts: oo_first_etcd tasks: - - include_role: + - import_role: name: etcd tasks_from: retrieve_ca_certificates.yml vars: @@ -74,7 +76,6 @@ - name: Delete temporary directory on localhost hosts: localhost connection: local - become: no gather_facts: no tasks: - file: diff --git a/playbooks/openshift-etcd/private/restart.yml b/playbooks/openshift-etcd/private/restart.yml index 0751480e2..a2a53651b 100644 --- a/playbooks/openshift-etcd/private/restart.yml +++ b/playbooks/openshift-etcd/private/restart.yml @@ -3,7 +3,7 @@ hosts: oo_etcd_to_config serial: 1 tasks: - - include_role: + - import_role: name: etcd tasks_from: restart.yml when: @@ -12,7 +12,7 @@ - name: Restart etcd hosts: oo_etcd_to_config tasks: - - include_role: + - import_role: name: etcd tasks_from: restart.yml when: diff --git a/playbooks/openshift-etcd/private/scaleup.yml b/playbooks/openshift-etcd/private/scaleup.yml index dc667958f..162a5eba7 100644 --- a/playbooks/openshift-etcd/private/scaleup.yml +++ b/playbooks/openshift-etcd/private/scaleup.yml @@ -12,8 +12,6 @@ hosts: oo_new_etcd_to_config serial: 1 any_errors_fatal: true - vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" pre_tasks: - name: Add new etcd members to cluster command: > @@ -30,7 +28,7 @@ retries: 3 delay: 10 until: etcd_add_check.rc == 0 - - include_role: + - import_role: name: etcd tasks_from: server_certificates.yml vars: @@ -42,7 +40,6 @@ - role: openshift_etcd when: etcd_add_check.rc == 0 etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_initial_cluster_state: "existing" etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}" @@ -66,8 +63,6 @@ hosts: oo_masters_to_config serial: 1 vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_etcd_hosts: "{{ hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) )) | lib_utils_oo_collect('openshift.common.hostname') @@ -76,6 +71,6 @@ roles: - role: openshift_master_facts post_tasks: - - include_role: + - import_role: name: openshift_master tasks_from: update_etcd_client_urls.yml diff --git a/playbooks/openshift-etcd/private/server_certificates.yml b/playbooks/openshift-etcd/private/server_certificates.yml index 695b53990..0abfe1650 100644 --- a/playbooks/openshift-etcd/private/server_certificates.yml +++ 
b/playbooks/openshift-etcd/private/server_certificates.yml @@ -5,10 +5,9 @@ roles: - role: openshift_etcd_facts post_tasks: - - include_role: + - import_role: name: etcd tasks_from: server_certificates.yml vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" diff --git a/playbooks/openshift-etcd/private/upgrade_backup.yml b/playbooks/openshift-etcd/private/upgrade_backup.yml index 0d8943d93..081c024fc 100644 --- a/playbooks/openshift-etcd/private/upgrade_backup.yml +++ b/playbooks/openshift-etcd/private/upgrade_backup.yml @@ -4,7 +4,7 @@ roles: - role: openshift_etcd_facts post_tasks: - - include_role: + - import_role: name: etcd tasks_from: backup.yml vars: @@ -14,7 +14,6 @@ - name: Gate on etcd backup hosts: localhost connection: local - become: no tasks: - set_fact: etcd_backup_completed: "{{ hostvars diff --git a/playbooks/openshift-etcd/private/upgrade_image_members.yml b/playbooks/openshift-etcd/private/upgrade_image_members.yml index d4386249e..f9e50e748 100644 --- a/playbooks/openshift-etcd/private/upgrade_image_members.yml +++ b/playbooks/openshift-etcd/private/upgrade_image_members.yml @@ -6,7 +6,7 @@ hosts: oo_etcd_hosts_to_upgrade serial: 1 tasks: - - include_role: + - import_role: name: etcd tasks_from: upgrade_image.yml vars: diff --git a/playbooks/openshift-etcd/private/upgrade_main.yml b/playbooks/openshift-etcd/private/upgrade_main.yml index e373a4a4c..fea588260 100644 --- a/playbooks/openshift-etcd/private/upgrade_main.yml +++ b/playbooks/openshift-etcd/private/upgrade_main.yml @@ -1,4 +1,37 @@ --- +# Prior to 3.6, openshift-ansible created etcd serving certificates +# without a SubjectAlternativeName entry for the system hostname. The +# SAN list in Go 1.8 is now (correctly) authoritative and since +# openshift-ansible configures masters to talk to etcd hostnames +# rather than IP addresses, we must correct etcd certificates. +# +# This play examines the etcd serving certificate SANs on each etcd +# host and records whether or not the system hostname is missing. +- name: Examine etcd serving certificate SAN + hosts: oo_etcd_to_config + tasks: + - slurp: + src: /etc/etcd/server.crt + register: etcd_serving_cert + - set_fact: + __etcd_cert_lacks_hostname: "{{ (openshift.common.hostname not in (etcd_serving_cert.content | b64decode | lib_utils_oo_parse_certificate_san)) | bool }}" + +# Redeploy etcd certificates when hostnames were missing from etcd +# serving certificate SANs. +- import_playbook: redeploy-certificates.yml + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + +- import_playbook: restart.yml + vars: + g_etcd_certificates_expired: "{{ ('expired' in (hostvars | lib_utils_oo_select_keys(groups['etcd']) | lib_utils_oo_collect('check_results.check_results.etcd') | lib_utils_oo_collect('health'))) | bool }}" + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + +- import_playbook: ../../openshift-master/private/restart.yml + when: + - true in hostvars | lib_utils_oo_select_keys(groups['oo_etcd_to_config']) | lib_utils_oo_collect('__etcd_cert_lacks_hostname') | default([false]) + # For 1.4/3.4 we want to upgrade everyone to etcd-3.0. etcd docs say to # upgrade from 2.0.x to 2.1.x to 2.2.x to 2.3.x to 3.0.x. 
While this is a tedious # task for RHEL and CENTOS it's simply not possible in Fedora unless you've @@ -14,7 +47,7 @@ - name: Drop etcdctl profiles hosts: oo_etcd_hosts_to_upgrade tasks: - - include_role: + - import_role: name: etcd tasks_from: drop_etcdctl.yml diff --git a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml index f7fe6cd9c..e78cc5826 100644 --- a/playbooks/openshift-etcd/private/upgrade_rpm_members.yml +++ b/playbooks/openshift-etcd/private/upgrade_rpm_members.yml @@ -6,7 +6,7 @@ hosts: oo_etcd_hosts_to_upgrade serial: 1 tasks: - - include_role: + - import_role: name: etcd tasks_from: upgrade_rpm.yml vars: diff --git a/playbooks/openshift-etcd/private/upgrade_step.yml b/playbooks/openshift-etcd/private/upgrade_step.yml index 05c543d62..6aec838d4 100644 --- a/playbooks/openshift-etcd/private/upgrade_step.yml +++ b/playbooks/openshift-etcd/private/upgrade_step.yml @@ -2,7 +2,7 @@ - name: Determine etcd version hosts: oo_etcd_hosts_to_upgrade tasks: - - include_role: + - import_role: name: etcd tasks_from: version_detect.yml @@ -54,7 +54,7 @@ hosts: oo_etcd_hosts_to_upgrade serial: 1 tasks: - - include_role: + - import_role: name: etcd tasks_from: upgrade_image.yml vars: diff --git a/playbooks/openshift-etcd/redeploy-ca.yml b/playbooks/openshift-etcd/redeploy-ca.yml index 769d694ba..93b68a257 100644 --- a/playbooks/openshift-etcd/redeploy-ca.yml +++ b/playbooks/openshift-etcd/redeploy-ca.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + skip_version: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}" - import_playbook: private/redeploy-ca.yml diff --git a/playbooks/openshift-etcd/redeploy-certificates.yml b/playbooks/openshift-etcd/redeploy-certificates.yml index 8ea1994f7..202acb493 100644 --- a/playbooks/openshift-etcd/redeploy-certificates.yml +++ b/playbooks/openshift-etcd/redeploy-certificates.yml @@ -1,5 +1,11 @@ --- - import_playbook: ../init/main.yml + vars: + skip_version: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}" - import_playbook: private/redeploy-certificates.yml diff --git a/playbooks/openshift-etcd/restart.yml b/playbooks/openshift-etcd/restart.yml index 041c1384d..05aaa9809 100644 --- a/playbooks/openshift-etcd/restart.yml +++ b/playbooks/openshift-etcd/restart.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + skip_version: True + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}" - import_playbook: private/restart.yml diff --git a/playbooks/openshift-etcd/scaleup.yml b/playbooks/openshift-etcd/scaleup.yml index 7e9ab6834..1b2229baa 100644 --- a/playbooks/openshift-etcd/scaleup.yml +++ b/playbooks/openshift-etcd/scaleup.yml @@ -1,4 +1,55 @@ --- +- import_playbook: ../init/evaluate_groups.yml + +- name: Ensure there are new_etcd + hosts: localhost + connection: local + gather_facts: no + tasks: + -
fail: + msg: > + Detected no new_etcd in inventory. Please add hosts to the + new_etcd host group to add etcd hosts. + when: + - g_new_etcd_hosts | default([]) | length == 0 + + - fail: + msg: > + Detected new_etcd host is member of new_masters or new_nodes. Please + run playbooks/openshift-master/scaleup.yml or + playbooks/openshift-node/scaleup.yml before running this play. + when: > + inventory_hostname in (groups['new_masters'] | default([])) + or inventory_hostname in (groups['new_nodes'] | default([])) + +# We only need to run this if etcd is being installed on a standalone host; +# If etcd is part of master or node group, there's no need to +# re-run prerequisites +- import_playbook: ../prerequisites.yml + vars: + # We need to ensure container_runtime is only processed for containerized + # etcd hosts by setting l_build_container_groups_hosts and l_etcd_scale_up_hosts + l_build_container_groups_hosts: "oo_new_etcd_to_config" + l_etcd_scale_up_hosts: "oo_hosts_containerized_managed_true" + l_scale_up_hosts: "oo_new_etcd_to_config" + l_base_packages_hosts: "oo_new_etcd_to_config" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config" + l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}" + when: + - inventory_hostname not in groups['oo_masters'] + - inventory_hostname not in groups['oo_nodes_to_config'] + +# If this etcd host is part of a master or node, we don't need to run +# prerequisites, we can just init facts as normal. - import_playbook: ../init/main.yml + vars: + skip_version: True + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_new_etcd_to_config" + l_sanity_check_hosts: "{{ groups['oo_new_etcd_to_config'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}" + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + when: + - inventory_hostname in groups['oo_masters'] + - inventory_hostname in groups['oo_nodes_to_config'] - import_playbook: private/scaleup.yml diff --git a/playbooks/openshift-etcd/upgrade.yml b/playbooks/openshift-etcd/upgrade.yml index ccc797527..1edcd6819 100644 --- a/playbooks/openshift-etcd/upgrade.yml +++ b/playbooks/openshift-etcd/upgrade.yml @@ -1,4 +1,10 @@ --- -- import_playbook: ../init/evaluate_groups.yml +- import_playbook: ../init/main.yml + vars: + skip_version: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config" + l_sanity_check_hosts: "{{ groups['oo_etcd_to_config'] | union(groups['oo_masters_to_config']) }}" - import_playbook: private/upgrade_main.yml diff --git a/playbooks/openshift-glusterfs/README.md b/playbooks/openshift-glusterfs/README.md index 107bbfff6..19c381490 100644 --- a/playbooks/openshift-glusterfs/README.md +++ b/playbooks/openshift-glusterfs/README.md @@ -63,7 +63,7 @@ glusterfs [OSEv3:vars] ansible_ssh_user=root -deployment_type=origin +openshift_deployment_type=origin [masters] master diff --git a/playbooks/openshift-glusterfs/config.yml b/playbooks/openshift-glusterfs/config.yml index c7814207c..ccdd8d069 100644 --- a/playbooks/openshift-glusterfs/config.yml +++ b/playbooks/openshift-glusterfs/config.yml @@ -1,4 +1,9 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config:oo_glusterfs_to_config" + l_openshift_version_set_hosts: 
"oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_glusterfs_to_config']) }}" - import_playbook: private/config.yml diff --git a/playbooks/openshift-glusterfs/private/config.yml b/playbooks/openshift-glusterfs/private/config.yml index 19e14ab3e..9a5bc143d 100644 --- a/playbooks/openshift-glusterfs/private/config.yml +++ b/playbooks/openshift-glusterfs/private/config.yml @@ -14,12 +14,12 @@ - name: Open firewall ports for GlusterFS nodes hosts: glusterfs tasks: - - include_role: + - import_role: name: openshift_storage_glusterfs tasks_from: firewall.yml when: - openshift_storage_glusterfs_is_native | default(True) | bool - - include_role: + - import_role: name: openshift_storage_glusterfs tasks_from: kernel_modules.yml when: @@ -28,12 +28,12 @@ - name: Open firewall ports for GlusterFS registry nodes hosts: glusterfs_registry tasks: - - include_role: + - import_role: name: openshift_storage_glusterfs tasks_from: firewall.yml when: - openshift_storage_glusterfs_registry_is_native | default(True) | bool - - include_role: + - import_role: name: openshift_storage_glusterfs tasks_from: kernel_modules.yml when: @@ -43,7 +43,7 @@ hosts: oo_first_master tasks: - name: setup glusterfs - include_role: + import_role: name: openshift_storage_glusterfs when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/openshift-glusterfs/registry.yml b/playbooks/openshift-glusterfs/registry.yml index 5e3b18536..cc2846cb3 100644 --- a/playbooks/openshift-glusterfs/registry.yml +++ b/playbooks/openshift-glusterfs/registry.yml @@ -1,4 +1,9 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config:oo_glusterfs_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_glusterfs_to_config']) }}" - import_playbook: private/registry.yml diff --git a/playbooks/openshift-grafana/config.yml b/playbooks/openshift-grafana/config.yml new file mode 100644 index 000000000..62d954d29 --- /dev/null +++ b/playbooks/openshift-grafana/config.yml @@ -0,0 +1,9 @@ +--- +- import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" + +- import_playbook: private/config.yml diff --git a/playbooks/openshift-grafana/private/config.yml b/playbooks/openshift-grafana/private/config.yml new file mode 100644 index 000000000..ac753d63b --- /dev/null +++ b/playbooks/openshift-grafana/private/config.yml @@ -0,0 +1,6 @@ +--- +- name: Deploy grafana server + hosts: masters + tasks: + - include_role: + name: openshift_grafana diff --git a/playbooks/openshift-grafana/private/filter_plugins b/playbooks/openshift-grafana/private/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/openshift-grafana/private/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/openshift-grafana/private/lookup_plugins b/playbooks/openshift-grafana/private/lookup_plugins new file mode 120000 index 000000000..ac79701db --- /dev/null +++ b/playbooks/openshift-grafana/private/lookup_plugins @@ -0,0 +1 @@ +../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/openshift-grafana/private/roles b/playbooks/openshift-grafana/private/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/openshift-grafana/private/roles @@ -0,0 +1 @@ +../../../roles/
\ No newline at end of file diff --git a/playbooks/openshift-hosted/config.yml b/playbooks/openshift-hosted/config.yml index c7814207c..62d954d29 100644 --- a/playbooks/openshift-hosted/config.yml +++ b/playbooks/openshift-hosted/config.yml @@ -1,4 +1,9 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" - import_playbook: private/config.yml diff --git a/playbooks/openshift-hosted/deploy_registry.yml b/playbooks/openshift-hosted/deploy_registry.yml new file mode 100644 index 000000000..e42af7149 --- /dev/null +++ b/playbooks/openshift-hosted/deploy_registry.yml @@ -0,0 +1,9 @@ +--- +- import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" + +- import_playbook: private/openshift_hosted_registry.yml diff --git a/playbooks/openshift-hosted/deploy_router.yml b/playbooks/openshift-hosted/deploy_router.yml new file mode 100644 index 000000000..a3564fe51 --- /dev/null +++ b/playbooks/openshift-hosted/deploy_router.yml @@ -0,0 +1,9 @@ +--- +- import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" + +- import_playbook: private/openshift_hosted_router.yml diff --git a/playbooks/openshift-hosted/private/config.yml b/playbooks/openshift-hosted/private/config.yml index 036fe654d..4e7b98da2 100644 --- a/playbooks/openshift-hosted/private/config.yml +++ b/playbooks/openshift-hosted/private/config.yml @@ -21,6 +21,10 @@ - import_playbook: openshift_hosted_registry.yml +- import_playbook: openshift_hosted_wait_for_pods.yml + +- import_playbook: openshift_hosted_registry_storage.yml + - import_playbook: cockpit-ui.yml - import_playbook: install_docker_gc.yml diff --git a/playbooks/openshift-hosted/private/install_docker_gc.yml b/playbooks/openshift-hosted/private/install_docker_gc.yml index 1e3dfee07..03eb542d3 100644 --- a/playbooks/openshift-hosted/private/install_docker_gc.yml +++ b/playbooks/openshift-hosted/private/install_docker_gc.yml @@ -3,5 +3,5 @@ hosts: oo_first_master gather_facts: false tasks: - - include_role: + - import_role: name: openshift_docker_gc diff --git a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml index 62fe0dd60..c59ebcead 100644 --- a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml +++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml @@ -3,4 +3,6 @@ hosts: oo_first_master roles: - role: openshift_default_storage_class - when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack') + when: + - openshift_cloudprovider_kind is defined + - openshift_cloudprovider_kind in ['aws','gce','openstack','vsphere'] diff --git a/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml index d5ca5185c..b09432da2 
100644 --- a/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml +++ b/playbooks/openshift-hosted/private/openshift_hosted_create_projects.yml @@ -2,6 +2,6 @@ - name: Create Hosted Resources - openshift projects hosts: oo_first_master tasks: - - include_role: + - import_role: name: openshift_hosted tasks_from: create_projects.yml diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml index 2a91a827c..659c95eda 100644 --- a/playbooks/openshift-hosted/private/openshift_hosted_registry.yml +++ b/playbooks/openshift-hosted/private/openshift_hosted_registry.yml @@ -5,7 +5,7 @@ - set_fact: openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" - - include_role: + - import_role: name: openshift_hosted tasks_from: registry.yml when: diff --git a/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml new file mode 100644 index 000000000..cfc47c9b2 --- /dev/null +++ b/playbooks/openshift-hosted/private/openshift_hosted_registry_storage.yml @@ -0,0 +1,13 @@ +--- +# This playbook waits for registry and router pods after both have been +# created. It is intended to allow the tasks of deploying both to complete +# before polling to save time. +- name: Poll for hosted pod deployments + hosts: oo_first_master + tasks: + - import_role: + name: openshift_hosted + tasks_from: registry_storage.yml + when: + - openshift_hosted_manage_registry | default(True) | bool + - openshift_hosted_registry_registryurl is defined diff --git a/playbooks/openshift-hosted/private/openshift_hosted_router.yml b/playbooks/openshift-hosted/private/openshift_hosted_router.yml index bcb5a34a4..353377189 100644 --- a/playbooks/openshift-hosted/private/openshift_hosted_router.yml +++ b/playbooks/openshift-hosted/private/openshift_hosted_router.yml @@ -5,7 +5,7 @@ - set_fact: openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" - - include_role: + - import_role: name: openshift_hosted tasks_from: router.yml when: diff --git a/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml new file mode 100644 index 000000000..1f6868c2a --- /dev/null +++ b/playbooks/openshift-hosted/private/openshift_hosted_wait_for_pods.yml @@ -0,0 +1,26 @@ +--- +# This playbook waits for registry and router pods after both have been +# created. It is intended to allow the tasks of deploying both to complete +# before polling to save time. 
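The header comment above (shared with the new registry_storage playbook) explains the ordering: create the router and registry first, then poll both afterwards, so the two rollouts overlap instead of running back to back. The real polling tasks live in the openshift_hosted role's wait_for_pod.yml; a rough sketch of such a poll, assuming a hypothetical DeploymentConfig name and namespace, could look like:

```
- hosts: oo_first_master
  gather_facts: false
  tasks:
    # Ask for rollout status without blocking inside the client; Ansible's
    # until/retries loop re-checks every 10s for up to 10 minutes.
    - name: Wait for the router rollout to finish
      command: oc rollout status dc/router -n default --watch=false
      register: rollout
      until: "'successfully rolled out' in rollout.stdout"
      retries: 60
      delay: 10
      changed_when: false
```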
+- name: Poll for hosted pod deployments + hosts: oo_first_master + tasks: + - import_role: + name: openshift_hosted + tasks_from: wait_for_pod.yml + vars: + l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_router_wait }}" + l_openshift_hosted_wfp_items: "{{ openshift_hosted_routers }}" + when: + - openshift_hosted_manage_router | default(True) | bool + - openshift_hosted_router_registryurl is defined + + - import_role: + name: openshift_hosted + tasks_from: wait_for_pod.yml + vars: + l_openshift_hosted_wait_for_pod: "{{ openshift_hosted_registry_wait }}" + l_openshift_hosted_wfp_items: "{{ r_openshift_hosted_registry_list }}" + when: + - openshift_hosted_manage_registry | default(True) | bool + - openshift_hosted_registry_registryurl is defined diff --git a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml index b817221b8..d88209593 100644 --- a/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-registry-certificates.yml @@ -88,8 +88,7 @@ - name: Redeploy docker registry command: > - {{ openshift_client_binary }} deploy dc/docker-registry - --latest + {{ openshift_client_binary }} rollout latest dc/docker-registry --config={{ mktemp.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml index c19147d41..952a5f4ee 100644 --- a/playbooks/openshift-hosted/private/redeploy-router-certificates.yml +++ b/playbooks/openshift-hosted/private/redeploy-router-certificates.yml @@ -115,7 +115,7 @@ - ('service.alpha.openshift.io/serving-cert-secret-name') not in router_service_annotations - ('service.alpha.openshift.io/serving-cert-signed-by') not in router_service_annotations - - include_role: + - import_role: name: openshift_hosted tasks_from: main vars: @@ -129,8 +129,7 @@ - name: Redeploy router command: > - {{ openshift_client_binary }} deploy dc/router - --latest + {{ openshift_client_binary }} rollout latest dc/router --config={{ router_cert_redeploy_tempdir.stdout }}/admin.kubeconfig -n default diff --git a/playbooks/openshift-hosted/redeploy-registry-certificates.yml b/playbooks/openshift-hosted/redeploy-registry-certificates.yml index 518a1d624..1ab237558 100644 --- a/playbooks/openshift-hosted/redeploy-registry-certificates.yml +++ b/playbooks/openshift-hosted/redeploy-registry-certificates.yml @@ -1,4 +1,9 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" - import_playbook: private/redeploy-registry-certificates.yml diff --git a/playbooks/openshift-hosted/redeploy-router-certificates.yml b/playbooks/openshift-hosted/redeploy-router-certificates.yml index a74dd8c79..4b44be405 100644 --- a/playbooks/openshift-hosted/redeploy-router-certificates.yml +++ b/playbooks/openshift-hosted/redeploy-router-certificates.yml @@ -1,4 +1,9 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" - import_playbook: private/redeploy-router-certificates.yml diff --git 
a/playbooks/openshift-loadbalancer/config.yml b/playbooks/openshift-loadbalancer/config.yml index c7814207c..13903ee17 100644 --- a/playbooks/openshift-loadbalancer/config.yml +++ b/playbooks/openshift-loadbalancer/config.yml @@ -1,4 +1,9 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config:oo_lb_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_lb_to_config']) }}" - import_playbook: private/config.yml diff --git a/playbooks/openshift-loadbalancer/private/config.yml b/playbooks/openshift-loadbalancer/private/config.yml index 54c8483c8..4a83dd955 100644 --- a/playbooks/openshift-loadbalancer/private/config.yml +++ b/playbooks/openshift-loadbalancer/private/config.yml @@ -24,7 +24,7 @@ openshift_use_nuage | default(false), nuage_mon_rest_server_port | default(none))) + openshift_loadbalancer_additional_backends | default([]) }}" - openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" + openshift_image_tag: "{{ hostvars[groups.oo_masters_to_config.0].openshift_image_tag }}" roles: - role: openshift_loadbalancer - role: tuned diff --git a/playbooks/openshift-logging/config.yml b/playbooks/openshift-logging/config.yml index 83d330284..419dcbc3f 100644 --- a/playbooks/openshift-logging/config.yml +++ b/playbooks/openshift-logging/config.yml @@ -5,5 +5,10 @@ # currently supported method. # - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" - import_playbook: private/config.yml diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml index bc59bd95a..f2a57f9f8 100644 --- a/playbooks/openshift-logging/private/config.yml +++ b/playbooks/openshift-logging/private/config.yml @@ -11,18 +11,51 @@ status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" +- name: Update vm.max_map_count for ES 5.x + hosts: all + gather_facts: false + tasks: + - when: + - openshift_logging_es5_techpreview | default(false) | bool + - openshift_deployment_type in ['origin'] + block: + - name: Checking vm max_map_count value + command: + cat /proc/sys/vm/max_map_count + register: _vm_max_map_count + + - stat: + path: /etc/sysctl.d/99-elasticsearch.conf + register: _99_es_conf + + - name: Check for current value of vm.max_map_count in 99-elasticsearch.conf + command: > + sed /etc/sysctl.d/99-elasticsearch.conf -e 's/vm.max_map_count=\(.*\)/\1/' + register: _curr_vm_max_map_count + when: _99_es_conf.stat.exists + + - name: Updating vm.max_map_count value + sysctl: + name: vm.max_map_count + value: 262144 + sysctl_file: "/etc/sysctl.d/99-elasticsearch.conf" + reload: yes + when: + - _vm_max_map_count.stdout | default(0) | int < 262144 | int or _curr_vm_max_map_count.stdout | default(0) | int < 262144 + - name: OpenShift Aggregated Logging hosts: oo_first_master roles: - openshift_logging +# TODO: Remove when master config property is removed - name: Update Master configs hosts: oo_masters:!oo_first_master tasks: - - block: - - include_role: - name: openshift_logging - tasks_from: update_master_config + - include_role: + name: openshift_logging + tasks_from: update_master_config + when: not 
openshift.common.version_gte_3_9 - name: Logging Install Checkpoint End hosts: all diff --git a/playbooks/openshift-management/add_many_container_providers.yml b/playbooks/openshift-management/add_many_container_providers.yml index 62fdb11c5..45231a495 100644 --- a/playbooks/openshift-management/add_many_container_providers.yml +++ b/playbooks/openshift-management/add_many_container_providers.yml @@ -27,7 +27,7 @@ register: results # Include openshift_management for access to filter_plugins. - - include_role: + - import_role: name: openshift_management tasks_from: noop diff --git a/playbooks/openshift-management/config.yml b/playbooks/openshift-management/config.yml index c7814207c..62d954d29 100644 --- a/playbooks/openshift-management/config.yml +++ b/playbooks/openshift-management/config.yml @@ -1,4 +1,9 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" - import_playbook: private/config.yml diff --git a/playbooks/openshift-management/private/add_container_provider.yml b/playbooks/openshift-management/private/add_container_provider.yml index facb3a5b9..25d4058e5 100644 --- a/playbooks/openshift-management/private/add_container_provider.yml +++ b/playbooks/openshift-management/private/add_container_provider.yml @@ -3,6 +3,6 @@ hosts: oo_first_master tasks: - name: Run the Management Integration Tasks - include_role: + import_role: name: openshift_management tasks_from: add_container_provider diff --git a/playbooks/openshift-management/private/config.yml b/playbooks/openshift-management/private/config.yml index 3f1cdf713..22f3ee8f3 100644 --- a/playbooks/openshift-management/private/config.yml +++ b/playbooks/openshift-management/private/config.yml @@ -21,7 +21,7 @@ tasks: - name: Run the CFME Setup Role - include_role: + import_role: name: openshift_management vars: template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}" diff --git a/playbooks/openshift-management/private/uninstall.yml b/playbooks/openshift-management/private/uninstall.yml index 9f35cc276..6097ea45a 100644 --- a/playbooks/openshift-management/private/uninstall.yml +++ b/playbooks/openshift-management/private/uninstall.yml @@ -3,6 +3,6 @@ hosts: masters[0] tasks: - name: Run the CFME Uninstall Role Tasks - include_role: + import_role: name: openshift_management tasks_from: uninstall diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml index 81bb8cc5c..ca514ed26 100644 --- a/playbooks/openshift-master/private/additional_config.yml +++ b/playbooks/openshift-master/private/additional_config.yml @@ -16,7 +16,6 @@ vars: cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}" etcd_urls: "{{ openshift.master.etcd_urls }}" - openshift_master_ha: "{{ groups.oo_masters | length > 1 }}" omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}" roles: - role: openshift_project_request_template @@ -31,7 +30,7 @@ - role: cockpit when: - not openshift_is_atomic | bool - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' - osm_use_cockpit is undefined or osm_use_cockpit | bool - openshift.common.deployment_subtype != 'registry' - role: flannel_register diff --git a/playbooks/openshift-master/private/certificates-backup.yml 
b/playbooks/openshift-master/private/certificates-backup.yml index 4dbc041b0..56af18ca7 100644 --- a/playbooks/openshift-master/private/certificates-backup.yml +++ b/playbooks/openshift-master/private/certificates-backup.yml @@ -28,6 +28,7 @@ path: "{{ openshift.common.config_base }}/master/{{ item }}" state: absent with_items: + # certificates_to_synchronize is a custom filter in lib_utils - "{{ hostvars[inventory_hostname] | certificates_to_synchronize(include_keys=false, include_ca=false) }}" - "etcd.server.crt" - "etcd.server.key" diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml index 3093444b4..d2fc2eed8 100644 --- a/playbooks/openshift-master/private/config.yml +++ b/playbooks/openshift-master/private/config.yml @@ -47,7 +47,7 @@ state: absent when: - rpmgenerated_config.stat.exists == true - - deployment_type == 'openshift-enterprise' + - openshift_deployment_type == 'openshift-enterprise' with_items: - master - node @@ -78,7 +78,6 @@ console_url: "{{ openshift_master_console_url | default(None) }}" console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}" public_console_url: "{{ openshift_master_public_console_url | default(None) }}" - ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" - name: Inspect state of first master config settings @@ -166,7 +165,6 @@ hosts: oo_masters_to_config any_errors_fatal: true vars: - openshift_master_ha: "{{ openshift.master.ha }}" openshift_master_count: "{{ openshift.master.master_count }}" openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}" openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}" @@ -185,10 +183,8 @@ - role: openshift_builddefaults - role: openshift_buildoverrides - role: nickhammond.logrotate - - role: contiv - contiv_role: netmaster - when: openshift_use_contiv | default(False) | bool - role: openshift_master + openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}" openshift_master_hosts: "{{ groups.oo_masters_to_config }}" r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" @@ -206,13 +202,13 @@ - role: calico_master when: openshift_use_calico | default(false) | bool tasks: - - include_role: + - import_role: name: kuryr tasks_from: master when: openshift_use_kuryr | default(false) | bool - name: Setup the node group config maps - include_role: + import_role: name: openshift_node_group when: openshift_master_bootstrap_enabled | default(false) | bool run_once: True diff --git a/playbooks/openshift-master/private/redeploy-openshift-ca.yml b/playbooks/openshift-master/private/redeploy-openshift-ca.yml index 9d3c12ba1..663c39868 100644 --- a/playbooks/openshift-master/private/redeploy-openshift-ca.yml +++ b/playbooks/openshift-master/private/redeploy-openshift-ca.yml @@ -125,7 +125,6 @@ - name: Create temp directory for syncing certs hosts: localhost connection: local - become: no gather_facts: no tasks: - name: Create local temp directory for syncing certs @@ -133,6 +132,10 @@ register: g_master_mktemp changed_when: false + - name: Chmod local temp directory for syncing certs + local_action: command chmod 777 "{{ g_master_mktemp.stdout }}" + changed_when: false + - 
name: Retrieve OpenShift CA hosts: oo_first_master vars: @@ -264,7 +267,6 @@ - name: Delete temporary directory on localhost hosts: localhost connection: local - become: no gather_facts: no tasks: - file: diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml index 5cb284935..17d90533c 100644 --- a/playbooks/openshift-master/private/restart.yml +++ b/playbooks/openshift-master/private/restart.yml @@ -3,16 +3,13 @@ - name: Restart masters hosts: oo_masters_to_config - vars: - openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}" serial: 1 - handlers: - - import_tasks: ../../../roles/openshift_master/handlers/main.yml roles: - openshift_facts post_tasks: - include_tasks: tasks/restart_hosts.yml when: openshift_rolling_restart_mode | default('services') == 'system' - - - include_tasks: tasks/restart_services.yml + - import_role: + name: openshift_master + tasks_from: restart.yml when: openshift_rolling_restart_mode | default('services') == 'services' diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml index 007b23ea3..5aaa0b156 100644 --- a/playbooks/openshift-master/private/scaleup.yml +++ b/playbooks/openshift-master/private/scaleup.yml @@ -8,7 +8,6 @@ - openshift_facts: role: master local_facts: - ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" - name: Update master count modify_yaml: @@ -46,7 +45,7 @@ - import_playbook: set_network_facts.yml -- import_playbook: ../../openshift-etcd/private/certificates.yml +- import_playbook: ../../openshift-etcd/private/master_etcd_certificates.yml - import_playbook: config.yml diff --git a/playbooks/openshift-master/private/tasks/restart_hosts.yml b/playbooks/openshift-master/private/tasks/restart_hosts.yml index a5dbe0590..76e1ea5f3 100644 --- a/playbooks/openshift-master/private/tasks/restart_hosts.yml +++ b/playbooks/openshift-master/private/tasks/restart_hosts.yml @@ -27,7 +27,6 @@ delay=10 timeout=600 port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}" - become: no # Now that ssh is back up we can wait for API on the remote system, # avoiding some potential connection issues from local system: diff --git a/playbooks/openshift-master/private/tasks/restart_services.yml b/playbooks/openshift-master/private/tasks/restart_services.yml index 4e1b3a3be..cf2c282e3 100644 --- a/playbooks/openshift-master/private/tasks/restart_services.yml +++ b/playbooks/openshift-master/private/tasks/restart_services.yml @@ -1,4 +1,4 @@ --- -- include_role: +- import_role: name: openshift_master tasks_from: restart.yml diff --git a/playbooks/openshift-master/private/tasks/wire_aggregator.yml b/playbooks/openshift-master/private/tasks/wire_aggregator.yml index 59e2b515c..cc812c300 100644 --- a/playbooks/openshift-master/private/tasks/wire_aggregator.yml +++ b/playbooks/openshift-master/private/tasks/wire_aggregator.yml @@ -142,11 +142,6 @@ state: absent changed_when: False -- name: Setup extension file for service console UI - template: - src: ../templates/openshift-ansible-catalog-console.js - dest: /etc/origin/master/openshift-ansible-catalog-console.js - - name: Update master config yedit: state: present @@ -166,8 +161,6 @@ value: [X-Remote-Group] - key: authConfig.requestHeader.extraHeaderPrefixes value: [X-Remote-Extra-] - - key: assetConfig.extensionScripts - value: 
[/etc/origin/master/openshift-ansible-catalog-console.js] - key: kubernetesMasterConfig.apiServerArguments.runtime-config value: [apis/settings.k8s.io/v1alpha1=true] - key: admissionConfig.pluginConfig.PodPreset.configuration.kind @@ -178,37 +171,50 @@ value: false register: yedit_output -#restart master serially here -- name: restart master api - systemd: name={{ openshift_service_type }}-master-api state=restarted - when: - - yedit_output.changed - -# We retry the controllers because the API may not be 100% initialized yet. -- name: restart master controllers - command: "systemctl restart {{ openshift_service_type }}-master-controllers" - retries: 3 - delay: 5 - register: result - until: result.rc == 0 - when: - - yedit_output.changed +# Only add the catalog extension script if not 3.9. From 3.9 on, the console +# can discover if template service broker is running. +- when: not openshift.common.version_gte_3_9 + block: + - name: Setup extension file for service console UI + template: + src: ../templates/openshift-ansible-catalog-console.js + dest: /etc/origin/master/openshift-ansible-catalog-console.js + + - name: Update master config + yedit: + state: present + src: /etc/origin/master/master-config.yaml + key: assetConfig.extensionScripts + value: [/etc/origin/master/openshift-ansible-catalog-console.js] + register: yedit_asset_config_output -- name: Verify API Server - # Using curl here since the uri module requires python-httplib2 and - # wait_for port doesn't provide health information. - command: > - curl --silent --tlsv1.2 - --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt - {{ openshift.master.api_url }}/healthz/ready - args: - # Disables the following warning: - # Consider using get_url or uri module rather than running curl - warn: no - register: api_available_output - until: api_available_output.stdout == 'ok' - retries: 120 - delay: 1 - changed_when: false - when: - - yedit_output.changed +#restart master serially here +- when: yedit_output.changed or (yedit_asset_config_output is defined and yedit_asset_config_output.changed) + block: + - name: restart master api + systemd: name={{ openshift_service_type }}-master-api state=restarted + + # We retry the controllers because the API may not be 100% initialized yet. + - name: restart master controllers + command: "systemctl restart {{ openshift_service_type }}-master-controllers" + retries: 3 + delay: 5 + register: result + until: result.rc == 0 + + - name: Verify API Server + # Using curl here since the uri module requires python-httplib2 and + # wait_for port doesn't provide health information. 
+ command: > + curl --silent --tlsv1.2 + --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt + {{ openshift.master.api_url }}/healthz/ready + args: + # Disables the following warning: + # Consider using get_url or uri module rather than running curl + warn: no + register: api_available_output + until: api_available_output.stdout == 'ok' + retries: 120 + delay: 1 + changed_when: false diff --git a/playbooks/openshift-master/private/validate_restart.yml b/playbooks/openshift-master/private/validate_restart.yml index 1077d0b9c..60b0e5bb6 100644 --- a/playbooks/openshift-master/private/validate_restart.yml +++ b/playbooks/openshift-master/private/validate_restart.yml @@ -21,7 +21,6 @@ - name: Create temp file on localhost hosts: localhost connection: local - become: no gather_facts: no tasks: - local_action: command mktemp @@ -38,7 +37,6 @@ - name: Cleanup temp file on localhost hosts: localhost connection: local - become: no gather_facts: no tasks: - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent diff --git a/playbooks/openshift-master/scaleup.yml b/playbooks/openshift-master/scaleup.yml index f717cd0e9..0ca5d1a61 100644 --- a/playbooks/openshift-master/scaleup.yml +++ b/playbooks/openshift-master/scaleup.yml @@ -1,23 +1,44 @@ --- - import_playbook: ../init/evaluate_groups.yml -- name: Ensure there are new_masters or new_nodes +- name: Ensure there are new_masters and new_nodes hosts: localhost connection: local - become: no gather_facts: no tasks: - fail: + # new_masters must be part of new_nodes as well; otherwise if new_nodes + # is not present, oo_nodes_to_config will contain all existing nodes. msg: > - Detected no new_masters or no new_nodes in inventory. Please - add hosts to the new_masters and new_nodes host groups to add - masters. - when: - - g_new_master_hosts | default([]) | length == 0 - - g_new_node_hosts | default([]) | length == 0 + Detected no new_masters and/or no new_nodes in inventory. New + masters must be part of both new_masters and new_nodes groups. + If you are adding just new_nodes, use the + playbooks/openshift-node/scaleup.yml play. 
+ when: > + g_new_master_hosts | default([]) | length == 0 + or g_new_node_hosts | default([]) | length == 0 -# Need a better way to do the above check for node without -# running evaluate_groups and init/main.yml -- import_playbook: ../init/main.yml +- name: Ensure there are new_masters and new_nodes + hosts: oo_masters_to_config + connection: local + gather_facts: no + tasks: + - fail: + # new_masters must be part of new_nodes as well; + msg: > + Each host in new_masters must also appear in new_nodes + when: inventory_hostname not in groups['oo_nodes_to_config'] + +- import_playbook: ../prerequisites.yml + vars: + l_scale_up_hosts: "oo_nodes_to_config:oo_masters_to_config" + l_base_packages_hosts: "oo_nodes_to_config:oo_masters_to_config" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config" + l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}" + +- import_playbook: ../init/version.yml + vars: + l_openshift_version_set_hosts: "oo_masters_to_config:oo_nodes_to_config:!oo_first_master" + l_openshift_version_check_hosts: "oo_masters_to_config:oo_nodes_to_config" - import_playbook: private/scaleup.yml diff --git a/playbooks/openshift-metrics/config.yml b/playbooks/openshift-metrics/config.yml index c7814207c..1ca68fb9e 100644 --- a/playbooks/openshift-metrics/config.yml +++ b/playbooks/openshift-metrics/config.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" + - import_playbook: private/config.yml diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml index 80cd93e5f..889ea77b1 100644 --- a/playbooks/openshift-metrics/private/config.yml +++ b/playbooks/openshift-metrics/private/config.yml @@ -16,14 +16,16 @@ roles: - role: openshift_metrics +# TODO: Remove when master config property is removed - name: OpenShift Metrics hosts: oo_masters:!oo_first_master serial: 1 tasks: - name: Setup the non-first masters configs - include_role: + import_role: name: openshift_metrics tasks_from: update_master_config.yaml + when: not openshift.common.version_gte_3_9 - name: Metrics Install Checkpoint End hosts: all diff --git a/playbooks/openshift-nfs/config.yml b/playbooks/openshift-nfs/config.yml index c7814207c..b22796228 100644 --- a/playbooks/openshift-nfs/config.yml +++ b/playbooks/openshift-nfs/config.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config:oo_nfs_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] | union(groups['oo_nfs_to_config']) }}" + - import_playbook: private/config.yml diff --git a/playbooks/openshift-node/private/additional_config.yml b/playbooks/openshift-node/private/additional_config.yml index b86cb3cc2..0881121c9 100644 --- a/playbooks/openshift-node/private/additional_config.yml +++ b/playbooks/openshift-node/private/additional_config.yml @@ -47,17 +47,23 @@ - role: nuage_node when: openshift_use_nuage | default(false) | bool -- name: Additional node config - hosts: oo_nodes_use_contiv +- name: Configure Contiv masters + hosts: oo_masters_to_config + roles: + - role: contiv + contiv_master: true + 
when: openshift_use_contiv | default(false) | bool + +- name: Configure rest of Contiv nodes + hosts: "{{ groups.oo_nodes_use_contiv | default([]) | difference(groups.oo_masters_to_config) }}" roles: - role: contiv - contiv_role: netplugin when: openshift_use_contiv | default(false) | bool - name: Configure Kuryr node hosts: oo_nodes_use_kuryr tasks: - - include_role: + - import_role: name: kuryr tasks_from: node when: openshift_use_kuryr | default(false) | bool diff --git a/playbooks/openshift-node/private/configure_nodes.yml b/playbooks/openshift-node/private/configure_nodes.yml index 548ff7c4f..a13173e63 100644 --- a/playbooks/openshift-node/private/configure_nodes.yml +++ b/playbooks/openshift-node/private/configure_nodes.yml @@ -11,6 +11,7 @@ }}" roles: - role: openshift_clock + - role: openshift_cloud_provider - role: openshift_node - role: tuned - role: nickhammond.logrotate diff --git a/playbooks/openshift-node/private/containerized_nodes.yml b/playbooks/openshift-node/private/containerized_nodes.yml index dc68d7585..644e6a69c 100644 --- a/playbooks/openshift-node/private/containerized_nodes.yml +++ b/playbooks/openshift-node/private/containerized_nodes.yml @@ -13,6 +13,7 @@ roles: - role: openshift_clock + - role: openshift_cloud_provider - role: openshift_node openshift_ca_host: "{{ groups.oo_first_master.0 }}" - role: nickhammond.logrotate diff --git a/playbooks/openshift-node/private/etcd_client_config.yml b/playbooks/openshift-node/private/etcd_client_config.yml index c3fa38a81..148bdc769 100644 --- a/playbooks/openshift-node/private/etcd_client_config.yml +++ b/playbooks/openshift-node/private/etcd_client_config.yml @@ -6,6 +6,5 @@ - role: openshift_etcd_facts - role: openshift_etcd_client_certificates etcd_cert_prefix: flannel.etcd- - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" diff --git a/playbooks/openshift-node/private/image_prep.yml b/playbooks/openshift-node/private/image_prep.yml index 6b517197d..adcbb0fdb 100644 --- a/playbooks/openshift-node/private/image_prep.yml +++ b/playbooks/openshift-node/private/image_prep.yml @@ -12,6 +12,13 @@ - name: run node config import_playbook: configure_nodes.yml +- name: node bootstrap config + hosts: oo_nodes_to_config:!oo_containerized_master_nodes + tasks: + - import_role: + name: openshift_node + tasks_from: bootstrap.yml + - name: Re-enable excluders import_playbook: enable_excluders.yml diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml index 7249ced70..7371bd7ac 100644 --- a/playbooks/openshift-node/private/restart.yml +++ b/playbooks/openshift-node/private/restart.yml @@ -16,6 +16,7 @@ until: not (l_docker_restart_docker_in_node_result is failed) retries: 3 delay: 30 + when: openshift_node_restart_docker_required | default(True) - name: Restart containerized services service: diff --git a/playbooks/openshift-node/private/setup.yml b/playbooks/openshift-node/private/setup.yml index 802dce37e..41c323f2b 100644 --- a/playbooks/openshift-node/private/setup.yml +++ b/playbooks/openshift-node/private/setup.yml @@ -8,7 +8,6 @@ - name: Evaluate node groups hosts: localhost - become: no connection: local tasks: - name: Evaluate oo_containerized_master_nodes diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml index 8b7272485..cdf816fbf 100644 --- 
a/playbooks/openshift-node/redeploy-certificates.yml +++ b/playbooks/openshift-node/redeploy-certificates.yml @@ -4,3 +4,5 @@ - import_playbook: private/redeploy-certificates.yml - import_playbook: private/restart.yml + vars: + openshift_node_restart_docker_required: False diff --git a/playbooks/openshift-node/scaleup.yml b/playbooks/openshift-node/scaleup.yml index bdfd3d3e6..bda251fa5 100644 --- a/playbooks/openshift-node/scaleup.yml +++ b/playbooks/openshift-node/scaleup.yml @@ -4,7 +4,6 @@ - name: Ensure there are new_nodes hosts: localhost connection: local - become: no gather_facts: no tasks: - fail: @@ -13,9 +12,28 @@ new_nodes host group to add nodes. when: - g_new_node_hosts | default([]) | length == 0 + - fail: + msg: > + Please run playbooks/openshift-master/scaleup.yml if you need to + scale up both masters and nodes. This playbook is needed only when + you are adding new nodes and not new masters. + when: + - g_new_node_hosts | default([]) | length > 0 + - g_new_master_hosts | default([]) | length > 0 + +# if g_new_node_hosts is not empty, oo_nodes_to_config will be set to +# g_new_node_hosts via evaluate_groups.yml + +- import_playbook: ../prerequisites.yml + vars: + l_scale_up_hosts: "oo_nodes_to_config" + l_base_packages_hosts: "oo_nodes_to_config" + l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_config" + l_sanity_check_hosts: "{{ groups['oo_nodes_to_config'] | union(groups['oo_masters_to_config']) }}" -# Need a better way to do the above check for node without -# running evaluate_groups and init/main.yml -- import_playbook: ../init/main.yml +- import_playbook: ../init/version.yml + vars: + l_openshift_version_set_hosts: "oo_nodes_to_config:!oo_first_master" + l_openshift_version_check_hosts: "oo_nodes_to_config" - import_playbook: private/config.yml diff --git a/playbooks/openshift-prometheus/config.yml b/playbooks/openshift-prometheus/config.yml index c7814207c..1ca68fb9e 100644 --- a/playbooks/openshift-prometheus/config.yml +++ b/playbooks/openshift-prometheus/config.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" + - import_playbook: private/config.yml diff --git a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml new file mode 100644 index 000000000..b01f7f988 --- /dev/null +++ b/playbooks/openshift-prometheus/private/uninstall.yml @@ -0,0 +1,8 @@ +--- +- name: Uninstall Prometheus + hosts: masters[0] + tasks: + - name: Run the Prometheus Uninstall Role Tasks + include_role: + name: openshift_prometheus + tasks_from: uninstall_prometheus diff --git a/playbooks/openshift-prometheus/uninstall.yml b/playbooks/openshift-prometheus/uninstall.yml new file mode 100644 index 000000000..c92ade786 --- /dev/null +++ b/playbooks/openshift-prometheus/uninstall.yml @@ -0,0 +1,2 @@ +--- +- import_playbook: private/uninstall.yml diff --git a/playbooks/openshift-provisioners/config.yml b/playbooks/openshift-provisioners/config.yml index c7814207c..1ca68fb9e 100644 --- a/playbooks/openshift-provisioners/config.yml +++ b/playbooks/openshift-provisioners/config.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + 
l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" + - import_playbook: private/config.yml diff --git a/playbooks/openshift-service-catalog/config.yml b/playbooks/openshift-service-catalog/config.yml index c7814207c..1ca68fb9e 100644 --- a/playbooks/openshift-service-catalog/config.yml +++ b/playbooks/openshift-service-catalog/config.yml @@ -1,4 +1,10 @@ --- - import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" + - import_playbook: private/config.yml diff --git a/playbooks/openshift-web-console/config.yml b/playbooks/openshift-web-console/config.yml new file mode 100644 index 000000000..62d954d29 --- /dev/null +++ b/playbooks/openshift-web-console/config.yml @@ -0,0 +1,9 @@ +--- +- import_playbook: ../init/main.yml + vars: + l_init_fact_hosts: "oo_masters_to_config" + l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master" + l_openshift_version_check_hosts: "all:!all" + l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}" + +- import_playbook: private/config.yml diff --git a/playbooks/openshift-web-console/private/config.yml b/playbooks/openshift-web-console/private/config.yml new file mode 100644 index 000000000..ffd702d20 --- /dev/null +++ b/playbooks/openshift-web-console/private/config.yml @@ -0,0 +1,31 @@ +--- +- name: Web Console Install Checkpoint Start + hosts: all + gather_facts: false + tasks: + - name: Set Web Console install 'In Progress' + run_once: true + set_stats: + data: + installer_phase_web_console: + status: "In Progress" + start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" + +- name: Web Console + hosts: oo_first_master + roles: + - openshift_web_console + vars: + first_master: "{{ groups.oo_first_master[0] }}" + +- name: Web Console Install Checkpoint End + hosts: all + gather_facts: false + tasks: + - name: Set Web Console install 'Complete' + run_once: true + set_stats: + data: + installer_phase_web_console: + status: "Complete" + end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" diff --git a/playbooks/openshift-web-console/private/roles b/playbooks/openshift-web-console/private/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/openshift-web-console/private/roles @@ -0,0 +1 @@ +../../../roles/
\ No newline at end of file diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md index d361d6278..842bb34de 100644 --- a/playbooks/openstack/README.md +++ b/playbooks/openstack/README.md @@ -30,15 +30,17 @@ version 10) or newer. It must also satisfy these requirements: - look at the [Minimum Hardware Requirements page][hardware-requirements] for production -* The keypair for SSH must be available in openstack -* `keystonerc` file that lets you talk to the openstack services +* The keypair for SSH must be available in OpenStack +* `keystonerc` file that lets you talk to the OpenStack services * NOTE: only Keystone V2 is currently supported +* A host with the supported version of [Ansible][ansible] installed; see the + [Setup section of the openshift-ansible README][openshift-ansible-setup] + for details on the requirements. Optional: * External Neutron network with a floating IP address pool - ## Installation There are four main parts to the installation: @@ -68,12 +70,11 @@ First, you need to select where to run [Ansible][ansible] from (the *Ansible host*). This can be the computer you read this guide on or an OpenStack VM you'll create specifically for this purpose. -We will use -a +This guide will use a [Docker image that has all the dependencies installed][control-host-image] to make things easier. If you don't want to use Docker, take a look at the [Ansible host dependencies][ansible-dependencies] and make sure -they're installed. +they are installed. Your *Ansible host* needs to have the following: @@ -183,13 +184,16 @@ Then run the provision + install playbook -- this will create the OpenStack resources: ```bash -$ ansible-playbook --user openshift -i inventory \ - openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yaml \ - -e openshift_repos_enable_testing=true +$ ansible-playbook --user openshift \ + -i openshift-ansible/playbooks/openstack/inventory.py \ + -i inventory \ + openshift-ansible/playbooks/openstack/openshift-cluster/provision_install.yml ``` -Note, you may want to use the testing repo for development purposes only. -Normally, `openshift_repos_enable_testing` should not be specified. +In addition to *your* inventory with your OpenShift and OpenStack +configuration, we are also supplying the [dynamic inventory][dynamic] from +`openshift-ansible/inventory`. It is a script that looks at the Nova servers +and other resources that will be created and lets Ansible know about them. If you're using multiple inventories, make sure you pass the path to the right one to `-i`. 
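If you are curious what the dynamic inventory will hand to Ansible, you can dump it before running the playbook (for example with `ansible-inventory -i openshift-ansible/playbooks/openstack/inventory.py --list` after sourcing your `keystonerc`). As a rough sketch only -- the group names and host-variable keys below match what `inventory.py` sets (see the inventory changes later in this diff), while the host names and addresses are illustrative placeholders, not output from a real cloud:

```
{
    "masters": {"hosts": ["master-0.openshift.example.com"]},
    "app": {"hosts": ["app-node-0.openshift.example.com"]},
    "_meta": {
        "hostvars": {
            "app-node-0.openshift.example.com": {
                "private_v4": "192.168.99.10",
                "openshift_ip": "192.168.99.10",
                "openshift_hostname": "192.168.99.10",
                "openshift_public_hostname": "app-node-0.openshift.example.com"
            }
        }
    }
}
```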
@@ -219,6 +223,7 @@ advanced configuration: [ansible]: https://www.ansible.com/ [openshift-ansible]: https://github.com/openshift/openshift-ansible +[openshift-ansible-setup]: https://github.com/openshift/openshift-ansible#setup [devstack]: https://docs.openstack.org/devstack/ [tripleo]: http://tripleo.org/ [ansible-dependencies]: ./advanced-configuration.md#dependencies-for-localhost-ansible-controladmin-node @@ -233,3 +238,4 @@ advanced configuration: [loadbalancer]: ./advanced-configuration.md#multi-master-configuration [external-dns]: ./advanced-configuration.md#dns-configuration-variables [cinder-registry]: ./advanced-configuration.md#creating-and-using-a-cinder-volume-for-the-openshift-registry +[dynamic]: http://docs.ansible.com/ansible/latest/intro_dynamic_inventory.html diff --git a/playbooks/openstack/advanced-configuration.md b/playbooks/openstack/advanced-configuration.md index 2c9b70b5f..8df3c40b0 100644 --- a/playbooks/openstack/advanced-configuration.md +++ b/playbooks/openstack/advanced-configuration.md @@ -1,9 +1,8 @@ ## Dependencies for localhost (ansible control/admin node) -* [Ansible 2.3](https://pypi.python.org/pypi/ansible) -* [Ansible-galaxy](https://pypi.python.org/pypi/ansible-galaxy-local-deps) -* [jinja2](http://jinja.pocoo.org/docs/2.9/) -* [shade](https://pypi.python.org/pypi/shade) +* [Ansible](https://pypi.python.org/pypi/ansible) version >=2.4.0 +* [jinja2](http://jinja.pocoo.org/docs/2.9/) version >= 2.10 +* [shade](https://pypi.python.org/pypi/shade) version >= 1.26 * python-jmespath / [jmespath](https://pypi.python.org/pypi/jmespath) * python-dns / [dnspython](https://pypi.python.org/pypi/dnspython) * Become (sudo) is not required. @@ -133,7 +132,7 @@ You can also access the OpenShift cluster with a web browser by going to: https://master-0.openshift.example.com:8443 Note that for this to work, the OpenShift nodes must be accessible -from your computer and it's DNS configuration must use the cruster's +from your computer and its DNS configuration must use the cluster's DNS. @@ -153,7 +152,7 @@ openstack stack delete --wait --yes openshift.example.com Pay special attention to the values in the first paragraph -- these will depend on your OpenStack environment. -Note that the provsisioning playbooks update the original Neutron subnet +Note that the provisioning playbooks update the original Neutron subnet created with the Heat stack to point to the configured DNS servers. So the provisioned cluster nodes will start using those natively as default nameservers. Technically, this allows to deploy OpenShift clusters @@ -162,7 +161,7 @@ without dnsmasq proxies. The `openshift_openstack_clusterid` and `openshift_openstack_public_dns_domain` will form the cluster's public DNS domain all your servers will be under. With the default values, this will be `openshift.example.com`. For workloads, the -default subdomain is 'apps'. That sudomain can be set as well by the +default subdomain is 'apps'. That subdomain can be set as well by the `openshift_openstack_app_subdomain` variable in the inventory. If you want to use a two sets of hostnames for public and private/prefixed DNS @@ -274,6 +273,38 @@ openshift_openstack_cluster_node_labels: mylabel: myvalue ``` +`openshift_openstack_provision_user_commands` allows users to execute +shell commands via cloud-init for all of the created Nova servers in +the Heat stack, before they are available for SSH connections. 
+Note that you should use custom ansible playbooks whenever +possible, like this `provision_install_custom.yml` example playbook: +``` +- import_playbook: openshift-ansible/playbooks/openstack/openshift-cluster/provision.yml + +- name: My custom actions + hosts: cluster_hosts + tasks: + - do whatever you want here + +- import_playbook: openshift-ansible/playbooks/openstack/openshift-cluster/install.yml +``` +The playbook leverages two existing provider interfaces: `provision.yml` and +`install.yml`. For some cases, such as SSH key configuration and coordinated reboots of +servers, the cloud-init runcmd directive may be a better choice. User-specified +shell commands for cloud-init need to be either strings or lists, for example: +``` +- openshift_openstack_provision_user_commands: + - set -vx + - systemctl stop sshd # fences off ansible playbooks as we want to reboot later + - ['echo', 'foo', '>', '/tmp/foo'] + - [ ls, /tmp/foo, '||', true ] + - reboot # unfences ansible playbooks to continue after reboot +``` + +**Note**: To protect Nova servers from being recreated when the user-data changes via +`openshift_openstack_provision_user_commands`, the +`user_data_update_policy` parameter is configured to `IGNORE` for Heat resources. + The `openshift_openstack_nodes_to_remove` allows you to specify the numerical indexes of App nodes that should be removed; for example, ['0', '2'], @@ -334,7 +365,7 @@ or your trusted network. The most important is the `openshift_openstack_node_ing that restricts public access to the deployed DNS server and cluster nodes' ephemeral ports range. -Note, the command ``curl https://api.ipify.org`` helps fiding an external +Note, the command ``curl https://api.ipify.org`` helps finding an external IP address of your box (the ansible admin node). There is also the `manage_packages` variable (defaults to True) you @@ -372,6 +403,112 @@ In order to set a custom entrypoint, update `openshift_master_cluster_public_hos Note than an empty hostname does not work, so if your domain is `openshift.example.com`, you cannot set this value to simply `openshift.example.com`. + +## Using Cinder-backed Persistent Volumes + +You will need to set up OpenStack credentials. You can try putting this in your +`inventory/group_vars/OSEv3.yml`: + + openshift_cloudprovider_kind: openstack + openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" + openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}" + openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" + openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_PROJECT_NAME') }}" + openshift_cloudprovider_openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" + openshift_cloudprovider_openstack_blockstorage_version: v2 + +**NOTE**: you must specify the Block Storage version as v2, because OpenShift +does not support the v3 API yet and the version detection is currently not +working properly. + +For more information, consult the [Configuring for OpenStack page in the OpenShift documentation][openstack-credentials]. + +[openstack-credentials]: https://docs.openshift.org/latest/install_config/configuring_openstack.html#install-config-configuring-openstack + +**NOTE** the OpenStack integration currently requires DNS to be configured and +running, and the `openshift_hostname` variable must match the Nova server name +for each node. The cluster deployment will fail without it. 
If you use the +provided OpenStack dynamic inventory and configure the +`openshift_openstack_dns_nameservers` Ansible variable, this will be handled +for you. + +After a successful deployment, the cluster is configured for Cinder persistent +volumes. + +### Validation + +1. Log in and create a new project (with `oc login` and `oc new-project`) +2. Create a file called `cinder-claim.yaml` with the following contents: + +```yaml +apiVersion: "v1" +kind: "PersistentVolumeClaim" +metadata: + name: "claim1" +spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "1Gi" +``` +3. Run `oc create -f cinder-claim.yaml` to create the Persistent Volume Claim object in OpenShift +4. Run `oc describe pvc claim1` to verify that the claim was created and its Status is `Bound` +5. Run `openstack volume list` + * A new volume called `kubernetes-dynamic-pvc-UUID` should be created + * Its size should be `1` + * It should not be attached to any server +6. Create a file called `mysql-pod.yaml` with the following contents: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: mysql + labels: + name: mysql +spec: + containers: + - resources: + limits : + cpu: 0.5 + image: openshift/mysql-55-centos7 + name: mysql + env: + - name: MYSQL_ROOT_PASSWORD + value: yourpassword + - name: MYSQL_USER + value: wp_user + - name: MYSQL_PASSWORD + value: wp_pass + - name: MYSQL_DATABASE + value: wp_db + ports: + - containerPort: 3306 + name: mysql + volumeMounts: + - name: mysql-persistent-storage + mountPath: /var/lib/mysql/data + volumes: + - name: mysql-persistent-storage + persistentVolumeClaim: + claimName: claim1 +``` + +7. Run `oc create -f mysql-pod.yaml` to create the pod +8. Run `oc describe pod mysql` + * Its events should show that the pod has successfully attached the volume above + * It should show no errors + * `openstack volume list` should show the volume attached to an OpenShift app node + * NOTE: this can take several seconds +9. After a while, `oc get pod` should show the `mysql` pod as running +10. Run `oc delete pod mysql` to remove the pod + * The Cinder volume should no longer be attached +11. Run `oc delete pvc claim1` to remove the volume claim + * The Cinder volume should be deleted + + + ## Creating and using a Cinder volume for the OpenShift registry You can optionally have the playbooks create a Cinder volume and set @@ -415,7 +552,7 @@ OpenStack)[openstack] for more information. [openstack]: https://docs.openshift.org/latest/install_config/configuring_openstack.html -Next, we need to instruct OpenShift to use the Cinder volume for it's +Next, we need to instruct OpenShift to use the Cinder volume for its registry. Again in `OSEv3.yml`: #openshift_hosted_registry_storage_kind: openstack @@ -470,12 +607,12 @@ The **Cinder volume ID**, **filesystem** and **volume size** variables must correspond to the values in your volume. The volume ID must be the **UUID** of the Cinder volume, *not its name*. -We can do formate the volume for you if you ask for it in +The volume can also be formatted if you configure it in `inventory/group_vars/all.yml`: openshift_openstack_prepare_and_format_registry_volume: true -**NOTE:** doing so **will destroy any data that's currently on the volume**! +**NOTE:** Formatting **will destroy any data that's currently on the volume**! 
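For reference, a filled-in registry storage section in `OSEv3.yml` might look like the sketch below. Only `openshift_hosted_registry_storage_kind` appears verbatim in this diff; the remaining names follow the `openshift_hosted_registry_storage_*` pattern from the OpenShift OpenStack configuration docs, and every value (UUID, filesystem, size) is a placeholder to replace with your own:

```yaml
openshift_hosted_registry_storage_kind: openstack
openshift_hosted_registry_storage_access_modes: ['ReadWriteOnce']
# Must be the Cinder volume's UUID, not its name (placeholder value).
openshift_hosted_registry_storage_openstack_volumeID: 26c2ea2a-0000-4b3f-a4ee-123456789abc
openshift_hosted_registry_storage_openstack_filesystem: xfs
openshift_hosted_registry_storage_volume_size: 10Gi
```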
You can also run the registry setup playbook directly: diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/inventory.py index ad3fd936b..d5a8c3e24 100755 --- a/playbooks/openstack/sample-inventory/inventory.py +++ b/playbooks/openstack/inventory.py @@ -9,23 +9,16 @@ environment. from __future__ import print_function +from collections import Mapping import json import shade -def build_inventory(): - '''Build the dynamic inventory.''' - cloud = shade.openstack_cloud() - +def base_openshift_inventory(cluster_hosts): + '''Set the base openshift inventory.''' inventory = {} - # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER` - # environment variable. - cluster_hosts = [ - server for server in cloud.list_servers() - if 'metadata' in server and 'clusterid' in server.metadata] - masters = [server.name for server in cluster_hosts if server.metadata['host-type'] == 'master'] @@ -42,7 +35,10 @@ def build_inventory(): if server.metadata['host-type'] == 'node' and server.metadata['sub-host-type'] == 'app'] - nodes = list(set(masters + infra_hosts + app)) + cns = [server.name for server in cluster_hosts + if server.metadata['host-type'] == 'cns'] + + nodes = list(set(masters + infra_hosts + app + cns)) dns = [server.name for server in cluster_hosts if server.metadata['host-type'] == 'dns'] @@ -59,9 +55,38 @@ def build_inventory(): inventory['nodes'] = {'hosts': nodes} inventory['infra_hosts'] = {'hosts': infra_hosts} inventory['app'] = {'hosts': app} + inventory['glusterfs'] = {'hosts': cns} inventory['dns'] = {'hosts': dns} inventory['lb'] = {'hosts': load_balancers} + return inventory + + +def get_docker_storage_mountpoints(volumes): + '''Check volumes to see if they're being used for docker storage''' + docker_storage_mountpoints = {} + for volume in volumes: + if volume.metadata.get('purpose') == "openshift_docker_storage": + for attachment in volume.attachments: + if attachment.server_id in docker_storage_mountpoints: + docker_storage_mountpoints[attachment.server_id].append(attachment.device) + else: + docker_storage_mountpoints[attachment.server_id] = [attachment.device] + return docker_storage_mountpoints + + +def build_inventory(): + '''Build the dynamic inventory.''' + cloud = shade.openstack_cloud() + + # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER` + # environment variable. + cluster_hosts = [ + server for server in cloud.list_servers() + if 'metadata' in server and 'clusterid' in server.metadata] + + inventory = base_openshift_inventory(cluster_hosts) + for server in cluster_hosts: if 'group' in server.metadata: group = server.metadata.group @@ -71,6 +96,9 @@ def build_inventory(): inventory['_meta'] = {'hostvars': {}} + # cinder volumes used for docker storage + docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes()) + for server in cluster_hosts: ssh_ip_address = server.public_v4 or server.private_v4 hostvars = { @@ -84,19 +112,33 @@ def build_inventory(): # TODO(shadower): what about multiple networks? if server.private_v4: hostvars['private_v4'] = server.private_v4 + hostvars['openshift_ip'] = server.private_v4 + # NOTE(shadower): Yes, we set both hostname and IP to the private # IP address for each node. OpenStack doesn't resolve nodes by # name at all, so using a hostname here would require an internal # DNS which would complicate the setup and potentially introduce # performance issues. 
- hostvars['openshift_ip'] = server.private_v4 - hostvars['openshift_hostname'] = server.private_v4 + hostvars['openshift_hostname'] = server.metadata.get( + 'openshift_hostname', server.private_v4) hostvars['openshift_public_hostname'] = server.name + if server.metadata['host-type'] == 'cns': + hostvars['glusterfs_devices'] = ['/dev/nvme0n1'] + node_labels = server.metadata.get('node_labels') + # NOTE(shadower): the node_labels value must be a dict not string + if node_labels and not isinstance(node_labels, Mapping): + node_labels = json.loads(node_labels) + if node_labels: hostvars['openshift_node_labels'] = node_labels + # check for attached docker storage volumes + if 'os-extended-volumes:volumes_attached' in server: + if server.id in docker_storage_mountpoints: + hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id]) + inventory['_meta']['hostvars'][server.name] = hostvars return inventory diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml index 3211f619a..2ab7d14a0 100644 --- a/playbooks/openstack/openshift-cluster/install.yml +++ b/playbooks/openstack/openshift-cluster/install.yml @@ -9,4 +9,7 @@ # some logic here? -- name: run the cluster deploy +- name: run the cluster prerequisites + import_playbook: ../../prerequisites.yml + +- name: run the cluster deploy import_playbook: ../../deploy_cluster.yml diff --git a/playbooks/openstack/openshift-cluster/prerequisites.yml b/playbooks/openstack/openshift-cluster/prerequisites.yml index 0356b37dd..8bb700501 100644 --- a/playbooks/openstack/openshift-cluster/prerequisites.yml +++ b/playbooks/openstack/openshift-cluster/prerequisites.yml @@ -2,11 +2,11 @@ - hosts: localhost tasks: - name: Check dependencies and OpenStack prerequisites - include_role: + import_role: name: openshift_openstack tasks_from: check-prerequisites.yml - name: Check network configuration - include_role: + import_role: name: openshift_openstack tasks_from: net_vars_check.yaml diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml index fa5c91ace..73c1926a0 100644 --- a/playbooks/openstack/openshift-cluster/provision.yml +++ b/playbooks/openstack/openshift-cluster/provision.yml @@ -3,7 +3,7 @@ hosts: localhost tasks: - name: provision cluster - include_role: + import_role: name: openshift_openstack tasks_from: provision.yml @@ -26,8 +26,8 @@ - name: Gather facts for the new nodes setup: -- name: set common facts - import_playbook: ../../init/facts.yml +- import_playbook: ../../init/basic_facts.yml +- import_playbook: ../../init/cluster_facts.yml # TODO(shadower): consider splitting this up so people can stop here @@ -36,7 +36,7 @@ hosts: localhost tasks: - name: Populate DNS entries - include_role: + import_role: name: openshift_openstack tasks_from: populate-dns.yml when: @@ -49,7 +49,7 @@ gather_facts: yes tasks: - name: Subscribe RHEL instances - include_role: + import_role: name: rhel_subscribe when: - ansible_distribution == "RedHat" @@ -57,18 +57,18 @@ - rhsub_pass is defined - name: Enable required YUM repositories - include_role: + import_role: name: openshift_repos when: - ansible_distribution == "RedHat" - rh_subscribed is defined - name: Install dependencies - include_role: + import_role: name: openshift_openstack tasks_from: node-packages.yml - name: Configure Node - include_role: + import_role: name: openshift_openstack tasks_from: node-configuration.yml diff --git a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml 
b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml index 933117127..1287b25f3 100644 --- a/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml +++ b/playbooks/openstack/sample-inventory/group_vars/OSEv3.yml @@ -14,12 +14,13 @@ openshift_hosted_router_wait: True openshift_hosted_registry_wait: True ## Openstack credentials -#openshift_cloudprovider_kind=openstack +#openshift_cloudprovider_kind: openstack #openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" #openshift_cloudprovider_openstack_username: "{{ lookup('env','OS_USERNAME') }}" #openshift_cloudprovider_openstack_password: "{{ lookup('env','OS_PASSWORD') }}" #openshift_cloudprovider_openstack_tenant_name: "{{ lookup('env','OS_TENANT_NAME') }}" -#openshift_cloudprovider_openstack_region="{{ lookup('env', 'OS_REGION_NAME') }}" +#openshift_cloudprovider_openstack_region: "{{ lookup('env', 'OS_REGION_NAME') }}" +#openshift_cloudprovider_openstack_blockstorage_version: v2 ## Use Cinder volume for Openshift registry: @@ -42,7 +43,7 @@ openshift_hosted_registry_wait: True # NOTE(shadower): the hostname check seems to always fail because the # host's floating IP address doesn't match the address received from # inside the host. -openshift_override_hostname_check: true +openshift_hostname_check: false # For POCs or demo environments that are using smaller instances than # the official recommended values for RAM and DISK, uncomment the line below. diff --git a/playbooks/openstack/sample-inventory/group_vars/all.yml b/playbooks/openstack/sample-inventory/group_vars/all.yml index c7afe9a24..101ac52ad 100644 --- a/playbooks/openstack/sample-inventory/group_vars/all.yml +++ b/playbooks/openstack/sample-inventory/group_vars/all.yml @@ -7,6 +7,7 @@ openshift_openstack_dns_nameservers: [] # # - set custom hostnames for roles by uncommenting corresponding lines #openshift_openstack_master_hostname: "master" #openshift_openstack_infra_hostname: "infra-node" +#openshift_openstack_cns_hostname: "cns" #openshift_openstack_node_hostname: "app-node" #openshift_openstack_lb_hostname: "lb" #openshift_openstack_etcd_hostname: "etcd" @@ -30,6 +31,7 @@ openshift_openstack_external_network_name: "public" # # - note: do not remove openshift_openstack_default_image_name definition #openshift_openstack_master_image_name: "centos7" #openshift_openstack_infra_image_name: "centos7" +#openshift_openstack_cns_image_name: "centos7" #openshift_openstack_node_image_name: "centos7" #openshift_openstack_lb_image_name: "centos7" #openshift_openstack_etcd_image_name: "centos7" @@ -37,6 +39,7 @@ openshift_openstack_default_image_name: "centos7" openshift_openstack_num_masters: 1 openshift_openstack_num_infra: 1 +openshift_openstack_num_cns: 0 openshift_openstack_num_nodes: 2 # # Used Flavors @@ -44,6 +47,7 @@ openshift_openstack_num_nodes: 2 # # - note: do note remove openshift_openstack_default_flavor definition #openshift_openstack_master_flavor: "m1.medium" #openshift_openstack_infra_flavor: "m1.medium" +#openshift_openstack_cns_flavor: "m1.medium" #openshift_openstack_node_flavor: "m1.medium" #openshift_openstack_lb_flavor: "m1.medium" #openshift_openstack_etcd_flavor: "m1.medium" @@ -57,6 +61,7 @@ openshift_openstack_default_flavor: "m1.medium" # # - note: do not remove docker_default_volume_size definition #openshift_openstack_docker_master_volume_size: "15" #openshift_openstack_docker_infra_volume_size: "15" +#openshift_openstack_docker_cns_volume_size: "15" #openshift_openstack_docker_node_volume_size: "15" 
#openshift_openstack_docker_etcd_volume_size: "2" #openshift_openstack_docker_lb_volume_size: "5" @@ -80,7 +85,12 @@ openshift_openstack_docker_volume_size: "15" ## WARNING: This will delete any data on the volume! #openshift_openstack_prepare_and_format_registry_volume: False -openshift_openstack_subnet_prefix: "192.168.99" +# The Classless Inter-Domain Routing (CIDR) for the OpenStack VM subnet. +openshift_openstack_subnet_cidr: "192.168.99.0/24" +# The starting IP address for the OpenStack subnet allocation pool. +openshift_openstack_pool_start: "192.168.99.3" +# The ending IP address for the OpenStack subnet allocation pool. +openshift_openstack_pool_end: "192.168.99.254" ## Red Hat subscription: #rhsub_user: '<username>' diff --git a/playbooks/prerequisites.yml b/playbooks/prerequisites.yml index 68d7f3359..544adbd4d 100644 --- a/playbooks/prerequisites.yml +++ b/playbooks/prerequisites.yml @@ -1,13 +1,23 @@ --- +# l_scale_up_hosts may be passed in via various scaleup plays. + - import_playbook: init/main.yml vars: - skip_verison: True + skip_version: True + l_install_base_packages: True + l_openshift_version_set_hosts: "all:!all" + l_openshift_version_check_hosts: "all:!all" + +- import_playbook: init/validate_hostnames.yml + when: not (skip_validate_hostnames | default(False)) - import_playbook: init/repos.yml # This is required for container runtime for crio, only needs to run once. - name: Configure os_firewall - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config + hosts: "{{ l_scale_up_hosts | default(l_default_firewall_hosts) }}" + vars: + l_default_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nfs_to_config:oo_nodes_to_config" roles: - role: os_firewall
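As a closing usage sketch of the new `l_scale_up_hosts` wiring: when the variable is unset, the firewall play above falls back to `l_default_firewall_hosts` and covers every cluster group, while a scaleup entry point narrows it to just the hosts being added, as the node and master scaleup plays earlier in this diff do. A hypothetical caller (not a playbook added by this change) would look like:

```yaml
---
# Run prerequisites against only the new nodes, mirroring
# playbooks/openshift-node/scaleup.yml above.
- import_playbook: playbooks/prerequisites.yml
  vars:
    l_scale_up_hosts: "oo_nodes_to_config"
    l_base_packages_hosts: "oo_nodes_to_config"
```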